/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

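/* Busy-wait until the masked bits of @reg read back as @value; polls every
 * 10 us, up to 10000 times (~100 ms), before giving up with -ETIMEDOUT.
 */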
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

static int ravb_config(struct net_device *ndev)
{
	int error;

	/* Set config mode */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
		   CCC);
	/* Check if the operating mode is changed to the config mode */
	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
	if (error)
		netdev_err(ndev, "failed to switch device to config mode\n");

	return error;
}

static void ravb_set_duplex(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecmr = ravb_read(ndev, ECMR);

	if (priv->duplex)	/* Full */
		ecmr |=  ECMR_DM;
	else			/* Half */
		ecmr &= ~ECMR_DM;
	ravb_write(ndev, ecmr, ECMR);
}

static void ravb_set_rate(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	default:
		break;
	}
}

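/* Align skb->data up to the next RAVB_ALIGN boundary by consuming headroom;
 * RX buffers are allocated with RAVB_ALIGN - 1 bytes of slack for this.
 */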
static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
	if (mac) {
		ether_addr_copy(ndev->dev_addr, mac);
	} else {
		ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
		ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);
	u32 pir = ravb_read(priv->ndev, PIR);

	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	ravb_write(priv->ndev, pir, PIR);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int ring_size;
	int i;

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free TX skb ringbuffer */
	if (priv->tx_skb[q]) {
		for (i = 0; i < priv->num_tx_ring[q]; i++)
			dev_kfree_skb(priv->tx_skb[q][i]);
	}
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	if (priv->rx_ring[q]) {
		ring_size = sizeof(struct ravb_ex_rx_desc) *
			    (priv->num_rx_ring[q] + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
				  priv->rx_desc_dma[q]);
		priv->rx_ring[q] = NULL;
	}

	if (priv->tx_ring[q]) {
		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
			   NUM_TX_DESC;
	dma_addr_t dma_addr;
	int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		/* The size of the buffer should be on 16-byte boundary. */
		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  ALIGN(PKT_BUF_SZ, 16),
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
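	/* Terminate the ring with a link descriptor that points back to the
	 * ring base (this is the extra descriptor allocated in
	 * ravb_ring_init()), making the RX ring circular.
	 */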
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
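	/* Each frame occupies NUM_TX_DESC descriptors (two per packet in this
	 * driver: one for the aligned head, one for the remainder), so mark
	 * both descriptors of every entry empty.
	 */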
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		tx_desc++;
		tx_desc->die_dt = DT_EEMPTY;
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	int ring_size;
	int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	/* Allocate rings for the aligned buffers */
	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
				    DPTR_ALIGN - 1, GFP_KERNEL);
	if (!priv->tx_align[q])
		goto error;

	/* Allocate all RX descriptors. */
	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->rx_ring[q])
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecmr;

	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* PAUSE prohibition */
	ecmr =  ravb_read(ndev, ECMR);
	ecmr &= ECMR_DM;
	ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
	ravb_write(ndev, ecmr, ECMR);

	ravb_set_rate(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	ravb_write(ndev, 1, MPR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	int error;

	/* Set CONFIG mode */
	error = ravb_config(ndev);
	if (error)
		return error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
	ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
#else
	ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
#endif

	/* Set AVB RX */
	ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt enable: */
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	/* Setting the control will start the AVB-DMAC process. */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
		   CCC);

	return 0;
}

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_tx_desc *desc;
	int free_num = 0;
	int entry;
	u32 size;

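	/* dirty_tx..cur_tx is the window of descriptors handed to hardware;
	 * reclaim them in order until one is found that the DMAC has not
	 * completed yet (completion turns the descriptor type into DT_FEMPTY).
	 */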
	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     NUM_TX_DESC);
		desc = &priv->tx_ring[q][entry];
		if (desc->die_dt != DT_FEMPTY)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
				entry /= NUM_TX_DESC;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				stats->tx_packets++;
			}
			free_num++;
		}
		stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

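	/* TSR_TFFL holds the number of valid timestamp FIFO entries; drain
	 * one entry per iteration, match it against the queued TX skbs by
	 * tag, and release the FIFO entry with TCCR_TFR afterwards.
	 */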
	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				break;
			}
		}
		ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
	}
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
			priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8  desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
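	/* Process at most min(descriptors pending, NAPI *quota) frames; the
	 * difference between limit and the final boguscnt is charged against
	 * the quota at the end.
	 */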
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 ALIGN(PKT_BUF_SZ, 16),
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		/* The size of the buffer should be on 16-byte boundary. */
		desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev,
					       PKT_BUF_SZ + RAVB_ALIGN - 1);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
}

/* Wait until the hardware DMA processes have finished */
static int ravb_stop_dma(struct net_device *ndev)
{
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR,
			  TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~EIS_QFS, EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

		/* Receive Descriptor Empty int (best effort queue) */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int (network control queue) */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		u32 ris0 = ravb_read(ndev, RIS0);
		u32 ric0 = ravb_read(ndev, RIC0);
		u32 tis  = ravb_read(ndev, TIS);
		u32 tic  = ravb_read(ndev, TIC);
		int q;

		/* Timestamp updated */
		if (tis & TIS_TFUF) {
			ravb_write(ndev, ~TIS_TFUF, TIS);
			ravb_get_tx_tstamp(ndev);
			result = IRQ_HANDLED;
		}

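		/* The loop walks from RAVB_NC down to RAVB_BE, so the network
		 * control queue is serviced ahead of the best effort queue.
		 */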
		/* Network control and best effort queue RX/TX */
		for (q = RAVB_NC; q >= RAVB_BE; q--) {
			if (((ris0 & ric0) & BIT(q)) ||
			    ((tis  & tic)  & BIT(q))) {
				if (napi_schedule_prep(&priv->napi[q])) {
					/* Mask RX and TX interrupts */
					ric0 &= ~BIT(q);
					tic &= ~BIT(q);
					ravb_write(ndev, ric0, RIC0);
					ravb_write(ndev, tic, TIC);
					__napi_schedule(&priv->napi[q]);
				} else {
					netdev_warn(ndev,
						    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
						    ris0, ric0);
					netdev_warn(ndev,
						    "                    tx status 0x%08x, tx mask 0x%08x.\n",
						    tis, tic);
				}
				result = IRQ_HANDLED;
			}
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	if (iss & ISS_CGIS)
		result = ravb_ptp_interrupt(ndev);

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	u32 ris0, tis;

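	/* Keep draining RX and TX events until both interrupt status bits for
	 * this queue are clear or the RX quota is exhausted.
	 */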
	for (;;) {
		tis = ravb_read(ndev, TIS);
		ris0 = ravb_read(ndev, RIS0);
		if (!((ris0 & mask) || (tis & mask)))
			break;

		/* Processing RX Descriptor Ring */
		if (ris0 & mask) {
			/* Clear RX interrupt */
			ravb_write(ndev, ~mask, RIS0);
			if (ravb_rx(ndev, &quota, q))
				goto out;
		}
		/* Processing TX Descriptor Ring */
		if (tis & mask) {
			spin_lock_irqsave(&priv->lock, flags);
			/* Clear TX interrupt */
			ravb_write(ndev, ~mask, TIS);
			ravb_tx_free(ndev, q);
			netif_wake_subqueue(ndev, q);
			mmiowb();
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
	ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
		ndev->stats.rx_over_errors = priv->rx_over_errors;
		netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
	}
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
		netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
	}
out:
	return budget - quota;
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = priv->phydev;
	bool new_state = false;

	if (phydev->link) {
		if (phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			ravb_set_rate(ndev);
		}
		if (!priv->link) {
			ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
				   ECMR);
			new_state = true;
			priv->link = phydev->link;
			if (priv->no_avb_link)
				ravb_rcv_snd_enable(ndev);
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		priv->duplex = -1;
		if (priv->no_avb_link)
			ravb_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev;
	struct device_node *pn;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		return -ENOENT;
	}

	/* This driver only supports 10/100Mbit speeds on Gen3 at this time. */
	if (priv->chip_id == RCAR_GEN3) {
		int err;

		err = phy_set_max_speed(phydev, SPEED_100);
		if (err) {
			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
			phy_disconnect(phydev);
			return err;
		}

		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
	}

	/* 10BASE is not supported */
	phydev->supported &= ~PHY_10BT_FEATURES;

	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
		    phydev->addr, phydev->irq, phydev->drv->name);

	priv->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(priv->phydev);

	return 0;
}

static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (priv->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_ethtool_gset(priv->phydev, ecmd);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int error;

	if (!priv->phydev)
		return -ENODEV;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	error = phy_ethtool_sset(priv->phydev, ecmd);
	if (error)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		priv->duplex = 1;
	else
		priv->duplex = 0;

	ravb_set_duplex(ndev);

error_exit:
	mdelay(1);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_nway_reset(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (priv->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_start_aneg(priv->phydev);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN	ARRAY_SIZE(ravb_gstrings_stats)

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return RAVB_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int i = 0;
	int q;

	/* Device-specific stats */
	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.get_settings		= ravb_get_settings,
	.set_settings		= ravb_set_settings,
	.nway_reset		= ravb_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
};

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	napi_enable(&priv->napi[RAVB_NC]);

	error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
			    ndev);
	if (error) {
		netdev_err(ndev, "cannot request IRQ\n");
		goto out_napi_off;
	}

	if (priv->chip_id == RCAR_GEN3) {
		error = request_irq(priv->emac_irq, ravb_interrupt,
				    IRQF_SHARED, ndev->name, ndev);
		if (error) {
			netdev_err(ndev, "cannot request IRQ\n");
			goto out_free_irq;
		}
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq2;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);
out_free_irq2:
	if (priv->chip_id == RCAR_GEN3)
		free_irq(priv->emac_irq, ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	struct net_device *ndev = priv->ndev;

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	ravb_stop_dma(ndev);

	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	ravb_dmac_init(ndev);
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);
}

/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u16 q = skb_get_queue_mapping(skb);
	struct ravb_tstamp_skb *ts_skb;
	struct ravb_tx_desc *desc;
	unsigned long flags;
	u32 dma_addr;
	void *buffer;
	u32 entry;
	u32 len;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
	    NUM_TX_DESC) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}
	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

	if (skb_put_padto(skb, ETH_ZLEN))
		goto drop;

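	/* The DMAC needs DPTR_ALIGN-aligned buffer addresses, so copy the
	 * unaligned head of the skb into a per-entry bounce buffer carved out
	 * of tx_align[]; the first descriptor sends that aligned head, the
	 * second sends the (now aligned) remainder of the skb data in place.
	 */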
	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
		 entry / NUM_TX_DESC * DPTR_ALIGN;
	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
	memcpy(buffer, skb->data, len);
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto drop;

	desc = &priv->tx_ring[q][entry];
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	buffer = skb->data + len;
	len = skb->len - len;
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto unmap;

	desc++;
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (q == RAVB_NC) {
		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
		if (!ts_skb) {
			desc--;
			dma_unmap_single(ndev->dev.parent, dma_addr, len,
					 DMA_TO_DEVICE);
			goto unmap;
		}
		ts_skb->skb = skb;
		ts_skb->tag = priv->ts_skb_tag++;
		priv->ts_skb_tag &= 0x3ff;
		list_add_tail(&ts_skb->list, &priv->ts_skb_list);

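		/* The 10-bit tag is split across two descriptor fields:
		 * bits 9:4 go into tagh_tsr and bits 3:0 into ds_tagl[15:12].
		 */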
		/* TAG and timestamp required flag */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_tx_timestamp(skb);
		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
		desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
	}

	/* Descriptor type must be set after all the above writes */
	dma_wmb();
	desc->die_dt = DT_FEND;
	desc--;
	desc->die_dt = DT_FSTART;

	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);

	priv->cur_tx[q] += NUM_TX_DESC;
	if (priv->cur_tx[q] - priv->dirty_tx[q] >
	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
		netif_stop_subqueue(ndev, q);

exit:
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

unmap:
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
	goto exit;
}

static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
							       RAVB_BE;
}

static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *nstats, *stats0, *stats1;

	nstats = &ndev->stats;
	stats0 = &priv->stats[RAVB_BE];
	stats1 = &priv->stats[RAVB_NC];

	nstats->tx_dropped += ravb_read(ndev, TROCR);
	ravb_write(ndev, 0, TROCR);	/* (write clear) */
	nstats->collisions += ravb_read(ndev, CDCR);
	ravb_write(ndev, 0, CDCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
	ravb_write(ndev, 0, LCCR);	/* (write clear) */

	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
	ravb_write(ndev, 0, CERCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
	ravb_write(ndev, 0, CEECR);	/* (write clear) */

	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
	nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
	nstats->multicast = stats0->multicast + stats1->multicast;
	nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
	nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
	nstats->rx_frame_errors =
		stats0->rx_frame_errors + stats1->rx_frame_errors;
	nstats->rx_length_errors =
		stats0->rx_length_errors + stats1->rx_length_errors;
	nstats->rx_missed_errors =
		stats0->rx_missed_errors + stats1->rx_missed_errors;
	nstats->rx_over_errors =
		stats0->rx_over_errors + stats1->rx_over_errors;

	return nstats;
}

/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	u32 ecmr;

	spin_lock_irqsave(&priv->lock, flags);
	ecmr = ravb_read(ndev, ECMR);
	if (ndev->flags & IFF_PROMISC)
		ecmr |=  ECMR_PRM;
	else
		ecmr &= ~ECMR_PRM;
	ravb_write(ndev, ecmr, ECMR);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC1);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
		list_del(&ts_skb->list);
		kfree(ts_skb);
	}

	/* PHY disconnect */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	/* Free both IRQs requested in ravb_open() */
	if (priv->chip_id == RCAR_GEN3)
		free_irq(priv->emac_irq, ndev);
	free_irq(ndev->irq, ndev);

	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	return 0;
}

static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	else
		config.rx_filter = HWTSTAMP_FILTER_NONE;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	/* Reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
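		/* Any other filter: fall back to timestamping all packets and
		 * report HWTSTAMP_FILTER_ALL back to user space.
		 */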
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = priv->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}

static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open		= ravb_open,
	.ndo_stop		= ravb_close,
	.ndo_start_xmit		= ravb_start_xmit,
	.ndo_select_queue	= ravb_select_queue,
	.ndo_get_stats		= ravb_get_stats,
	.ndo_set_rx_mode	= ravb_set_rx_mode,
	.ndo_tx_timeout		= ravb_tx_timeout,
	.ndo_do_ioctl		= ravb_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
	if (error)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}

static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);

static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	struct ravb_private *priv;
	enum ravb_chip_id chip_id;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	/* Get base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	ndev->dma = -1;

	match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
	chip_id = (enum ravb_chip_id)match->data;

	if (chip_id == RCAR_GEN3)
		irq = platform_get_irq_byname(pdev, "ch22");
	else
		irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	priv->phy_interface = of_get_phy_mode(np);

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (chip_id == RCAR_GEN3) {
		irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
	}

	priv->chip_id = chip_id;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
		   CCC);

	/* Set CSEL value */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
		   CCC);

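	/* (1000 << 20) / 130 encodes the gPTP timer increment in nanoseconds
	 * as 20-bit fixed point, i.e. ~7.69 ns per tick, which assumes a
	 * 130 MHz clock driving the timer.
	 */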
	/* Set GTI value */
	ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);

	/* Request GTI loading */
	ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);

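	/* The descriptor base address table has one entry per DMAC queue; all
	 * entries start as DT_EOS (end of set) and the RX/TX queues actually
	 * used get DT_LINKFIX entries from ravb_ring_format().
	 */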
	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_release;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(ndev, of_get_mac_address(np));
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
out_release:
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	.runtime_suspend = ravb_runtime_nop,
	.runtime_resume = ravb_runtime_nop,
};

#define RAVB_PM_OPS (&ravb_dev_pm_ops)
#else
#define RAVB_PM_OPS NULL
#endif

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name	= "ravb",
		.pm	= RAVB_PM_OPS,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");