/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>

#include "sxgbe_common.h"
#include "sxgbe_reg.h"
#include "sxgbe_dma.h"

struct sxgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define SXGBE_STAT(m)						\
{								\
	#m,							\
	FIELD_SIZEOF(struct sxgbe_extra_stats, m),		\
	offsetof(struct sxgbe_priv_data, xstats.m)		\
}

static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
	/* TX/RX IRQ events */
	SXGBE_STAT(tx_process_stopped_irq),
	SXGBE_STAT(tx_ctxt_desc_err),
	SXGBE_STAT(tx_threshold),
	SXGBE_STAT(rx_threshold),
	SXGBE_STAT(tx_pkt_n),
	SXGBE_STAT(rx_pkt_n),
	SXGBE_STAT(normal_irq_n),
	SXGBE_STAT(tx_normal_irq_n),
	SXGBE_STAT(rx_normal_irq_n),
	SXGBE_STAT(napi_poll),
	SXGBE_STAT(tx_clean),
	SXGBE_STAT(tx_reset_ic_bit),
	SXGBE_STAT(rx_process_stopped_irq),
	SXGBE_STAT(rx_underflow_irq),

	/* Bus access errors */
	SXGBE_STAT(fatal_bus_error_irq),
	SXGBE_STAT(tx_read_transfer_err),
	SXGBE_STAT(tx_write_transfer_err),
	SXGBE_STAT(tx_desc_access_err),
	SXGBE_STAT(tx_buffer_access_err),
	SXGBE_STAT(tx_data_transfer_err),
	SXGBE_STAT(rx_read_transfer_err),
	SXGBE_STAT(rx_write_transfer_err),
	SXGBE_STAT(rx_desc_access_err),
	SXGBE_STAT(rx_buffer_access_err),
	SXGBE_STAT(rx_data_transfer_err),

	/* EEE-LPI stats */
	SXGBE_STAT(tx_lpi_entry_n),
	SXGBE_STAT(tx_lpi_exit_n),
	SXGBE_STAT(rx_lpi_entry_n),
	SXGBE_STAT(rx_lpi_exit_n),
	SXGBE_STAT(eee_wakeup_error_n),

	/* RX specific */
	/* L2 error */
	SXGBE_STAT(rx_code_gmii_err),
	SXGBE_STAT(rx_watchdog_err),
	SXGBE_STAT(rx_crc_err),
	SXGBE_STAT(rx_gaint_pkt_err),
	SXGBE_STAT(ip_hdr_err),
	SXGBE_STAT(ip_payload_err),
	SXGBE_STAT(overflow_error),

	/* L2 Pkt type */
	SXGBE_STAT(len_pkt),
	SXGBE_STAT(mac_ctl_pkt),
	SXGBE_STAT(dcb_ctl_pkt),
	SXGBE_STAT(arp_pkt),
	SXGBE_STAT(oam_pkt),
	SXGBE_STAT(untag_okt),
	SXGBE_STAT(other_pkt),
	SXGBE_STAT(svlan_tag_pkt),
	SXGBE_STAT(cvlan_tag_pkt),
	SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
	SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
	SXGBE_STAT(dvan_ocvlan_icvlan_pkt),

	/* L3/L4 Pkt type */
	SXGBE_STAT(not_ip_pkt),
	SXGBE_STAT(ip4_tcp_pkt),
	SXGBE_STAT(ip4_udp_pkt),
	SXGBE_STAT(ip4_icmp_pkt),
	SXGBE_STAT(ip4_unknown_pkt),
	SXGBE_STAT(ip6_tcp_pkt),
	SXGBE_STAT(ip6_udp_pkt),
	SXGBE_STAT(ip6_icmp_pkt),
	SXGBE_STAT(ip6_unknown_pkt),

	/* Filter specific */
	SXGBE_STAT(vlan_filter_match),
	SXGBE_STAT(sa_filter_fail),
	SXGBE_STAT(da_filter_fail),
	SXGBE_STAT(hash_filter_pass),
	SXGBE_STAT(l3_filter_match),
	SXGBE_STAT(l4_filter_match),

	/* RX context specific */
	SXGBE_STAT(timestamp_dropped),
	SXGBE_STAT(rx_msg_type_no_ptp),
	SXGBE_STAT(rx_ptp_type_sync),
	SXGBE_STAT(rx_ptp_type_follow_up),
	SXGBE_STAT(rx_ptp_type_delay_req),
	SXGBE_STAT(rx_ptp_type_delay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_req),
	SXGBE_STAT(rx_ptp_type_pdelay_resp),
	SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
	SXGBE_STAT(rx_ptp_announce),
	SXGBE_STAT(rx_ptp_mgmt),
	SXGBE_STAT(rx_ptp_signal),
	SXGBE_STAT(rx_ptp_resv_msg_type),
};
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)

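/* Report EEE status: copy the cached EEE state from the driver private
 * data and let the PHY layer fill in the link-partner advertisement.
 */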
static int sxgbe_get_eee(struct net_device *dev,
			 struct ethtool_eee *edata)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (!priv->hw_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(priv->phydev, edata);
}

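/* Enable or disable EEE. Enabling re-runs sxgbe_eee_init() to verify that
 * both the MAC and the PHY really support LPI before the new tx_lpi_timer
 * value is accepted.
 */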
static int sxgbe_set_eee(struct net_device *dev,
			 struct ethtool_eee *edata)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled) {
		sxgbe_disable_eee_mode(priv);
	} else {
		/* We are asking to enable EEE, but it is safe to verify
		 * everything by invoking the eee_init function.
		 * If it fails, EEE stays disabled and we return an error.
		 */
		priv->eee_enabled = sxgbe_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(priv->phydev, edata);
}

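/* Report the driver name and version for ethtool -i */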
static void sxgbe_getdrvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static int sxgbe_getsettings(struct net_device *dev,
			     struct ethtool_cmd *cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->phydev)
		return phy_ethtool_gset(priv->phydev, cmd);

	return -EOPNOTSUPP;
}

static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->phydev)
		return phy_ethtool_sset(priv->phydev, cmd);

	return -EOPNOTSUPP;
}

static u32 sxgbe_getmsglevel(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	return priv->msg_enable;
}

static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	priv->msg_enable = level;
}

static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < SXGBE_STATS_LEN; i++) {
			memcpy(p, sxgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
{
	int len;

	switch (sset) {
	case ETH_SS_STATS:
		len = SXGBE_STATS_LEN;
		return len;
	default:
		return -EINVAL;
	}
}

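/* Copy the software-maintained extra statistics into the u64 array exposed
 * via ethtool -S. The EEE wakeup error counter is refreshed from the PHY
 * before the copy.
 */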
static void sxgbe_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *dummy, u64 *data)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int i;
	char *p;

	if (priv->eee_enabled) {
		int val = phy_get_eee_err(priv->phydev);

		if (val)
			priv->xstats.eee_wakeup_error_n = val;
	}

	for (i = 0; i < SXGBE_STATS_LEN; i++) {
		p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
		data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
			? (*(u64 *)p) : (*(u32 *)p);
	}
}

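/* Report the fixed channel layout: the hardware maximum and the number of
 * TX/RX queues this driver actually uses.
 */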
static void sxgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	channel->max_rx = SXGBE_MAX_RX_CHANNELS;
	channel->max_tx = SXGBE_MAX_TX_CHANNELS;
	channel->rx_count = SXGBE_RX_QUEUES;
	channel->tx_count = SXGBE_TX_QUEUES;
}

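/* Convert the RX interrupt watchdog timer (RIWT) register value to
 * microseconds. Each RIWT unit corresponds to 256 cycles of the sxgbe
 * clock; for example, assuming a 250 MHz clock, a RIWT of 1 is roughly
 * 1 us. sxgbe_usec2riwt() below performs the inverse conversion.
 */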
static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
{
	unsigned long clk = clk_get_rate(priv->sxgbe_clk);

	if (!clk)
		return 0;

	return (riwt * 256) / (clk / 1000000);
}

static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
{
	unsigned long clk = clk_get_rate(priv->sxgbe_clk);

	if (!clk)
		return 0;

	return (usec * (clk / 1000000)) / 256;
}

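/* Report the current RX interrupt coalescing setting, converted from the
 * programmed RIWT value back to microseconds.
 */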
static int sxgbe_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->use_riwt)
		ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);

	return 0;
}

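/* Program a new RX interrupt watchdog value. The requested microsecond
 * value must convert to a RIWT within the DMA's supported range, and the
 * RX watchdog (use_riwt) must be in use.
 */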
static int sxgbe_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int rx_riwt;

	if (!ec->rx_coalesce_usecs)
		return -EINVAL;

	rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	priv->rx_riwt = rx_riwt;
	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);

	return 0;
}

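/* Report which header fields are hashed for each flow type. TCP and UDP
 * flows hash on the L4 ports in addition to the IP addresses reported for
 * all IP flow types.
 */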
static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on sxgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
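		/* fall through - L4 flows also hash on the IP addresses */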
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
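		/* fall through - L4 flows also hash on the IP addresses */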
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = sxgbe_get_rss_hash_opts(priv, cmd);
		break;
	default:
		break;
	}

	return ret;
}

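/* Validate the requested hash fields for a flow type and enable the
 * matching TCP/UDP/IP RSS hash type in the core RSS control register.
 * Only the default src/dst IP (plus L4 ports for TCP/UDP) combinations
 * are accepted; anything else is rejected with -EINVAL.
 */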
static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
				  struct ethtool_rxnfc *cmd)
{
	u32 reg_val = 0;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    !(cmd->data & RXH_L4_B_0_1) ||
		    !(cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    !(cmd->data & RXH_L4_B_0_1) ||
		    !(cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		if (!(cmd->data & RXH_IP_SRC) ||
		    !(cmd->data & RXH_IP_DST) ||
		    (cmd->data & RXH_L4_B_0_1) ||
		    (cmd->data & RXH_L4_B_2_3))
			return -EINVAL;
		reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
		break;
	default:
		return -EINVAL;
	}

	/* Read SXGBE RSS control register and update */
	reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
	writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
	readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);

	return 0;
}

static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = sxgbe_set_rss_hash_opt(priv, cmd);
		break;
	default:
		break;
	}

	return ret;
}

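/* Dump the MAC, MTL and DMA register ranges into the buffer provided by
 * ethtool -d. The buffer is REG_SPACE_SIZE bytes, as reported by
 * sxgbe_get_regs_len().
 */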
static void sxgbe_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *space)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	u32 *reg_space = (u32 *)space;
	int reg_offset;
	int reg_ix = 0;
	void __iomem *ioaddr = priv->ioaddr;

	memset(reg_space, 0x0, REG_SPACE_SIZE);

	/* MAC registers */
	for (reg_offset = START_MAC_REG_OFFSET;
	     reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	/* MTL registers */
	for (reg_offset = START_MTL_REG_OFFSET;
	     reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	/* DMA registers */
	for (reg_offset = START_DMA_REG_OFFSET;
	     reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = readl(ioaddr + reg_offset);
		reg_ix++;
	}

	BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
}

static int sxgbe_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static const struct ethtool_ops sxgbe_ethtool_ops = {
	.get_drvinfo = sxgbe_getdrvinfo,
	.get_settings = sxgbe_getsettings,
	.set_settings = sxgbe_setsettings,
	.get_msglevel = sxgbe_getmsglevel,
	.set_msglevel = sxgbe_setmsglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sxgbe_get_strings,
	.get_ethtool_stats = sxgbe_get_ethtool_stats,
	.get_sset_count = sxgbe_get_sset_count,
	.get_channels = sxgbe_get_channels,
	.get_coalesce = sxgbe_get_coalesce,
	.set_coalesce = sxgbe_set_coalesce,
	.get_rxnfc = sxgbe_get_rxnfc,
	.set_rxnfc = sxgbe_set_rxnfc,
	.get_regs = sxgbe_get_regs,
	.get_regs_len = sxgbe_get_regs_len,
	.get_eee = sxgbe_get_eee,
	.set_eee = sxgbe_set_eee,
};

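/* Hook the ethtool operations up to the net_device */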
void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &sxgbe_ethtool_ops;
}