/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/jiffies.h>

#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

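/* Select the ETS (transmit scheduling) algorithm and the RX arbitration
 * algorithm in the MTL operation mode register, as requested by the caller.
 */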
static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
			   unsigned int raa)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
	reg_val &= ETS_RST;

	/* ETS Algorithm */
	switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
	case ETS_WRR:
		reg_val &= ETS_WRR;
		break;
	case ETS_WFQ:
		reg_val |= ETS_WFQ;
		break;
	case ETS_DWRR:
		reg_val |= ETS_DWRR;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);

	/* Receive Arbitration Algorithm */
	switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
	case RAA_SP:
		reg_val &= RAA_SP;
		break;
	case RAA_WSP:
		reg_val |= RAA_WSP;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
}

/* Enable dynamic DMA channel mapping for the RX queues */
static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
{
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
}

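/* Program the TX queue FIFO size field. The size is encoded in units of
 * SXGBE_MTL_TX_FIFO_DIV bytes, where an encoded value of 0 means one unit
 * (256 bytes).
 */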
static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
				     int queue_fifo)
{
	u32 fifo_bits, reg_val;

	/* 0 means 256 bytes */
	fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
				     int queue_fifo)
{
	u32 fifo_bits, reg_val;

	/* 0 means 256 bytes */
	fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

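/* sxgbe_mtl_enable_txqueue/sxgbe_mtl_disable_txqueue: set or clear the
 * SXGBE_MTL_ENABLE_QUEUE bit in the queue's TXQ operation mode register.
 */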
static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_ENABLE_QUEUE;
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

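/* Program the threshold at which flow control is activated for the RX
 * queue (RX_FC_ACTIVE field of the RXQ operation mode register).
 */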
static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
				int threshold)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
	reg_val |= (threshold << RX_FC_ACTIVE);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_ENABLE_FC;
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

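/* Program the threshold at which flow control is deactivated again for the
 * RX queue (RX_FC_DEACTIVE field of the RXQ operation mode register).
 */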
static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
				  int threshold)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
	reg_val |= (threshold << RX_FC_DEACTIVE);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

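/* Set or clear the SXGBE_MTL_RXQ_OP_FEP (forward error packets) bit in the
 * RXQ operation mode register.
 */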
static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_RXQ_OP_FEP;

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

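/* Set or clear the SXGBE_MTL_RXQ_OP_FUP (forward undersized good packets)
 * bit in the RXQ operation mode register.
 */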
static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_RXQ_OP_FUP;

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

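/* Configure the TX queue operating mode: either store-and-forward
 * (SXGBE_MTL_SFMODE) or threshold mode, where tx_mode is mapped to the
 * smallest supported TTC threshold value that is not below it (64..512).
 */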
static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
				  int tx_mode)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	/* TX specific MTL mode settings */
	if (tx_mode == SXGBE_MTL_SFMODE) {
		reg_val |= SXGBE_MTL_SFMODE;
	} else {
		/* set the TTC values */
		if (tx_mode <= 64)
			reg_val |= MTL_CONTROL_TTC_64;
		else if (tx_mode <= 96)
			reg_val |= MTL_CONTROL_TTC_96;
		else if (tx_mode <= 128)
			reg_val |= MTL_CONTROL_TTC_128;
		else if (tx_mode <= 192)
			reg_val |= MTL_CONTROL_TTC_192;
		else if (tx_mode <= 256)
			reg_val |= MTL_CONTROL_TTC_256;
		else if (tx_mode <= 384)
			reg_val |= MTL_CONTROL_TTC_384;
		else
			reg_val |= MTL_CONTROL_TTC_512;
	}

	/* write into TXQ operation register */
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

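/* Configure the RX queue operating mode: either store-and-forward
 * (SXGBE_RX_MTL_SFMODE) or threshold mode, where rx_mode is mapped to an
 * RTC threshold value of 64, 96 or 128.
 */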
static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
				  int rx_mode)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	/* RX specific MTL mode settings */
	if (rx_mode == SXGBE_RX_MTL_SFMODE) {
		reg_val |= SXGBE_RX_MTL_SFMODE;
	} else {
		if (rx_mode <= 64)
			reg_val |= MTL_CONTROL_RTC_64;
		else if (rx_mode <= 96)
			reg_val |= MTL_CONTROL_RTC_96;
		else if (rx_mode <= 128)
			reg_val |= MTL_CONTROL_RTC_128;
	}

	/* write into RXQ operation register */
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

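/* MTL callbacks handed to the rest of the driver via sxgbe_get_mtl_ops() */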
static const struct sxgbe_mtl_ops mtl_ops = {
	.mtl_set_txfifosize		= sxgbe_mtl_set_txfifosize,
	.mtl_set_rxfifosize		= sxgbe_mtl_set_rxfifosize,
	.mtl_enable_txqueue		= sxgbe_mtl_enable_txqueue,
	.mtl_disable_txqueue		= sxgbe_mtl_disable_txqueue,
	.mtl_dynamic_dma_rxqueue	= sxgbe_mtl_dma_dm_rxqueue,
	.set_tx_mtl_mode		= sxgbe_set_tx_mtl_mode,
	.set_rx_mtl_mode		= sxgbe_set_rx_mtl_mode,
	.mtl_init			= sxgbe_mtl_init,
	.mtl_fc_active			= sxgbe_mtl_fc_active,
	.mtl_fc_deactive		= sxgbe_mtl_fc_deactive,
	.mtl_fc_enable			= sxgbe_mtl_fc_enable,
	.mtl_fep_enable			= sxgbe_mtl_fep_enable,
	.mtl_fep_disable		= sxgbe_mtl_fep_disable,
	.mtl_fup_enable			= sxgbe_mtl_fup_enable,
	.mtl_fup_disable		= sxgbe_mtl_fup_disable
};

const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
{
	return &mtl_ops;
}