/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Small helper utilities.
 */
#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>

#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-spi.h>

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>

#include <asm/octeon/cvmx-ipd-defs.h>
/**
 * Convert an interface mode into a human-readable string
 *
 * @mode:   Mode to convert
 *
 * Returns String
 */
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
						 mode)
{
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		return "DISABLED";
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
		return "RGMII";
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		return "GMII";
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		return "SPI";
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		return "PCIE";
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		return "XAUI";
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
		return "SGMII";
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		return "PICMG";
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return "NPI";
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return "LOOP";
	}
	return "UNKNOWN";
}

/**
 * Debug routine to dump the packet structure to the console
 *
 * @work:   Work queue entry containing the packet to dump
 * Returns Zero
 */
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
	uint64_t count;
	uint64_t remaining_bytes;
	union cvmx_buf_ptr buffer_ptr;
	uint64_t start_of_buffer;
	uint8_t *data_address;
	uint8_t *end_of_data;

	cvmx_dprintf("Packet Length:   %u\n", work->len);
	cvmx_dprintf("	  Input Port:  %u\n", work->ipprt);
	cvmx_dprintf("	  QoS:	       %u\n", work->qos);
	cvmx_dprintf("	  Buffers:     %u\n", work->word2.s.bufs);

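	/*
	 * When a packet uses no FPA buffers, its data is stored inline in
	 * the work queue entry itself; build a buffer pointer over
	 * work->packet_data so the dump loop below can treat both cases
	 * the same way.
	 */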
	if (work->word2.s.bufs == 0) {
		union cvmx_ipd_wqe_fpa_queue wqe_pool;
		wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
		buffer_ptr.u64 = 0;
		buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
		buffer_ptr.s.size = 128;
		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
		if (likely(!work->word2.s.not_IP)) {
			union cvmx_pip_ip_offset pip_ip_offset;
			pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
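			/*
			 * PIP_IP_OFFSET[OFFSET] is in units of 8 bytes.
			 * Walk back from the IP header to the start of the
			 * packet data; IPv4 packets (is_v6 == 0) sit 4
			 * bytes further in, so account for that as well.
			 */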
			buffer_ptr.s.addr +=
			    (pip_ip_offset.s.offset << 3) -
			    work->word2.s.ip_offset;
			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
		} else {
			/*
			 * WARNING: This code assumes that the packet
			 * is not RAW. If it was, we would use
			 * PIP_GBL_CFG[RAW_SHF] instead of
			 * PIP_GBL_CFG[NIP_SHF].
			 */
			union cvmx_pip_gbl_cfg pip_gbl_cfg;
			pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
		}
	} else
		buffer_ptr = work->packet_ptr;
	remaining_bytes = work->len;

	while (remaining_bytes) {
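		/*
		 * The "back" field is the distance, in 128-byte cache
		 * lines, from the packet data back to the start of the
		 * underlying FPA buffer.
		 */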
		start_of_buffer =
		    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
		cvmx_dprintf("	  Buffer Start:%llx\n",
			     (unsigned long long)start_of_buffer);
		cvmx_dprintf("	  Buffer I   : %u\n", buffer_ptr.s.i);
		cvmx_dprintf("	  Buffer Back: %u\n", buffer_ptr.s.back);
		cvmx_dprintf("	  Buffer Pool: %u\n", buffer_ptr.s.pool);
		cvmx_dprintf("	  Buffer Data: %llx\n",
			     (unsigned long long)buffer_ptr.s.addr);
		cvmx_dprintf("	  Buffer Size: %u\n", buffer_ptr.s.size);

		cvmx_dprintf("\t\t");
		data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
		end_of_data = data_address + buffer_ptr.s.size;
		count = 0;
		while (data_address < end_of_data) {
			if (remaining_bytes == 0)
				break;
			else
				remaining_bytes--;
			cvmx_dprintf("%02x", (unsigned int)*data_address);
			data_address++;
			if (remaining_bytes && (count == 7)) {
				cvmx_dprintf("\n\t\t");
				count = 0;
			} else
				count++;
		}
		cvmx_dprintf("\n");

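		/*
		 * The pointer to the next buffer in the chain is stored in
		 * the 8 bytes immediately preceding the current buffer's
		 * data.
		 */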
		if (remaining_bytes)
			buffer_ptr = *(union cvmx_buf_ptr *)
				cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
	}
	return 0;
}

/**
 * Set up Random Early Drop on a specific input queue
 *
 * @queue:  Input queue to set up RED on (0-7)
 * @pass_thresh:
 *		 Packets will begin slowly dropping when there are fewer than
 *		 this many packet buffers free in FPA 0.
 * @drop_thresh:
 *		 All incoming packets will be dropped when there are fewer
 *		 than this many free packet buffers in FPA 0.
 * Returns Zero on success. Negative on failure
 */
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
{
	union cvmx_ipd_qosx_red_marks red_marks;
	union cvmx_ipd_red_quex_param red_param;

	/*
	 * Set RED to begin dropping packets when there are pass_thresh
	 * buffers left. It will linearly drop more packets until reaching
	 * drop_thresh buffers.
	 */
	red_marks.u64 = 0;
	red_marks.s.drop = drop_thresh;
	red_marks.s.pass = pass_thresh;
	cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);

	/* Use the actual queue 0 counter, not the average */
	red_param.u64 = 0;
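	/*
	 * PRB_CON is effectively the slope of the drop-probability ramp
	 * between the pass and drop marks, in fixed point with 24 fraction
	 * bits. Note that pass_thresh must be greater than drop_thresh or
	 * this division is meaningless.
	 */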
	red_param.s.prb_con =
	    (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
	red_param.s.avg_con = 1;
	red_param.s.new_con = 255;
	red_param.s.use_pcnt = 1;
	cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
	return 0;
}

/**
 * Set up Random Early Drop to automatically begin dropping packets.
 *
 * @pass_thresh:
 *		 Packets will begin slowly dropping when there are fewer than
 *		 this many packet buffers free in FPA 0.
 * @drop_thresh:
 *		 All incoming packets will be dropped when there are fewer
 *		 than this many free packet buffers in FPA 0.
 * Returns Zero on success. Negative on failure
 */
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
	union cvmx_ipd_portx_bp_page_cnt page_cnt;
	union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
	union cvmx_ipd_red_port_enable red_port_enable;
	int queue;
	int interface;
	int port;

	/* Disable backpressure based on queued buffers. It needs SW support */
	page_cnt.u64 = 0;
	page_cnt.s.bp_enb = 0;
	page_cnt.s.page_cnt = 100;
	for (interface = 0; interface < 2; interface++) {
		for (port = cvmx_helper_get_first_ipd_port(interface);
		     port < cvmx_helper_get_last_ipd_port(interface); port++)
			cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
				       page_cnt.u64);
	}

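	/* Apply the same pass/drop thresholds to all eight QoS input queues */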
	for (queue = 0; queue < 8; queue++)
		cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);

	/*
	 * Shut off the dropping based on the per-port page count. SW isn't
	 * decrementing it right now.
	 */
	ipd_bp_prt_red_end.u64 = 0;
	ipd_bp_prt_red_end.s.prt_enb = 0;
	cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);

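	/*
	 * Finally enable RED for every port (36-bit all-ones mask) and set
	 * the delay constants used when the hardware updates the average
	 * backlog and drop probability.
	 */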
	red_port_enable.u64 = 0;
	red_port_enable.s.prt_enb = 0xfffffffffull;
	red_port_enable.s.avg_dly = 10000;
	red_port_enable.s.prb_dly = 10000;
	cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);

	return 0;
}
EXPORT_SYMBOL_GPL(cvmx_helper_setup_red);

/**
 * Set up the common GMX settings that determine the number of
 * ports. These settings apply to almost all configurations of all
 * chips.
 *
 * @interface: Interface to configure
 * @num_ports: Number of ports on the interface
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_setup_gmx(int interface, int num_ports)
{
	union cvmx_gmxx_tx_prts gmx_tx_prts;
	union cvmx_gmxx_rx_prts gmx_rx_prts;
	union cvmx_pko_reg_gmx_port_mode pko_mode;
	union cvmx_gmxx_txx_thresh gmx_tx_thresh;
	int index;

	/* Tell GMX the number of TX ports on this interface */
	gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
	gmx_tx_prts.s.prts = num_ports;
	cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);

	/*
	 * Tell GMX the number of RX ports on this interface.  This only
	 * applies to *GMII and XAUI ports.
	 */
	if (cvmx_helper_interface_get_mode(interface) ==
	    CVMX_HELPER_INTERFACE_MODE_RGMII
	    || cvmx_helper_interface_get_mode(interface) ==
	    CVMX_HELPER_INTERFACE_MODE_SGMII
	    || cvmx_helper_interface_get_mode(interface) ==
	    CVMX_HELPER_INTERFACE_MODE_GMII
	    || cvmx_helper_interface_get_mode(interface) ==
	    CVMX_HELPER_INTERFACE_MODE_XAUI) {
		if (num_ports > 4) {
			cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
				     "num_ports\n");
			return -1;
		}

		gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
		gmx_rx_prts.s.prts = num_ports;
		cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
	}

	/* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
	if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
	    && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		/* Tell PKO the number of ports on this interface */
		pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
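		/*
		 * MODE0/MODE1 encode the number of ports on the interface
		 * as a power of two: 4 => 1 port, 3 => 2 ports, 2 => up to
		 * 4, 1 => up to 8 and 0 => more than 8.
		 */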
		if (interface == 0) {
			if (num_ports == 1)
				pko_mode.s.mode0 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode0 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode0 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode0 = 1;
			else
				pko_mode.s.mode0 = 0;
		} else {
			if (num_ports == 1)
				pko_mode.s.mode1 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode1 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode1 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode1 = 1;
			else
				pko_mode.s.mode1 = 0;
		}
		cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
	}

	/*
	 * Set GMX to buffer as much data as possible before starting
	 * transmit.  This reduces the chances that we have a TX underrun
	 * due to memory contention. Any packet that fits entirely in the
	 * GMX FIFO can never have an underrun regardless of memory load.
	 */
	gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		/* These chips have a fixed max threshold of 0x40 */
		gmx_tx_thresh.s.cnt = 0x40;
	} else {
		/* Choose the max value for the number of ports */
		if (num_ports <= 1)
			gmx_tx_thresh.s.cnt = 0x100 / 1;
		else if (num_ports == 2)
			gmx_tx_thresh.s.cnt = 0x100 / 2;
		else
			gmx_tx_thresh.s.cnt = 0x100 / 4;
	}
	/*
	 * SPI and XAUI can have lots of ports but the GMX hardware
	 * only ever has a max of 4.
	 */
	if (num_ports > 4)
		num_ports = 4;
	for (index = 0; index < num_ports; index++)
		cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
			       gmx_tx_thresh.u64);

	return 0;
}

/**
 * Returns the IPD/PKO port number for a port on the given
 * interface.
 *
 * @interface: Interface to use
 * @port:      Port on the interface
 *
 * Returns IPD/PKO port number
 */
int cvmx_helper_get_ipd_port(int interface, int port)
{
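	/*
	 * Fixed IPD/PKO port numbering on these chips: interface 0 owns
	 * ports 0-15, interface 1 owns 16-31, interface 2 owns 32-35 and
	 * interface 3 owns 36-39.
	 */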
	switch (interface) {
	case 0:
		return port;
	case 1:
		return port + 16;
	case 2:
		return port + 32;
	case 3:
		return port + 36;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_ipd_port);

/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @ipd_port: IPD/PKO port number
 *
 * Returns Interface number
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
	if (ipd_port < 16)
		return 0;
	else if (ipd_port < 32)
		return 1;
	else if (ipd_port < 36)
		return 2;
	else if (ipd_port < 40)
		return 3;
	else
		cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
			     "port number\n");

	return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_num);

/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @ipd_port: IPD/PKO port number
 *
 * Returns Interface index number
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
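	/*
	 * Interfaces 0 and 1 have up to 16 ports each, so the index is the
	 * low four bits of the port number; interfaces 2 and 3 have up to
	 * 4 ports each, so the index is the low two bits.
	 */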
	if (ipd_port < 32)
		return ipd_port & 15;
	else if (ipd_port < 36)
		return ipd_port & 3;
	else if (ipd_port < 40)
		return ipd_port & 3;
	else
		cvmx_dprintf("cvmx_helper_get_interface_index_num: "
			     "Illegal IPD port number\n");

	return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_index_num);