1/* bnx2x_stats.c: QLogic Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
5 * All rights reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include "bnx2x_stats.h"
23#include "bnx2x_cmn.h"
24#include "bnx2x_sriov.h"
25
26/* Statistics */
27
28/*
29 * General service functions
30 */
31
32static inline long bnx2x_hilo(u32 *hiref)
33{
34	u32 lo = *(hiref + 1);
35#if (BITS_PER_LONG == 64)
36	u32 hi = *hiref;
37
38	return HILO_U64(hi, lo);
39#else
40	return lo;
41#endif
42}
43
44static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
45{
46	u16 res = 0;
47
48	/* 'newest' convention - shmem2 cotains the size of the port stats */
49	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
50		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
51		if (size)
52			res = size;
53
54		/* prevent newer BC from causing buffer overflow */
55		if (res > sizeof(struct host_port_stats))
56			res = sizeof(struct host_port_stats);
57	}
58
59	/* Older convention - all BCs support the port stats' fields up until
60	 * the 'not_used' field
61	 */
62	if (!res) {
63		res = offsetof(struct host_port_stats, not_used) + 4;
64
65		/* if PFC stats are supported by the MFW, DMA them as well */
66		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
67			res += offsetof(struct host_port_stats,
68					pfc_frames_rx_lo) -
69			       offsetof(struct host_port_stats,
70					pfc_frames_tx_hi) + 4 ;
71		}
72	}
73
74	res >>= 2;
75
76	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
77	return res;
78}
79
80/*
81 * Init service functions
82 */
83
/* Dump the FW statistics request (header plus every per-client query
 * entry) to the debug log.  Diagnostics only: reads bp->fw_stats_req and
 * has no side effect beyond DP() logging.
 */
static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	/* one query entry per client included in the request */
	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "              kind %d\n"
		   "              index %d\n"
		   "              funcID %d\n"
		   "              reserved %d\n"
		   "              address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}
119
120/* Post the next statistics ramrod. Protect it with the spin in
121 * order to ensure the strict order between statistics ramrods
122 * (each ramrod has a sequence number passed in a
123 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
124 * sent in order).
125 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	int rc;

	/* a previous ramrod is still outstanding - posting only one at a
	 * time preserves the strict ordering described above
	 */
	if (bp->stats_pending)
		return;

	/* stamp the request with the next sequence number */
	bp->fw_stats_req->hdr.drv_stats_counter =
		cpu_to_le16(bp->stats_counter++);

	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

	/* adjust the ramrod to include VF queues statistics */
	bnx2x_iov_adjust_stats_req(bp);
	bnx2x_dp_stats(bp);

	/* send FW stats ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			   U64_HI(bp->fw_stats_req_mapping),
			   U64_LO(bp->fw_stats_req_mapping),
			   NONE_CONNECTION_TYPE);
	/* block further posts until this ramrod completes */
	if (rc == 0)
		bp->stats_pending = 1;
}
151
/* Kick off the HW statistics DMAE transfers: either the multi-command
 * program prepared in bp->slowpath->dmae[] (started via a "loader"
 * command), or the single function-stats command in bp->stats_dmae.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark completion so that on slow (emulation/FPGA) chips, where
	 * no DMAE is issued, bnx2x_stats_comp() does not wait
	 */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
						 true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		/* the loader copies the first prepared command (dmae[0]) into
		 * the DMAE command memory of the next channel and triggers it
		 */
		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* NOTE(review): E1 apparently uses a one-dword-shorter DMAE
		 * command format - confirm against the HSI
		 */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		/* no program prepared - just push the function stats */
		*stats_comp = 0;
		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
	}
}
196
197static void bnx2x_stats_comp(struct bnx2x *bp)
198{
199	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
200	int cnt = 10;
201
202	might_sleep();
203	while (*stats_comp != DMAE_COMP_VAL) {
204		if (!cnt) {
205			BNX2X_ERR("timeout waiting for stats finished\n");
206			break;
207		}
208		cnt--;
209		usleep_range(1000, 2000);
210	}
211}
212
213/*
214 * Statistics service functions
215 */
216
/* should be called under stats_sema */
/* Synchronously read the port statistics block back from the MCP
 * scratchpad (port_stx) into host memory.  The read is split into two
 * DMAE commands because a single DMAE read is limited to
 * DMAE_LEN32_RD_MAX dwords.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* GRC -> PCI read, completion type filled in per command below */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* first chunk: exactly the single-read maximum, completes to GRC */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* remainder: completes to the stats_comp word in host memory so the
	 * driver can poll for the end of the whole transfer
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* fire the program and wait for it to finish */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
264
/* Build the per-cycle DMAE program (in bp->slowpath->dmae[]) for a PMF
 * with link up:
 *   1) host port/function stats -> MCP scratchpad (PCI -> GRC)
 *   2) MAC hardware counters    -> host mac_stats (GRC -> PCI)
 *   3) NIG counters             -> host nig_stats (GRC -> PCI)
 * Every command completes to GRC except the final one, which completes
 * to the stats_comp word in host memory so completion can be polled.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				    true, DMAE_COMP_GRC);

	/* port stats -> MCP scratchpad, if the MCP published an address */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* function stats -> MCP scratchpad, if an address was published */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special: its RX counters live in two discontiguous
	 * register ranges, so three separate reads are needed
	 */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		/* BMAC/MSTAT: one contiguous TX range and one RX range */
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats, placed right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		/* two egress packet counters, two dwords ({lo,hi}) each */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* final command: BRB discard counters; the only one that completes
	 * to PCI (stats_comp) so the whole program's end can be polled
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
						 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
486
487static void bnx2x_func_stats_init(struct bnx2x *bp)
488{
489	struct dmae_command *dmae = &bp->stats_dmae;
490	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
491
492	/* sanity */
493	if (!bp->func_stx) {
494		BNX2X_ERR("BUG!\n");
495		return;
496	}
497
498	bp->executer_idx = 0;
499	memset(dmae, 0, sizeof(struct dmae_command));
500
501	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
502					 true, DMAE_COMP_PCI);
503	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
504	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
505	dmae->dst_addr_lo = bp->func_stx >> 2;
506	dmae->dst_addr_hi = 0;
507	dmae->len = sizeof(struct host_func_stats) >> 2;
508	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
509	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
510	dmae->comp_val = DMAE_COMP_VAL;
511
512	*stats_comp = 0;
513}
514
515/* should be called under stats_sema */
516static void bnx2x_stats_start(struct bnx2x *bp)
517{
518	if (IS_PF(bp)) {
519		if (bp->port.pmf)
520			bnx2x_port_stats_init(bp);
521
522		else if (bp->func_stx)
523			bnx2x_func_stats_init(bp);
524
525		bnx2x_hw_stats_post(bp);
526		bnx2x_storm_stats_post(bp);
527	}
528}
529
/* PMF transition: wait for any in-flight stats DMAE, re-read the port
 * stats from the MCP scratchpad (presumably left there by the previous
 * PMF - see bnx2x_stats_pmf_update()), then start a normal cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
536
/* Restart the statistics cycle: wait for the outstanding DMAE completion,
 * then kick off a new one.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;

	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
548
/* Fold the BigMAC counters (DMAEd into mac_stats by the port-stats DMAE
 * program) into the driver's statistics, and mirror the pause/PFC fields
 * into bp->eth_stats.  E1x chips use the bmac1 layout, later chips bmac2.
 * NOTE: the locals 'new', 'pstats', 'estats' and 'diff' appear unused but
 * are referenced implicitly by the UPDATE_STAT64() macros - do not rename
 * or remove them.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* mirror pause/PFC counters into the ethtool-visible stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
654
/* Fold the MSTAT counters (used by UMAC/XMAC on newer chips, DMAEd into
 * mac_stats) into the driver statistics and derive the aggregate
 * ethtool-visible fields.  MSTAT has no single "over 1522" counter, so
 * it is synthesized from the 2047/4095/9216/16383 buckets.
 * NOTE: 'new' and 'pstats' are referenced implicitly by the ADD_STAT64()
 * macros - do not rename them.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	/* over-1522 = sum of the 2047, 4095, 9216 and 16383 buckets */
	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
744
/* Fold the EMAC counters (DMAEd into mac_stats) into the driver
 * statistics.  Pause totals are the sum of the XON and XOFF counters.
 * NOTE: 'new' is referenced implicitly by the UPDATE_EXTEND_STAT()
 * macros - do not rename it.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = XON received + XOFF received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = XON sent + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
801
/* Fold all HW counters collected by the last DMAE cycle (MAC + NIG) into
 * the driver statistics.  Returns 0 on success, -1 when no MAC was
 * active (buffers not valid).
 * NOTE: the 'diff' local appears unused but is referenced implicitly by
 * the UPDATE_STAT64_NIG() macro - do not remove it.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* dispatch to the updater matching the active MAC */
	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
		/* falls through to the NIG update below */
	}

	/* accumulate the BRB discard/truncate deltas since the last cycle */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	/* snapshot current NIG counters for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	/* E3 only: accumulate the CPMU LPI entry counter (EEE) */
	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
875
/* Verify that all four storms have stamped the statistics buffers with
 * the sequence number of the last ramrod sent.  Returns 0 when the data
 * is valid, -EAGAIN when at least one storm has not caught up yet (the
 * caller should retry on the next cycle).
 * Note: the DP messages print bp->stats_counter, i.e. cur + 1.
 */
static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}
915
/* Fold the firmware (storm) statistics into the driver's per-queue and
 * global ethernet statistics.
 *
 * NOTE: the UPDATE_*/SUB_* macros below expand against the local
 * variable names (tclient, old_tclient, uclient, xclient, qstats,
 * qstats_old, estats, estats_old, fstats, diff, ...) - do not rename
 * these locals without checking the macro definitions.
 *
 * Returns 0 on success, -EAGAIN when the storm counters show the last
 * statistics ramrod has not been fully processed yet (PF only).
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
				&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
				&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	/* recomputed from scratch below from per-function error bytes */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		/* rx byte counters per cast type */
		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		/* rx packet counters per cast type */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		/* packets dropped for lack of buffers were already counted
		 * as received above - subtract them back out
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx byte counters per cast type */
		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		/* fold the queue counters into the function totals */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* add MAC-level bad octets and tstorm error bytes to the totals */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-level FW discard counters are only visible to the PMF */
	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
1113
1114static void bnx2x_net_stats_update(struct bnx2x *bp)
1115{
1116	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1117	struct net_device_stats *nstats = &bp->dev->stats;
1118	unsigned long tmp;
1119	int i;
1120
1121	nstats->rx_packets =
1122		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
1123		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
1124		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
1125
1126	nstats->tx_packets =
1127		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
1128		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
1129		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
1130
1131	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1132
1133	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1134
1135	tmp = estats->mac_discard;
1136	for_each_rx_queue(bp, i) {
1137		struct tstorm_per_queue_stats *old_tclient =
1138			&bp->fp_stats[i].old_tclient;
1139		tmp += le32_to_cpu(old_tclient->checksum_discard);
1140	}
1141	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1142
1143	nstats->tx_dropped = 0;
1144
1145	nstats->multicast =
1146		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1147
1148	nstats->collisions =
1149		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1150
1151	nstats->rx_length_errors =
1152		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1153		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1154	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1155				 bnx2x_hilo(&estats->brb_truncate_hi);
1156	nstats->rx_crc_errors =
1157		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1158	nstats->rx_frame_errors =
1159		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1160	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1161	nstats->rx_missed_errors = 0;
1162
1163	nstats->rx_errors = nstats->rx_length_errors +
1164			    nstats->rx_over_errors +
1165			    nstats->rx_crc_errors +
1166			    nstats->rx_frame_errors +
1167			    nstats->rx_fifo_errors +
1168			    nstats->rx_missed_errors;
1169
1170	nstats->tx_aborted_errors =
1171		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1172		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1173	nstats->tx_carrier_errors =
1174		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1175	nstats->tx_fifo_errors = 0;
1176	nstats->tx_heartbeat_errors = 0;
1177	nstats->tx_window_errors = 0;
1178
1179	nstats->tx_errors = nstats->tx_aborted_errors +
1180			    nstats->tx_carrier_errors +
1181	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1182}
1183
/* Fold the driver-maintained per-queue counters (xoff events, rx error
 * discards, SKB allocation failures, HW checksum errors, filtered tx
 * packets) into the global ethernet statistics.
 *
 * NOTE: UPDATE_ESTAT_QSTAT expands against the local names estats,
 * qstats and qstats_old - do not rename them.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}
1201
1202static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
1203{
1204	u32 val;
1205
1206	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
1207		val = SHMEM2_RD(bp, edebug_driver_if[1]);
1208
1209		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
1210			return true;
1211	}
1212
1213	return false;
1214}
1215
/* Periodic statistics refresh (UPDATE event of the stats state machine):
 * pull HW (DMAE) and FW (storm) counters into the driver structures,
 * then refresh the netdev and driver statistics, and finally launch the
 * next collection cycle (PF only).
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* MFW debug interface may have disabled statistics collection */
	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (IS_PF(bp)) {
		/* bail out if the previous DMAE cycle has not completed */
		if (*stats_comp != DMAE_COMP_VAL)
			return;

		if (bp->port.pmf)
			bnx2x_hw_stats_update(bp);

		/* panic after 3 consecutive stale-storm-stats cycles */
		if (bnx2x_storm_stats_update(bp)) {
			if (bp->stats_pending++ == 3) {
				BNX2X_ERR("storm stats were not updated for 3 times\n");
				bnx2x_panic();
			}
			return;
		}
	} else {
		/* vf doesn't collect HW statistics, and doesn't get completions
		 * perform only update
		 */
		bnx2x_storm_stats_update(bp);
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* vf is done */
	if (IS_VF(bp))
		return;

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
		       estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	/* kick off the next HW DMAE transfer and FW statistics ramrod */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
1261
1262static void bnx2x_port_stats_stop(struct bnx2x *bp)
1263{
1264	struct dmae_command *dmae;
1265	u32 opcode;
1266	int loader_idx = PMF_DMAE_C(bp);
1267	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1268
1269	bp->executer_idx = 0;
1270
1271	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1272
1273	if (bp->port.port_stx) {
1274
1275		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1276		if (bp->func_stx)
1277			dmae->opcode = bnx2x_dmae_opcode_add_comp(
1278						opcode, DMAE_COMP_GRC);
1279		else
1280			dmae->opcode = bnx2x_dmae_opcode_add_comp(
1281						opcode, DMAE_COMP_PCI);
1282
1283		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1284		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1285		dmae->dst_addr_lo = bp->port.port_stx >> 2;
1286		dmae->dst_addr_hi = 0;
1287		dmae->len = bnx2x_get_port_stats_dma_len(bp);
1288		if (bp->func_stx) {
1289			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
1290			dmae->comp_addr_hi = 0;
1291			dmae->comp_val = 1;
1292		} else {
1293			dmae->comp_addr_lo =
1294				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1295			dmae->comp_addr_hi =
1296				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1297			dmae->comp_val = DMAE_COMP_VAL;
1298
1299			*stats_comp = 0;
1300		}
1301	}
1302
1303	if (bp->func_stx) {
1304
1305		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1306		dmae->opcode =
1307			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1308		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1309		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1310		dmae->dst_addr_lo = bp->func_stx >> 2;
1311		dmae->dst_addr_hi = 0;
1312		dmae->len = sizeof(struct host_func_stats) >> 2;
1313		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1314		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1315		dmae->comp_val = DMAE_COMP_VAL;
1316
1317		*stats_comp = 0;
1318	}
1319}
1320
1321static void bnx2x_stats_stop(struct bnx2x *bp)
1322{
1323	bool update = false;
1324
1325	bnx2x_stats_comp(bp);
1326
1327	if (bp->port.pmf)
1328		update = (bnx2x_hw_stats_update(bp) == 0);
1329
1330	update |= (bnx2x_storm_stats_update(bp) == 0);
1331
1332	if (update) {
1333		bnx2x_net_stats_update(bp);
1334
1335		if (bp->port.pmf)
1336			bnx2x_port_stats_stop(bp);
1337
1338		bnx2x_hw_stats_post(bp);
1339		bnx2x_stats_comp(bp);
1340	}
1341}
1342
/* No-op action for state-machine transitions that require no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1346
/* Statistics state machine: for each (current state, event) pair this
 * table gives the action to run and the state to move to. It is driven
 * exclusively by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
1365
/* Dispatch a statistics event through the state machine table above.
 *
 * Serializes state transitions with bp->stats_lock. A contended UPDATE
 * (timer context) is simply dropped; other events wait up to 100ms for
 * the lock and are dropped with an error if it cannot be taken.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	/* Statistics update run from timer context, and we don't want to stop
	 * that context in case someone is in the middle of a transition.
	 * For other events, wait a bit until lock is taken.
	 */
	if (down_trylock(&bp->stats_lock)) {
		if (event == STATS_EVENT_UPDATE)
			return;

		DP(BNX2X_MSG_STATS,
		   "Unlikely stats' lock contention [event %d]\n", event);
		if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
			BNX2X_ERR("Failed to take stats lock [event %d]\n",
				  event);
			return;
		}
	}

	/* run the action and advance to the next state under the lock */
	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	up(&bp->stats_lock);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
1399
1400static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1401{
1402	struct dmae_command *dmae;
1403	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1404
1405	/* sanity */
1406	if (!bp->port.pmf || !bp->port.port_stx) {
1407		BNX2X_ERR("BUG!\n");
1408		return;
1409	}
1410
1411	bp->executer_idx = 0;
1412
1413	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1414	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1415					 true, DMAE_COMP_PCI);
1416	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1417	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1418	dmae->dst_addr_lo = bp->port.port_stx >> 2;
1419	dmae->dst_addr_hi = 0;
1420	dmae->len = bnx2x_get_port_stats_dma_len(bp);
1421	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1422	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1423	dmae->comp_val = DMAE_COMP_VAL;
1424
1425	*stats_comp = 0;
1426	bnx2x_hw_stats_post(bp);
1427	bnx2x_stats_comp(bp);
1428}
1429
/* This function prepares the statistics ramrod data once, so that at
 * runtime we only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed - note: 'i' deliberately holds the
	 * value left by the loop above (one past the last eth queue)
	 */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
1554
1555void bnx2x_memset_stats(struct bnx2x *bp)
1556{
1557	int i;
1558
1559	/* function stats */
1560	for_each_queue(bp, i) {
1561		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1562
1563		memset(&fp_stats->old_tclient, 0,
1564		       sizeof(fp_stats->old_tclient));
1565		memset(&fp_stats->old_uclient, 0,
1566		       sizeof(fp_stats->old_uclient));
1567		memset(&fp_stats->old_xclient, 0,
1568		       sizeof(fp_stats->old_xclient));
1569		if (bp->stats_init) {
1570			memset(&fp_stats->eth_q_stats, 0,
1571			       sizeof(fp_stats->eth_q_stats));
1572			memset(&fp_stats->eth_q_stats_old, 0,
1573			       sizeof(fp_stats->eth_q_stats_old));
1574		}
1575	}
1576
1577	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1578
1579	if (bp->stats_init) {
1580		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1581		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1582		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1583		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1584		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1585	}
1586
1587	bp->stats_state = STATS_STATE_DISABLED;
1588
1589	if (bp->port.pmf && bp->port.port_stx)
1590		bnx2x_port_stats_base_init(bp);
1591
1592	/* mark the end of statistics initialization */
1593	bp->stats_init = false;
1594}
1595
1596void bnx2x_stats_init(struct bnx2x *bp)
1597{
1598	int /*abs*/port = BP_PORT(bp);
1599	int mb_idx = BP_FW_MB_IDX(bp);
1600
1601	if (IS_VF(bp)) {
1602		bnx2x_memset_stats(bp);
1603		return;
1604	}
1605
1606	bp->stats_pending = 0;
1607	bp->executer_idx = 0;
1608	bp->stats_counter = 0;
1609
1610	/* port and func stats for management */
1611	if (!BP_NOMCP(bp)) {
1612		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1613		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1614
1615	} else {
1616		bp->port.port_stx = 0;
1617		bp->func_stx = 0;
1618	}
1619	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
1620	   bp->port.port_stx, bp->func_stx);
1621
1622	/* pmf should retrieve port statistics from SP on a non-init*/
1623	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
1624		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1625
1626	port = BP_PORT(bp);
1627	/* port stats */
1628	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
1629	bp->port.old_nig_stats.brb_discard =
1630			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1631	bp->port.old_nig_stats.brb_truncate =
1632			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1633	if (!CHIP_IS_E3(bp)) {
1634		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1635			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1636		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1637			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1638	}
1639
1640	/* Prepare statistics ramrod data */
1641	bnx2x_prep_fw_stats_req(bp);
1642
1643	/* Clean SP from previous statistics */
1644	if (bp->stats_init) {
1645		if (bp->func_stx) {
1646			memset(bnx2x_sp(bp, func_stats), 0,
1647			       sizeof(struct host_func_stats));
1648			bnx2x_func_stats_init(bp);
1649			bnx2x_hw_stats_post(bp);
1650			bnx2x_stats_comp(bp);
1651		}
1652	}
1653
1654	bnx2x_memset_stats(bp);
1655}
1656
/* Snapshot statistics that must survive a device reload into the "_old"
 * structures, so counters appear continuous across the reload.
 *
 * NOTE: UPDATE_QSTAT_OLD expands against the local names qstats and
 * qstats_old; UPDATE_FW_STAT_OLD against estats and fwstats - do not
 * rename these locals.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}
1699
1700void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1701			      u32 stats_type)
1702{
1703	int i;
1704	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1705	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1706	struct per_queue_stats *fcoe_q_stats =
1707		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1708
1709	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1710		&fcoe_q_stats->tstorm_queue_statistics;
1711
1712	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1713		&fcoe_q_stats->ustorm_queue_statistics;
1714
1715	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1716		&fcoe_q_stats->xstorm_queue_statistics;
1717
1718	struct fcoe_statistics_params *fw_fcoe_stat =
1719		&bp->fw_stats_data->fcoe;
1720
1721	memset(afex_stats, 0, sizeof(struct afex_stats));
1722
1723	for_each_eth_queue(bp, i) {
1724		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1725
1726		ADD_64(afex_stats->rx_unicast_bytes_hi,
1727		       qstats->total_unicast_bytes_received_hi,
1728		       afex_stats->rx_unicast_bytes_lo,
1729		       qstats->total_unicast_bytes_received_lo);
1730
1731		ADD_64(afex_stats->rx_broadcast_bytes_hi,
1732		       qstats->total_broadcast_bytes_received_hi,
1733		       afex_stats->rx_broadcast_bytes_lo,
1734		       qstats->total_broadcast_bytes_received_lo);
1735
1736		ADD_64(afex_stats->rx_multicast_bytes_hi,
1737		       qstats->total_multicast_bytes_received_hi,
1738		       afex_stats->rx_multicast_bytes_lo,
1739		       qstats->total_multicast_bytes_received_lo);
1740
1741		ADD_64(afex_stats->rx_unicast_frames_hi,
1742		       qstats->total_unicast_packets_received_hi,
1743		       afex_stats->rx_unicast_frames_lo,
1744		       qstats->total_unicast_packets_received_lo);
1745
1746		ADD_64(afex_stats->rx_broadcast_frames_hi,
1747		       qstats->total_broadcast_packets_received_hi,
1748		       afex_stats->rx_broadcast_frames_lo,
1749		       qstats->total_broadcast_packets_received_lo);
1750
1751		ADD_64(afex_stats->rx_multicast_frames_hi,
1752		       qstats->total_multicast_packets_received_hi,
1753		       afex_stats->rx_multicast_frames_lo,
1754		       qstats->total_multicast_packets_received_lo);
1755
1756		/* sum to rx_frames_discarded all discraded
1757		 * packets due to size, ttl0 and checksum
1758		 */
1759		ADD_64(afex_stats->rx_frames_discarded_hi,
1760		       qstats->total_packets_received_checksum_discarded_hi,
1761		       afex_stats->rx_frames_discarded_lo,
1762		       qstats->total_packets_received_checksum_discarded_lo);
1763
1764		ADD_64(afex_stats->rx_frames_discarded_hi,
1765		       qstats->total_packets_received_ttl0_discarded_hi,
1766		       afex_stats->rx_frames_discarded_lo,
1767		       qstats->total_packets_received_ttl0_discarded_lo);
1768
1769		ADD_64(afex_stats->rx_frames_discarded_hi,
1770		       qstats->etherstatsoverrsizepkts_hi,
1771		       afex_stats->rx_frames_discarded_lo,
1772		       qstats->etherstatsoverrsizepkts_lo);
1773
1774		ADD_64(afex_stats->rx_frames_dropped_hi,
1775		       qstats->no_buff_discard_hi,
1776		       afex_stats->rx_frames_dropped_lo,
1777		       qstats->no_buff_discard_lo);
1778
1779		ADD_64(afex_stats->tx_unicast_bytes_hi,
1780		       qstats->total_unicast_bytes_transmitted_hi,
1781		       afex_stats->tx_unicast_bytes_lo,
1782		       qstats->total_unicast_bytes_transmitted_lo);
1783
1784		ADD_64(afex_stats->tx_broadcast_bytes_hi,
1785		       qstats->total_broadcast_bytes_transmitted_hi,
1786		       afex_stats->tx_broadcast_bytes_lo,
1787		       qstats->total_broadcast_bytes_transmitted_lo);
1788
1789		ADD_64(afex_stats->tx_multicast_bytes_hi,
1790		       qstats->total_multicast_bytes_transmitted_hi,
1791		       afex_stats->tx_multicast_bytes_lo,
1792		       qstats->total_multicast_bytes_transmitted_lo);
1793
1794		ADD_64(afex_stats->tx_unicast_frames_hi,
1795		       qstats->total_unicast_packets_transmitted_hi,
1796		       afex_stats->tx_unicast_frames_lo,
1797		       qstats->total_unicast_packets_transmitted_lo);
1798
1799		ADD_64(afex_stats->tx_broadcast_frames_hi,
1800		       qstats->total_broadcast_packets_transmitted_hi,
1801		       afex_stats->tx_broadcast_frames_lo,
1802		       qstats->total_broadcast_packets_transmitted_lo);
1803
1804		ADD_64(afex_stats->tx_multicast_frames_hi,
1805		       qstats->total_multicast_packets_transmitted_hi,
1806		       afex_stats->tx_multicast_frames_lo,
1807		       qstats->total_multicast_packets_transmitted_lo);
1808
1809		ADD_64(afex_stats->tx_frames_dropped_hi,
1810		       qstats->total_transmitted_dropped_packets_error_hi,
1811		       afex_stats->tx_frames_dropped_lo,
1812		       qstats->total_transmitted_dropped_packets_error_lo);
1813	}
1814
1815	/* now add FCoE statistics which are collected separately
1816	 * (both offloaded and non offloaded)
1817	 */
1818	if (!NO_FCOE(bp)) {
1819		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1820			  LE32_0,
1821			  afex_stats->rx_unicast_bytes_lo,
1822			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1823
1824		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1825			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1826			  afex_stats->rx_unicast_bytes_lo,
1827			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1828
1829		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1830			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1831			  afex_stats->rx_broadcast_bytes_lo,
1832			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1833
1834		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1835			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1836			  afex_stats->rx_multicast_bytes_lo,
1837			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1838
1839		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1840			  LE32_0,
1841			  afex_stats->rx_unicast_frames_lo,
1842			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1843
1844		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1845			  LE32_0,
1846			  afex_stats->rx_unicast_frames_lo,
1847			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1848
1849		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1850			  LE32_0,
1851			  afex_stats->rx_broadcast_frames_lo,
1852			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
1853
1854		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1855			  LE32_0,
1856			  afex_stats->rx_multicast_frames_lo,
1857			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1858
1859		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1860			  LE32_0,
1861			  afex_stats->rx_frames_discarded_lo,
1862			  fcoe_q_tstorm_stats->checksum_discard);
1863
1864		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1865			  LE32_0,
1866			  afex_stats->rx_frames_discarded_lo,
1867			  fcoe_q_tstorm_stats->pkts_too_big_discard);
1868
1869		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1870			  LE32_0,
1871			  afex_stats->rx_frames_discarded_lo,
1872			  fcoe_q_tstorm_stats->ttl0_discard);
1873
1874		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1875			    LE16_0,
1876			    afex_stats->rx_frames_dropped_lo,
1877			    fcoe_q_tstorm_stats->no_buff_discard);
1878
1879		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1880			  LE32_0,
1881			  afex_stats->rx_frames_dropped_lo,
1882			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1883
1884		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1885			  LE32_0,
1886			  afex_stats->rx_frames_dropped_lo,
1887			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1888
1889		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1890			  LE32_0,
1891			  afex_stats->rx_frames_dropped_lo,
1892			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1893
1894		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1895			  LE32_0,
1896			  afex_stats->rx_frames_dropped_lo,
1897			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1898
1899		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1900			  LE32_0,
1901			  afex_stats->rx_frames_dropped_lo,
1902			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1903
1904		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1905			  LE32_0,
1906			  afex_stats->tx_unicast_bytes_lo,
1907			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1908
1909		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1910			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1911			  afex_stats->tx_unicast_bytes_lo,
1912			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1913
1914		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1915			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1916			  afex_stats->tx_broadcast_bytes_lo,
1917			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1918
1919		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1920			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1921			  afex_stats->tx_multicast_bytes_lo,
1922			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1923
1924		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1925			  LE32_0,
1926			  afex_stats->tx_unicast_frames_lo,
1927			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1928
1929		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1930			  LE32_0,
1931			  afex_stats->tx_unicast_frames_lo,
1932			  fcoe_q_xstorm_stats->ucast_pkts_sent);
1933
1934		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1935			  LE32_0,
1936			  afex_stats->tx_broadcast_frames_lo,
1937			  fcoe_q_xstorm_stats->bcast_pkts_sent);
1938
1939		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1940			  LE32_0,
1941			  afex_stats->tx_multicast_frames_lo,
1942			  fcoe_q_xstorm_stats->mcast_pkts_sent);
1943
1944		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1945			  LE32_0,
1946			  afex_stats->tx_frames_dropped_lo,
1947			  fcoe_q_xstorm_stats->error_drop_pkts);
1948	}
1949
1950	/* if port stats are requested, add them to the PMF
1951	 * stats, as anyway they will be accumulated by the
1952	 * MCP before sent to the switch
1953	 */
1954	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1955		ADD_64(afex_stats->rx_frames_dropped_hi,
1956		       0,
1957		       afex_stats->rx_frames_dropped_lo,
1958		       estats->mac_filter_discard);
1959		ADD_64(afex_stats->rx_frames_dropped_hi,
1960		       0,
1961		       afex_stats->rx_frames_dropped_lo,
1962		       estats->brb_truncate_discard);
1963		ADD_64(afex_stats->rx_frames_discarded_hi,
1964		       0,
1965		       afex_stats->rx_frames_discarded_lo,
1966		       estats->mac_discard);
1967	}
1968}
1969
1970int bnx2x_stats_safe_exec(struct bnx2x *bp,
1971			  void (func_to_exec)(void *cookie),
1972			  void *cookie)
1973{
1974	int cnt = 10, rc = 0;
1975
1976	/* Wait for statistics to end [while blocking further requests],
1977	 * then run supplied function 'safely'.
1978	 */
1979	rc = down_timeout(&bp->stats_lock, HZ / 10);
1980	if (unlikely(rc)) {
1981		BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1982		goto out_no_lock;
1983	}
1984
1985	bnx2x_stats_comp(bp);
1986	while (bp->stats_pending && cnt--)
1987		if (bnx2x_storm_stats_update(bp))
1988			usleep_range(1000, 2000);
1989	if (bp->stats_pending) {
1990		BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1991		rc = -EBUSY;
1992		goto out;
1993	}
1994
1995	func_to_exec(cookie);
1996
1997out:
1998	/* No need to restart statistics - if they're enabled, the timer
1999	 * will restart the statistics.
2000	 */
2001	up(&bp->stats_lock);
2002out_no_lock:
2003	return rc;
2004}
2005