This source file includes the following definitions:
- bnx2x_add_all_napi_cnic
- bnx2x_add_all_napi
- bnx2x_calc_num_queues
- bnx2x_move_fp
- bnx2x_fill_fw_str
- bnx2x_shrink_eth_fp
- bnx2x_free_tx_pkt
- bnx2x_tx_int
- bnx2x_update_last_max_sge
- bnx2x_update_sge_prod
- bnx2x_get_rxhash
- bnx2x_tpa_start
- bnx2x_set_gro_params
- bnx2x_alloc_rx_sge
- bnx2x_fill_frag_skb
- bnx2x_frag_free
- bnx2x_frag_alloc
- bnx2x_gro_ip_csum
- bnx2x_gro_ipv6_csum
- bnx2x_gro_csum
- bnx2x_gro_receive
- bnx2x_tpa_stop
- bnx2x_alloc_rx_data
- bnx2x_csum_validate
- bnx2x_rx_int
- bnx2x_msix_fp_int
- bnx2x_acquire_phy_lock
- bnx2x_release_phy_lock
- bnx2x_get_mf_speed
- bnx2x_fill_report_data
- bnx2x_link_report
- __bnx2x_link_report
- bnx2x_set_next_page_sgl
- bnx2x_free_tpa_pool
- bnx2x_init_rx_rings_cnic
- bnx2x_init_rx_rings
- bnx2x_free_tx_skbs_queue
- bnx2x_free_tx_skbs_cnic
- bnx2x_free_tx_skbs
- bnx2x_free_rx_bds
- bnx2x_free_rx_skbs_cnic
- bnx2x_free_rx_skbs
- bnx2x_free_skbs_cnic
- bnx2x_free_skbs
- bnx2x_update_max_mf_config
- bnx2x_free_msix_irqs
- bnx2x_free_irq
- bnx2x_enable_msix
- bnx2x_req_msix_irqs
- bnx2x_enable_msi
- bnx2x_req_irq
- bnx2x_setup_irqs
- bnx2x_napi_enable_cnic
- bnx2x_napi_enable
- bnx2x_napi_disable_cnic
- bnx2x_napi_disable
- bnx2x_netif_start
- bnx2x_netif_stop
- bnx2x_select_queue
- bnx2x_set_num_queues
- bnx2x_set_real_num_queues
- bnx2x_set_rx_buf_size
- bnx2x_init_rss
- bnx2x_rss
- bnx2x_init_hw
- bnx2x_squeeze_objects
- bnx2x_free_fw_stats_mem
- bnx2x_alloc_fw_stats_mem
- bnx2x_nic_load_request
- bnx2x_compare_fw_ver
- bnx2x_nic_load_no_mcp
- bnx2x_nic_load_pmf
- bnx2x_nic_load_afex_dcc
- bnx2x_bz_fp
- bnx2x_set_os_driver_state
- bnx2x_load_cnic
- bnx2x_nic_load
- bnx2x_drain_tx_queues
- bnx2x_nic_unload
- bnx2x_set_power_state
- bnx2x_poll
- bnx2x_tx_split
- bnx2x_csum_fix
- bnx2x_xmit_type
- bnx2x_pkt_req_lin
- bnx2x_set_pbd_gso
- bnx2x_set_pbd_csum_enc
- bnx2x_set_pbd_csum_e2
- bnx2x_set_sbd_csum
- bnx2x_set_pbd_csum
- bnx2x_update_pbds_gso_enc
- bnx2x_set_ipv6_ext_e2
- bnx2x_start_xmit
- bnx2x_get_c2s_mapping
- bnx2x_setup_tc
- __bnx2x_setup_tc
- bnx2x_change_mac_addr
- bnx2x_free_fp_mem_at
- bnx2x_free_fp_mem_cnic
- bnx2x_free_fp_mem
- set_sb_shortcuts
- bnx2x_alloc_rx_bds
- bnx2x_set_next_page_rx_cq
- bnx2x_alloc_fp_mem_at
- bnx2x_alloc_fp_mem_cnic
- bnx2x_alloc_fp_mem
- bnx2x_free_mem_bp
- bnx2x_alloc_mem_bp
- bnx2x_reload_if_running
- bnx2x_get_cur_phy_idx
- bnx2x_get_link_cfg_idx
- bnx2x_fcoe_get_wwn
- bnx2x_change_mtu
- bnx2x_fix_features
- bnx2x_set_features
- bnx2x_tx_timeout
- bnx2x_suspend
- bnx2x_resume
- bnx2x_set_ctx_validation
- storm_memset_hc_timeout
- storm_memset_hc_disable
- bnx2x_update_coalesce_sb_index
- bnx2x_schedule_sp_rtnl
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <linux/prefetch.h>
31 #include "bnx2x_cmn.h"
32 #include "bnx2x_init.h"
33 #include "bnx2x_sp.h"
34
35 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
36 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
38 static int bnx2x_poll(struct napi_struct *napi, int budget);
39
40 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
41 {
42 int i;
43
44 /* Add NAPI objects */
45 for_each_rx_queue_cnic(bp, i) {
46 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
47 bnx2x_poll, NAPI_POLL_WEIGHT);
48 }
49 }
50
51 static void bnx2x_add_all_napi(struct bnx2x *bp)
52 {
53 int i;
54
55 /* Add NAPI objects */
56 for_each_eth_queue(bp, i) {
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58 bnx2x_poll, NAPI_POLL_WEIGHT);
59 }
60 }
61
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
63 {
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
65
66 /* Reduce memory usage in kdump environment by using only one queue */
67 if (is_kdump_kernel())
68 nq = 1;
69
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
71 return nq;
72 }
73
74 /**
75 * bnx2x_move_fp - move contents of one fastpath structure to another
76 *
77 * @bp:   driver handle
78 * @from: source fastpath index
79 * @to:   destination fastpath index
80 *
81 * Keeps the destination's napi struct intact by first copying it into
82 * the source, then copying the whole source fastpath (together with its
83 * sp_objs and per-queue stats) onto the destination. The destination's
84 * original tpa_info pointer is preserved, and the txdata pointer is
85 * updated when the FCoE queue is the one being moved.
86 */
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
88 {
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
98
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
101
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
104 to_fp->index = to;
105
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
108 */
109 to_fp->tpa_info = old_tpa_info;
110
111
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
113
114
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
116
117
118
119
120
121
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
124 (bp)->max_cos;
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 }
129
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
134 }
135
136 /**
137 * bnx2x_fill_fw_str - fill a buffer with the firmware version string
138 *
139 * @bp:      driver handle
140 * @buf:     character buffer to fill with the version string
141 * @buf_len: length of the above buffer
142 *
143 */
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
145 {
146 if (IS_PF(bp)) {
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
148
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
154 "bc %d.%d.%d%s%s",
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
159 } else {
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
161 }
162 }
163
164 /**
165 * bnx2x_shrink_eth_fp - guarantee bnx2x_fp_txdata consistency
166 *
167 * @bp:    driver handle
168 * @delta: number of eth queues which were not allocated
169 */
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
171 {
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
173
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overridden
176 */
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
181
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
185 }
186 }
187 }
188
189 int bnx2x_load_count[2][3] = { {0} };
190
191
192
193
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
197 {
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
203 int nbd;
204 u16 split_bd_len = 0;
205
206
207 prefetch(&skb->end);
208
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
211
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
213
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
218 bnx2x_panic();
219 }
220 #endif
221 new_cons = nbd + tx_buf->first_bd;
222
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
225
226 /* Skip a parse bd */
227 --nbd;
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd */
232 --nbd;
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
234 }
235
236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
239 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
240 --nbd;
241 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
242 }
243
244 /* unmap first bd */
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
246 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
247 DMA_TO_DEVICE);
248
249 /* now free frags */
250 while (nbd > 0) {
251
252 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
254 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
255 if (--nbd)
256 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
257 }
258
259
260 WARN_ON(!skb);
261 if (likely(skb)) {
262 (*pkts_compl)++;
263 (*bytes_compl) += skb->len;
264 dev_kfree_skb_any(skb);
265 }
266
267 tx_buf->first_bd = 0;
268 tx_buf->skb = NULL;
269
270 return new_cons;
271 }
272
273 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
274 {
275 struct netdev_queue *txq;
276 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277 unsigned int pkts_compl = 0, bytes_compl = 0;
278
279 #ifdef BNX2X_STOP_ON_ERROR
280 if (unlikely(bp->panic))
281 return -1;
282 #endif
283
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286 sw_cons = txdata->tx_pkt_cons;
287
288
289 smp_rmb();
290
291 while (sw_cons != hw_cons) {
292 u16 pkt_cons;
293
294 pkt_cons = TX_BD(sw_cons);
295
296 DP(NETIF_MSG_TX_DONE,
297 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
298 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
299
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
301 &pkts_compl, &bytes_compl);
302
303 sw_cons++;
304 }
305
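/* Report the completed packets/bytes to BQL (byte queue limits) so the
 * stack can size the queue for this ring.
 */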
306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
307
308 txdata->tx_pkt_cons = sw_cons;
309 txdata->tx_bd_cons = bd_cons;
310
311 /* Need to make the tx_bd_cons update visible to start_xmit()
312 * before checking for netif_tx_queue_stopped(). Without the
313 * memory barrier, there is a small possibility that
314 * start_xmit() will miss it and cause the queue to be stopped
315 * forever.
316 * On the other hand we need an rmb() here to ensure the proper
317 * ordering of bit testing in the following
318 * netif_tx_queue_stopped(txq) call.
319 */
320 smp_mb();
321
322 if (unlikely(netif_tx_queue_stopped(txq))) {
323
324
325
326
327
328
329
330
331
332
333 __netif_tx_lock(txq, smp_processor_id());
334
335 if ((netif_tx_queue_stopped(txq)) &&
336 (bp->state == BNX2X_STATE_OPEN) &&
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
338 netif_tx_wake_queue(txq);
339
340 __netif_tx_unlock(txq);
341 }
342 return 0;
343 }
344
345 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346 u16 idx)
347 {
348 u16 last_max = fp->last_max_sge;
349
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
352 }
353
354 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
355 u16 sge_len,
356 struct eth_end_agg_rx_cqe *cqe)
357 {
358 struct bnx2x *bp = fp->bp;
359 u16 last_max, last_elem, first_elem;
360 u16 delta = 0;
361 u16 i;
362
363 if (!sge_len)
364 return;
365
366
367 for (i = 0; i < sge_len; i++)
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
370
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
373
374
375 prefetch((void *)(fp->sge_mask));
376 bnx2x_update_last_max_sge(fp,
377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
378
379 last_max = RX_SGE(fp->last_max_sge);
380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
382
383
384 if (last_elem + 1 != first_elem)
385 last_elem++;
386
387
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
390 break;
391
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
394 }
395
396 if (delta > 0) {
397 fp->rx_sge_prod += delta;
398
399 bnx2x_clear_sge_mask_next_elems(fp);
400 }
401
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
405 }
406
407
408
409
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
411 const struct eth_fast_path_rx_cqe *cqe,
412 enum pkt_hash_types *rxhash_type)
413 {
414
415 if ((bp->dev->features & NETIF_F_RXHASH) &&
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
418
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423
424 return le32_to_cpu(cqe->rss_hash_result);
425 }
426 *rxhash_type = PKT_HASH_TYPE_NONE;
427 return 0;
428 }
429
430 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
431 u16 cons, u16 prod,
432 struct eth_fast_path_rx_cqe *cqe)
433 {
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
438 dma_addr_t mapping;
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
441
442
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
445
446
447 mapping = dma_map_single(&bp->pdev->dev,
448 first_buf->data + NET_SKB_PAD,
449 fp->rx_buf_size, DMA_FROM_DEVICE);
450
451
452
453
454
455
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457
458 bnx2x_reuse_rx_data(fp, cons, prod);
459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 return;
461 }
462
463
464 prod_rx_buf->data = first_buf->data;
465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
466
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
469
470
471 *first_buf = *cons_rx_buf;
472
473
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
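/* full_page is the largest multiple of the aggregation MSS (gro_size)
 * that fits in SGE_PAGES, so each filled page ends on a segment boundary.
 */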
483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
484 tpa_info->gro_size = gro_size;
485 }
486
487 #ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
490 fp->tpa_queue_used);
491 #endif
492 }
493
494
495
496
497
498 #define TPA_TSTAMP_OPT_LEN 12
499
500
501
502
503
504
505
506
507
508
509
510
511
512 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
513 u16 len_on_bd, unsigned int pkt_len,
514 u16 num_of_coalesced_segs)
515 {
516
517
518
519 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
520
521 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
522 PRS_FLAG_OVERETH_IPV6) {
523 hdrs_len += sizeof(struct ipv6hdr);
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
525 } else {
526 hdrs_len += sizeof(struct iphdr);
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 }
529
530
531
532
533
534
535 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 hdrs_len += TPA_TSTAMP_OPT_LEN;
537
538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
539
540
541
542
543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
544 }
545
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
548 {
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
552 dma_addr_t mapping;
553
554 if (!pool->page) {
555 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
556 if (unlikely(!pool->page))
557 return -ENOMEM;
558
559 pool->offset = 0;
560 }
561
562 mapping = dma_map_page(&bp->pdev->dev, pool->page,
563 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
564 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
565 BNX2X_ERR("Can't map sge\n");
566 return -ENOMEM;
567 }
568
569 sw_buf->page = pool->page;
570 sw_buf->offset = pool->offset;
571
572 dma_unmap_addr_set(sw_buf, mapping, mapping);
573
574 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
575 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
576
577 pool->offset += SGE_PAGE_SIZE;
578 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
579 get_page(pool->page);
580 else
581 pool->page = NULL;
582 return 0;
583 }
584
585 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
586 struct bnx2x_agg_info *tpa_info,
587 u16 pages,
588 struct sk_buff *skb,
589 struct eth_end_agg_rx_cqe *cqe,
590 u16 cqe_idx)
591 {
592 struct sw_rx_page *rx_pg, old_rx_pg;
593 u32 i, frag_len, frag_size;
594 int err, j, frag_id = 0;
595 u16 len_on_bd = tpa_info->len_on_bd;
596 u16 full_page = 0, gro_size = 0;
597
598 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
599
600 if (fp->mode == TPA_MODE_GRO) {
601 gro_size = tpa_info->gro_size;
602 full_page = tpa_info->full_page;
603 }
604
605
606 if (frag_size)
607 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
608 le16_to_cpu(cqe->pkt_len),
609 le16_to_cpu(cqe->num_of_coalesced_segs));
610
611 #ifdef BNX2X_STOP_ON_ERROR
612 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
613 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
614 pages, cqe_idx);
615 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
616 bnx2x_panic();
617 return -EINVAL;
618 }
619 #endif
620
621
622 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
623 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
624
625
626
627 if (fp->mode == TPA_MODE_GRO)
628 frag_len = min_t(u32, frag_size, (u32)full_page);
629 else
630 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
631
632 rx_pg = &fp->rx_page_ring[sge_idx];
633 old_rx_pg = *rx_pg;
634
635
636
637 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
638 if (unlikely(err)) {
639 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
640 return err;
641 }
642
643 dma_unmap_page(&bp->pdev->dev,
644 dma_unmap_addr(&old_rx_pg, mapping),
645 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
646
647 if (fp->mode == TPA_MODE_LRO)
648 skb_fill_page_desc(skb, j, old_rx_pg.page,
649 old_rx_pg.offset, frag_len);
650 else {
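/* GRO path: expose the SGE page as MSS-sized frags (roughly one per
 * coalesced segment); every additional frag that shares the page needs
 * its own page reference.
 */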
651 int rem;
652 int offset = 0;
653 for (rem = frag_len; rem > 0; rem -= gro_size) {
654 int len = rem > gro_size ? gro_size : rem;
655 skb_fill_page_desc(skb, frag_id++,
656 old_rx_pg.page,
657 old_rx_pg.offset + offset,
658 len);
659 if (offset)
660 get_page(old_rx_pg.page);
661 offset += len;
662 }
663 }
664
665 skb->data_len += frag_len;
666 skb->truesize += SGE_PAGES;
667 skb->len += frag_len;
668
669 frag_size -= frag_len;
670 }
671
672 return 0;
673 }
674
675 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
676 {
677 if (fp->rx_frag_size)
678 skb_free_frag(data);
679 else
680 kfree(data);
681 }
682
683 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
684 {
685 if (fp->rx_frag_size) {
686
687 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
688 return (void *)__get_free_page(gfp_mask);
689
690 return napi_alloc_frag(fp->rx_frag_size);
691 }
692
693 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
694 }
695
696 #ifdef CONFIG_INET
697 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
698 {
699 const struct iphdr *iph = ip_hdr(skb);
700 struct tcphdr *th;
701
702 skb_set_transport_header(skb, sizeof(struct iphdr));
703 th = tcp_hdr(skb);
704
705 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
706 iph->saddr, iph->daddr, 0);
707 }
708
709 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
710 {
711 struct ipv6hdr *iph = ipv6_hdr(skb);
712 struct tcphdr *th;
713
714 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
715 th = tcp_hdr(skb);
716
717 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
718 &iph->saddr, &iph->daddr, 0);
719 }
720
721 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
722 void (*gro_func)(struct bnx2x*, struct sk_buff*))
723 {
724 skb_reset_network_header(skb);
725 gro_func(bp, skb);
726 tcp_gro_complete(skb);
727 }
728 #endif
729
730 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
731 struct sk_buff *skb)
732 {
733 #ifdef CONFIG_INET
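/* For firmware-coalesced packets, rebuild the TCP pseudo-header checksum
 * for the detected L3 protocol and finish the GRO bookkeeping via
 * tcp_gro_complete() before handing the skb to the stack.
 */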
734 if (skb_shinfo(skb)->gso_size) {
735 switch (be16_to_cpu(skb->protocol)) {
736 case ETH_P_IP:
737 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
738 break;
739 case ETH_P_IPV6:
740 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
741 break;
742 default:
743 netdev_WARN_ONCE(bp->dev,
744 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
745 be16_to_cpu(skb->protocol));
746 }
747 }
748 #endif
749 skb_record_rx_queue(skb, fp->rx_queue);
750 napi_gro_receive(&fp->napi, skb);
751 }
752
753 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
754 struct bnx2x_agg_info *tpa_info,
755 u16 pages,
756 struct eth_end_agg_rx_cqe *cqe,
757 u16 cqe_idx)
758 {
759 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
760 u8 pad = tpa_info->placement_offset;
761 u16 len = tpa_info->len_on_bd;
762 struct sk_buff *skb = NULL;
763 u8 *new_data, *data = rx_buf->data;
764 u8 old_tpa_state = tpa_info->tpa_state;
765
766 tpa_info->tpa_state = BNX2X_TPA_STOP;
767
768
769
770
771 if (old_tpa_state == BNX2X_TPA_ERROR)
772 goto drop;
773
774
775 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
776
777
778
779 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
780 fp->rx_buf_size, DMA_FROM_DEVICE);
781 if (likely(new_data))
782 skb = build_skb(data, fp->rx_frag_size);
783
784 if (likely(skb)) {
785 #ifdef BNX2X_STOP_ON_ERROR
786 if (pad + len > fp->rx_buf_size) {
787 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
788 pad, len, fp->rx_buf_size);
789 bnx2x_panic();
790 return;
791 }
792 #endif
793
794 skb_reserve(skb, pad + NET_SKB_PAD);
795 skb_put(skb, len);
796 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
797
798 skb->protocol = eth_type_trans(skb, bp->dev);
799 skb->ip_summed = CHECKSUM_UNNECESSARY;
800
801 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
802 skb, cqe, cqe_idx)) {
803 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
804 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
805 bnx2x_gro_receive(bp, fp, skb);
806 } else {
807 DP(NETIF_MSG_RX_STATUS,
808 "Failed to allocate new pages - dropping packet!\n");
809 dev_kfree_skb_any(skb);
810 }
811
812
813 rx_buf->data = new_data;
814
815 return;
816 }
817 if (new_data)
818 bnx2x_frag_free(fp, new_data);
819 drop:
820
821 DP(NETIF_MSG_RX_STATUS,
822 "Failed to allocate or map a new skb - dropping packet!\n");
823 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
824 }
825
826 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 u16 index, gfp_t gfp_mask)
828 {
829 u8 *data;
830 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
831 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
832 dma_addr_t mapping;
833
834 data = bnx2x_frag_alloc(fp, gfp_mask);
835 if (unlikely(data == NULL))
836 return -ENOMEM;
837
838 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
839 fp->rx_buf_size,
840 DMA_FROM_DEVICE);
841 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
842 bnx2x_frag_free(fp, data);
843 BNX2X_ERR("Can't map rx data\n");
844 return -ENOMEM;
845 }
846
847 rx_buf->data = data;
848 dma_unmap_addr_set(rx_buf, mapping, mapping);
849
850 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
851 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
852
853 return 0;
854 }
855
856 static
857 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
858 struct bnx2x_fastpath *fp,
859 struct bnx2x_eth_q_stats *qstats)
860 {
861
862
863
864
865
866 if (cqe->fast_path_cqe.status_flags &
867 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
868 return;
869
870
871
872 if (cqe->fast_path_cqe.type_error_flags &
873 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
874 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
875 qstats->hw_csum_err++;
876 else
877 skb->ip_summed = CHECKSUM_UNNECESSARY;
878 }
879
880 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
881 {
882 struct bnx2x *bp = fp->bp;
883 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
884 u16 sw_comp_cons, sw_comp_prod;
885 int rx_pkt = 0;
886 union eth_rx_cqe *cqe;
887 struct eth_fast_path_rx_cqe *cqe_fp;
888
889 #ifdef BNX2X_STOP_ON_ERROR
890 if (unlikely(bp->panic))
891 return 0;
892 #endif
893 if (budget <= 0)
894 return rx_pkt;
895
896 bd_cons = fp->rx_bd_cons;
897 bd_prod = fp->rx_bd_prod;
898 bd_prod_fw = bd_prod;
899 sw_comp_cons = fp->rx_comp_cons;
900 sw_comp_prod = fp->rx_comp_prod;
901
902 comp_ring_cons = RCQ_BD(sw_comp_cons);
903 cqe = &fp->rx_comp_ring[comp_ring_cons];
904 cqe_fp = &cqe->fast_path_cqe;
905
906 DP(NETIF_MSG_RX_STATUS,
907 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
908
909 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
910 struct sw_rx_bd *rx_buf = NULL;
911 struct sk_buff *skb;
912 u8 cqe_fp_flags;
913 enum eth_rx_cqe_type cqe_fp_type;
914 u16 len, pad, queue;
915 u8 *data;
916 u32 rxhash;
917 enum pkt_hash_types rxhash_type;
918
919 #ifdef BNX2X_STOP_ON_ERROR
920 if (unlikely(bp->panic))
921 return 0;
922 #endif
923
924 bd_prod = RX_BD(bd_prod);
925 bd_cons = RX_BD(bd_cons);
926
927
928
929
930
931
932
933
934
935
936
937 rmb();
938
939 cqe_fp_flags = cqe_fp->type_error_flags;
940 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
941
942 DP(NETIF_MSG_RX_STATUS,
943 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
944 CQE_TYPE(cqe_fp_flags),
945 cqe_fp_flags, cqe_fp->status_flags,
946 le32_to_cpu(cqe_fp->rss_hash_result),
947 le16_to_cpu(cqe_fp->vlan_tag),
948 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
949
950
951 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
952 bnx2x_sp_event(fp, cqe);
953 goto next_cqe;
954 }
955
956 rx_buf = &fp->rx_buf_ring[bd_cons];
957 data = rx_buf->data;
958
959 if (!CQE_TYPE_FAST(cqe_fp_type)) {
960 struct bnx2x_agg_info *tpa_info;
961 u16 frag_size, pages;
962 #ifdef BNX2X_STOP_ON_ERROR
963
964 if (fp->mode == TPA_MODE_DISABLED &&
965 (CQE_TYPE_START(cqe_fp_type) ||
966 CQE_TYPE_STOP(cqe_fp_type)))
967 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
968 CQE_TYPE(cqe_fp_type));
969 #endif
970
971 if (CQE_TYPE_START(cqe_fp_type)) {
972 u16 queue = cqe_fp->queue_index;
973 DP(NETIF_MSG_RX_STATUS,
974 "calling tpa_start on queue %d\n",
975 queue);
976
977 bnx2x_tpa_start(fp, queue,
978 bd_cons, bd_prod,
979 cqe_fp);
980
981 goto next_rx;
982 }
983 queue = cqe->end_agg_cqe.queue_index;
984 tpa_info = &fp->tpa_info[queue];
985 DP(NETIF_MSG_RX_STATUS,
986 "calling tpa_stop on queue %d\n",
987 queue);
988
989 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
990 tpa_info->len_on_bd;
991
992 if (fp->mode == TPA_MODE_GRO)
993 pages = (frag_size + tpa_info->full_page - 1) /
994 tpa_info->full_page;
995 else
996 pages = SGE_PAGE_ALIGN(frag_size) >>
997 SGE_PAGE_SHIFT;
998
999 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1000 &cqe->end_agg_cqe, comp_ring_cons);
1001 #ifdef BNX2X_STOP_ON_ERROR
1002 if (bp->panic)
1003 return 0;
1004 #endif
1005
1006 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1007 goto next_cqe;
1008 }
1009
1010 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1011 pad = cqe_fp->placement_offset;
1012 dma_sync_single_for_cpu(&bp->pdev->dev,
1013 dma_unmap_addr(rx_buf, mapping),
1014 pad + RX_COPY_THRESH,
1015 DMA_FROM_DEVICE);
1016 pad += NET_SKB_PAD;
1017 prefetch(data + pad);
1018
1019 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1020 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021 "ERROR flags %x rx packet %u\n",
1022 cqe_fp_flags, sw_comp_cons);
1023 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1024 goto reuse_rx;
1025 }
1026
1027 /* Since we don't have a jumbo ring
1028 * copy small packets if mtu > 1500
1029 */
1030 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1031 (len <= RX_COPY_THRESH)) {
1032 skb = napi_alloc_skb(&fp->napi, len);
1033 if (skb == NULL) {
1034 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1035 "ERROR packet dropped because of alloc failure\n");
1036 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1037 goto reuse_rx;
1038 }
1039 memcpy(skb->data, data + pad, len);
1040 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1041 } else {
1042 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1043 GFP_ATOMIC) == 0)) {
1044 dma_unmap_single(&bp->pdev->dev,
1045 dma_unmap_addr(rx_buf, mapping),
1046 fp->rx_buf_size,
1047 DMA_FROM_DEVICE);
1048 skb = build_skb(data, fp->rx_frag_size);
1049 if (unlikely(!skb)) {
1050 bnx2x_frag_free(fp, data);
1051 bnx2x_fp_qstats(bp, fp)->
1052 rx_skb_alloc_failed++;
1053 goto next_rx;
1054 }
1055 skb_reserve(skb, pad);
1056 } else {
1057 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1058 "ERROR packet dropped because of alloc failure\n");
1059 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1060 reuse_rx:
1061 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1062 goto next_rx;
1063 }
1064 }
1065
1066 skb_put(skb, len);
1067 skb->protocol = eth_type_trans(skb, bp->dev);
1068
1069
1070 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1071 skb_set_hash(skb, rxhash, rxhash_type);
1072
1073 skb_checksum_none_assert(skb);
1074
1075 if (bp->dev->features & NETIF_F_RXCSUM)
1076 bnx2x_csum_validate(skb, cqe, fp,
1077 bnx2x_fp_qstats(bp, fp));
1078
1079 skb_record_rx_queue(skb, fp->rx_queue);
1080
1081
1082 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1083 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1084 bnx2x_set_rx_ts(bp, skb);
1085
1086 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1087 PARSING_FLAGS_VLAN)
1088 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1089 le16_to_cpu(cqe_fp->vlan_tag));
1090
1091 napi_gro_receive(&fp->napi, skb);
1092 next_rx:
1093 rx_buf->data = NULL;
1094
1095 bd_cons = NEXT_RX_IDX(bd_cons);
1096 bd_prod = NEXT_RX_IDX(bd_prod);
1097 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1098 rx_pkt++;
1099 next_cqe:
1100 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1101 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1102
1103
1104 BNX2X_SEED_CQE(cqe_fp);
1105
1106 if (rx_pkt == budget)
1107 break;
1108
1109 comp_ring_cons = RCQ_BD(sw_comp_cons);
1110 cqe = &fp->rx_comp_ring[comp_ring_cons];
1111 cqe_fp = &cqe->fast_path_cqe;
1112 }
1113
1114 fp->rx_bd_cons = bd_cons;
1115 fp->rx_bd_prod = bd_prod_fw;
1116 fp->rx_comp_cons = sw_comp_cons;
1117 fp->rx_comp_prod = sw_comp_prod;
1118
1119
1120 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1121 fp->rx_sge_prod);
1122
1123 return rx_pkt;
1124 }
1125
1126 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1127 {
1128 struct bnx2x_fastpath *fp = fp_cookie;
1129 struct bnx2x *bp = fp->bp;
1130 u8 cos;
1131
1132 DP(NETIF_MSG_INTR,
1133 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1134 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1135
1136 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1137
1138 #ifdef BNX2X_STOP_ON_ERROR
1139 if (unlikely(bp->panic))
1140 return IRQ_HANDLED;
1141 #endif
1142
1143
1144 for_each_cos_in_tx_queue(fp, cos)
1145 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1146
1147 prefetch(&fp->sb_running_index[SM_RX_ID]);
1148 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1149
1150 return IRQ_HANDLED;
1151 }
1152
1153
1154 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1155 {
1156 mutex_lock(&bp->port.phy_mutex);
1157
1158 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1159 }
1160
1161 void bnx2x_release_phy_lock(struct bnx2x *bp)
1162 {
1163 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1164
1165 mutex_unlock(&bp->port.phy_mutex);
1166 }
1167
1168
1169 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1170 {
1171 u16 line_speed = bp->link_vars.line_speed;
1172 if (IS_MF(bp)) {
1173 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1174 bp->mf_config[BP_VN(bp)]);
1175
1176
1177
1178
1179 if (IS_MF_PERCENT_BW(bp))
1180 line_speed = (line_speed * maxCfg) / 100;
1181 else {
1182 u16 vn_max_rate = maxCfg * 100;
1183
1184 if (vn_max_rate < line_speed)
1185 line_speed = vn_max_rate;
1186 }
1187 }
1188
1189 return line_speed;
1190 }
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200 static void bnx2x_fill_report_data(struct bnx2x *bp,
1201 struct bnx2x_link_report_data *data)
1202 {
1203 memset(data, 0, sizeof(*data));
1204
1205 if (IS_PF(bp)) {
1206
1207 data->line_speed = bnx2x_get_mf_speed(bp);
1208
1209
1210 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1211 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1212 &data->link_report_flags);
1213
1214 if (!BNX2X_NUM_ETH_QUEUES(bp))
1215 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1216 &data->link_report_flags);
1217
1218
1219 if (bp->link_vars.duplex == DUPLEX_FULL)
1220 __set_bit(BNX2X_LINK_REPORT_FD,
1221 &data->link_report_flags);
1222
1223
1224 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1225 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1226 &data->link_report_flags);
1227
1228
1229 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1230 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1231 &data->link_report_flags);
1232 } else {
1233 *data = bp->vf_link_vars;
1234 }
1235 }
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247 void bnx2x_link_report(struct bnx2x *bp)
1248 {
1249 bnx2x_acquire_phy_lock(bp);
1250 __bnx2x_link_report(bp);
1251 bnx2x_release_phy_lock(bp);
1252 }
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262 void __bnx2x_link_report(struct bnx2x *bp)
1263 {
1264 struct bnx2x_link_report_data cur_data;
1265
1266 if (bp->force_link_down) {
1267 bp->link_vars.link_up = 0;
1268 return;
1269 }
1270
1271
1272 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1273 bnx2x_read_mf_cfg(bp);
1274
1275
1276 bnx2x_fill_report_data(bp, &cur_data);
1277
1278
1279 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1280 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281 &bp->last_reported_link.link_report_flags) &&
1282 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283 &cur_data.link_report_flags)))
1284 return;
1285
1286 bp->link_cnt++;
1287
1288
1289
1290
1291 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1292
1293
1294 if (IS_PF(bp))
1295 bnx2x_iov_link_update(bp);
1296
1297 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298 &cur_data.link_report_flags)) {
1299 netif_carrier_off(bp->dev);
1300 netdev_err(bp->dev, "NIC Link is Down\n");
1301 return;
1302 } else {
1303 const char *duplex;
1304 const char *flow;
1305
1306 netif_carrier_on(bp->dev);
1307
1308 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1309 &cur_data.link_report_flags))
1310 duplex = "full";
1311 else
1312 duplex = "half";
1313
1314
1315
1316
1317
1318 if (cur_data.link_report_flags) {
1319 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1320 &cur_data.link_report_flags)) {
1321 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1322 &cur_data.link_report_flags))
1323 flow = "ON - receive & transmit";
1324 else
1325 flow = "ON - receive";
1326 } else {
1327 flow = "ON - transmit";
1328 }
1329 } else {
1330 flow = "none";
1331 }
1332 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1333 cur_data.line_speed, duplex, flow);
1334 }
1335 }
1336
1337 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1338 {
1339 int i;
1340
1341 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1342 struct eth_rx_sge *sge;
1343
1344 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1345 sge->addr_hi =
1346 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1347 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1348
1349 sge->addr_lo =
1350 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1351 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352 }
1353 }
1354
1355 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1356 struct bnx2x_fastpath *fp, int last)
1357 {
1358 int i;
1359
1360 for (i = 0; i < last; i++) {
1361 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1362 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1363 u8 *data = first_buf->data;
1364
1365 if (data == NULL) {
1366 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1367 continue;
1368 }
1369 if (tpa_info->tpa_state == BNX2X_TPA_START)
1370 dma_unmap_single(&bp->pdev->dev,
1371 dma_unmap_addr(first_buf, mapping),
1372 fp->rx_buf_size, DMA_FROM_DEVICE);
1373 bnx2x_frag_free(fp, data);
1374 first_buf->data = NULL;
1375 }
1376 }
1377
1378 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379 {
1380 int j;
1381
1382 for_each_rx_queue_cnic(bp, j) {
1383 struct bnx2x_fastpath *fp = &bp->fp[j];
1384
1385 fp->rx_bd_cons = 0;
1386
1387
1388
1389
1390
1391
1392 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1393 fp->rx_sge_prod);
1394 }
1395 }
1396
1397 void bnx2x_init_rx_rings(struct bnx2x *bp)
1398 {
1399 int func = BP_FUNC(bp);
1400 u16 ring_prod;
1401 int i, j;
1402
1403
1404 for_each_eth_queue(bp, j) {
1405 struct bnx2x_fastpath *fp = &bp->fp[j];
1406
1407 DP(NETIF_MSG_IFUP,
1408 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1409
1410 if (fp->mode != TPA_MODE_DISABLED) {
1411
1412 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1413 struct bnx2x_agg_info *tpa_info =
1414 &fp->tpa_info[i];
1415 struct sw_rx_bd *first_buf =
1416 &tpa_info->first_buf;
1417
1418 first_buf->data =
1419 bnx2x_frag_alloc(fp, GFP_KERNEL);
1420 if (!first_buf->data) {
1421 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1422 j);
1423 bnx2x_free_tpa_pool(bp, fp, i);
1424 fp->mode = TPA_MODE_DISABLED;
1425 break;
1426 }
1427 dma_unmap_addr_set(first_buf, mapping, 0);
1428 tpa_info->tpa_state = BNX2X_TPA_STOP;
1429 }
1430
1431
1432 bnx2x_set_next_page_sgl(fp);
1433
1434
1435 bnx2x_init_sge_ring_bit_mask(fp);
1436
1437
1438 for (i = 0, ring_prod = 0;
1439 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1440
1441 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1442 GFP_KERNEL) < 0) {
1443 BNX2X_ERR("was only able to allocate %d rx sges\n",
1444 i);
1445 BNX2X_ERR("disabling TPA for queue[%d]\n",
1446 j);
1447
1448 bnx2x_free_rx_sge_range(bp, fp,
1449 ring_prod);
1450 bnx2x_free_tpa_pool(bp, fp,
1451 MAX_AGG_QS(bp));
1452 fp->mode = TPA_MODE_DISABLED;
1453 ring_prod = 0;
1454 break;
1455 }
1456 ring_prod = NEXT_SGE_IDX(ring_prod);
1457 }
1458
1459 fp->rx_sge_prod = ring_prod;
1460 }
1461 }
1462
1463 for_each_eth_queue(bp, j) {
1464 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
1466 fp->rx_bd_cons = 0;
1467
1468
1469
1470
1471
1472
1473 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1474 fp->rx_sge_prod);
1475
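/* Only queue 0's RCQ address is needed for the E1 USTORM memory
 * workaround programmed below, so skip the remaining queues.
 */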
1476 if (j != 0)
1477 continue;
1478
1479 if (CHIP_IS_E1(bp)) {
1480 REG_WR(bp, BAR_USTRORM_INTMEM +
1481 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1482 U64_LO(fp->rx_comp_mapping));
1483 REG_WR(bp, BAR_USTRORM_INTMEM +
1484 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1485 U64_HI(fp->rx_comp_mapping));
1486 }
1487 }
1488 }
1489
1490 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491 {
1492 u8 cos;
1493 struct bnx2x *bp = fp->bp;
1494
1495 for_each_cos_in_tx_queue(fp, cos) {
1496 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1497 unsigned pkts_compl = 0, bytes_compl = 0;
1498
1499 u16 sw_prod = txdata->tx_pkt_prod;
1500 u16 sw_cons = txdata->tx_pkt_cons;
1501
1502 while (sw_cons != sw_prod) {
1503 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1504 &pkts_compl, &bytes_compl);
1505 sw_cons++;
1506 }
1507
1508 netdev_tx_reset_queue(
1509 netdev_get_tx_queue(bp->dev,
1510 txdata->txq_index));
1511 }
1512 }
1513
1514 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515 {
1516 int i;
1517
1518 for_each_tx_queue_cnic(bp, i) {
1519 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520 }
1521 }
1522
1523 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524 {
1525 int i;
1526
1527 for_each_eth_queue(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 }
1530 }
1531
1532 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1533 {
1534 struct bnx2x *bp = fp->bp;
1535 int i;
1536
1537
1538 if (fp->rx_buf_ring == NULL)
1539 return;
1540
1541 for (i = 0; i < NUM_RX_BD; i++) {
1542 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1543 u8 *data = rx_buf->data;
1544
1545 if (data == NULL)
1546 continue;
1547 dma_unmap_single(&bp->pdev->dev,
1548 dma_unmap_addr(rx_buf, mapping),
1549 fp->rx_buf_size, DMA_FROM_DEVICE);
1550
1551 rx_buf->data = NULL;
1552 bnx2x_frag_free(fp, data);
1553 }
1554 }
1555
1556 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557 {
1558 int j;
1559
1560 for_each_rx_queue_cnic(bp, j) {
1561 bnx2x_free_rx_bds(&bp->fp[j]);
1562 }
1563 }
1564
1565 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566 {
1567 int j;
1568
1569 for_each_eth_queue(bp, j) {
1570 struct bnx2x_fastpath *fp = &bp->fp[j];
1571
1572 bnx2x_free_rx_bds(fp);
1573
1574 if (fp->mode != TPA_MODE_DISABLED)
1575 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576 }
1577 }
1578
1579 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1580 {
1581 bnx2x_free_tx_skbs_cnic(bp);
1582 bnx2x_free_rx_skbs_cnic(bp);
1583 }
1584
1585 void bnx2x_free_skbs(struct bnx2x *bp)
1586 {
1587 bnx2x_free_tx_skbs(bp);
1588 bnx2x_free_rx_skbs(bp);
1589 }
1590
1591 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1592 {
1593
1594 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1595
1596 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1597
1598 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1599
1600
1601 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1602 & FUNC_MF_CFG_MAX_BW_MASK;
1603
1604 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1605 }
1606 }
1607
1608
1609
1610
1611
1612
1613
1614 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615 {
1616 int i, offset = 0;
1617
1618 if (nvecs == offset)
1619 return;
1620
1621
1622 if (IS_PF(bp)) {
1623 free_irq(bp->msix_table[offset].vector, bp->dev);
1624 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1625 bp->msix_table[offset].vector);
1626 offset++;
1627 }
1628
1629 if (CNIC_SUPPORT(bp)) {
1630 if (nvecs == offset)
1631 return;
1632 offset++;
1633 }
1634
1635 for_each_eth_queue(bp, i) {
1636 if (nvecs == offset)
1637 return;
1638 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1639 i, bp->msix_table[offset].vector);
1640
1641 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642 }
1643 }
1644
1645 void bnx2x_free_irq(struct bnx2x *bp)
1646 {
1647 if (bp->flags & USING_MSIX_FLAG &&
1648 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1649 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1650
1651
1652 if (IS_PF(bp))
1653 nvecs++;
1654
1655 bnx2x_free_msix_irqs(bp, nvecs);
1656 } else {
1657 free_irq(bp->dev->irq, bp->dev);
1658 }
1659 }
1660
1661 int bnx2x_enable_msix(struct bnx2x *bp)
1662 {
1663 int msix_vec = 0, i, rc;
1664
1665
1666 if (IS_PF(bp)) {
1667 bp->msix_table[msix_vec].entry = msix_vec;
1668 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1669 bp->msix_table[0].entry);
1670 msix_vec++;
1671 }
1672
1673
1674 if (CNIC_SUPPORT(bp)) {
1675 bp->msix_table[msix_vec].entry = msix_vec;
1676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1677 msix_vec, bp->msix_table[msix_vec].entry);
1678 msix_vec++;
1679 }
1680
1681
1682 for_each_eth_queue(bp, i) {
1683 bp->msix_table[msix_vec].entry = msix_vec;
1684 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1685 msix_vec, msix_vec, i);
1686 msix_vec++;
1687 }
1688
1689 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1690 msix_vec);
1691
1692 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1693 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1694
1695
1696
1697
1698 if (rc == -ENOSPC) {
1699
1700 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1701 if (rc < 0) {
1702 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1703 rc);
1704 goto no_msix;
1705 }
1706
1707 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1708 bp->flags |= USING_SINGLE_MSIX_FLAG;
1709
1710 BNX2X_DEV_INFO("set number of queues to 1\n");
1711 bp->num_ethernet_queues = 1;
1712 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1713 } else if (rc < 0) {
1714 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1715 goto no_msix;
1716 } else if (rc < msix_vec) {
1717
1718 int diff = msix_vec - rc;
1719
1720 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721
1722
1723
1724
1725 bp->num_ethernet_queues -= diff;
1726 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1727
1728 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729 bp->num_queues);
1730 }
1731
1732 bp->flags |= USING_MSIX_FLAG;
1733
1734 return 0;
1735
1736 no_msix:
1737
1738 if (rc == -ENOMEM)
1739 bp->flags |= DISABLE_MSI_FLAG;
1740
1741 return rc;
1742 }
1743
1744 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1745 {
1746 int i, rc, offset = 0;
1747
1748
1749 if (IS_PF(bp)) {
1750 rc = request_irq(bp->msix_table[offset++].vector,
1751 bnx2x_msix_sp_int, 0,
1752 bp->dev->name, bp->dev);
1753 if (rc) {
1754 BNX2X_ERR("request sp irq failed\n");
1755 return -EBUSY;
1756 }
1757 }
1758
1759 if (CNIC_SUPPORT(bp))
1760 offset++;
1761
1762 for_each_eth_queue(bp, i) {
1763 struct bnx2x_fastpath *fp = &bp->fp[i];
1764 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765 bp->dev->name, i);
1766
1767 rc = request_irq(bp->msix_table[offset].vector,
1768 bnx2x_msix_fp_int, 0, fp->name, fp);
1769 if (rc) {
1770 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1771 bp->msix_table[offset].vector, rc);
1772 bnx2x_free_msix_irqs(bp, offset);
1773 return -EBUSY;
1774 }
1775
1776 offset++;
1777 }
1778
1779 i = BNX2X_NUM_ETH_QUEUES(bp);
1780 if (IS_PF(bp)) {
1781 offset = 1 + CNIC_SUPPORT(bp);
1782 netdev_info(bp->dev,
1783 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1784 bp->msix_table[0].vector,
1785 0, bp->msix_table[offset].vector,
1786 i - 1, bp->msix_table[offset + i - 1].vector);
1787 } else {
1788 offset = CNIC_SUPPORT(bp);
1789 netdev_info(bp->dev,
1790 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1791 0, bp->msix_table[offset].vector,
1792 i - 1, bp->msix_table[offset + i - 1].vector);
1793 }
1794 return 0;
1795 }
1796
1797 int bnx2x_enable_msi(struct bnx2x *bp)
1798 {
1799 int rc;
1800
1801 rc = pci_enable_msi(bp->pdev);
1802 if (rc) {
1803 BNX2X_DEV_INFO("MSI is not attainable\n");
1804 return -1;
1805 }
1806 bp->flags |= USING_MSI_FLAG;
1807
1808 return 0;
1809 }
1810
1811 static int bnx2x_req_irq(struct bnx2x *bp)
1812 {
1813 unsigned long flags;
1814 unsigned int irq;
1815
1816 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817 flags = 0;
1818 else
1819 flags = IRQF_SHARED;
1820
1821 if (bp->flags & USING_MSIX_FLAG)
1822 irq = bp->msix_table[0].vector;
1823 else
1824 irq = bp->pdev->irq;
1825
1826 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827 }
1828
1829 static int bnx2x_setup_irqs(struct bnx2x *bp)
1830 {
1831 int rc = 0;
1832 if (bp->flags & USING_MSIX_FLAG &&
1833 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1834 rc = bnx2x_req_msix_irqs(bp);
1835 if (rc)
1836 return rc;
1837 } else {
1838 rc = bnx2x_req_irq(bp);
1839 if (rc) {
1840 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1841 return rc;
1842 }
1843 if (bp->flags & USING_MSI_FLAG) {
1844 bp->dev->irq = bp->pdev->irq;
1845 netdev_info(bp->dev, "using MSI IRQ %d\n",
1846 bp->dev->irq);
1847 }
1848 if (bp->flags & USING_MSIX_FLAG) {
1849 bp->dev->irq = bp->msix_table[0].vector;
1850 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1851 bp->dev->irq);
1852 }
1853 }
1854
1855 return 0;
1856 }
1857
1858 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859 {
1860 int i;
1861
1862 for_each_rx_queue_cnic(bp, i) {
1863 napi_enable(&bnx2x_fp(bp, i, napi));
1864 }
1865 }
1866
1867 static void bnx2x_napi_enable(struct bnx2x *bp)
1868 {
1869 int i;
1870
1871 for_each_eth_queue(bp, i) {
1872 napi_enable(&bnx2x_fp(bp, i, napi));
1873 }
1874 }
1875
1876 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877 {
1878 int i;
1879
1880 for_each_rx_queue_cnic(bp, i) {
1881 napi_disable(&bnx2x_fp(bp, i, napi));
1882 }
1883 }
1884
1885 static void bnx2x_napi_disable(struct bnx2x *bp)
1886 {
1887 int i;
1888
1889 for_each_eth_queue(bp, i) {
1890 napi_disable(&bnx2x_fp(bp, i, napi));
1891 }
1892 }
1893
1894 void bnx2x_netif_start(struct bnx2x *bp)
1895 {
1896 if (netif_running(bp->dev)) {
1897 bnx2x_napi_enable(bp);
1898 if (CNIC_LOADED(bp))
1899 bnx2x_napi_enable_cnic(bp);
1900 bnx2x_int_enable(bp);
1901 if (bp->state == BNX2X_STATE_OPEN)
1902 netif_tx_wake_all_queues(bp->dev);
1903 }
1904 }
1905
1906 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1907 {
1908 bnx2x_int_disable_sync(bp, disable_hw);
1909 bnx2x_napi_disable(bp);
1910 if (CNIC_LOADED(bp))
1911 bnx2x_napi_disable_cnic(bp);
1912 }
1913
1914 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1915 struct net_device *sb_dev)
1916 {
1917 struct bnx2x *bp = netdev_priv(dev);
1918
1919 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1920 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1921 u16 ether_type = ntohs(hdr->h_proto);
1922
1923 /* Skip VLAN tag if present */
1924 if (ether_type == ETH_P_8021Q) {
1925 struct vlan_ethhdr *vhdr =
1926 (struct vlan_ethhdr *)skb->data;
1927
1928 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929 }
1930
1931 /* If ethertype is FCoE or FIP - use FCoE ring */
1932 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1933 return bnx2x_fcoe_tx(bp, txq_index);
1934 }
1935
1936 /* select a non-FCoE queue */
1937 return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
1938 }
1939
1940 void bnx2x_set_num_queues(struct bnx2x *bp)
1941 {
1942
1943 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1944
1945
1946 if (IS_MF_STORAGE_ONLY(bp))
1947 bp->num_ethernet_queues = 1;
1948
1949
1950 bp->num_cnic_queues = CNIC_SUPPORT(bp);
1951 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1952
1953 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1954 }
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1979 {
1980 int rc, tx, rx;
1981
1982 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1983 rx = BNX2X_NUM_ETH_QUEUES(bp);
1984
1985
1986 if (include_cnic && !NO_FCOE(bp)) {
1987 rx++;
1988 tx++;
1989 }
1990
1991 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1992 if (rc) {
1993 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1994 return rc;
1995 }
1996 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1997 if (rc) {
1998 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1999 return rc;
2000 }
2001
2002 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2003 tx, rx);
2004
2005 return rc;
2006 }
2007
2008 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2009 {
2010 int i;
2011
2012 for_each_queue(bp, i) {
2013 struct bnx2x_fastpath *fp = &bp->fp[i];
2014 u32 mtu;
2015
2016
2017 if (IS_FCOE_IDX(i))
2018
2019
2020
2021
2022
2023
2024 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2025 else
2026 mtu = bp->dev->mtu;
2027 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2028 IP_HEADER_ALIGNMENT_PADDING +
2029 ETH_OVERHEAD +
2030 mtu +
2031 BNX2X_FW_RX_ALIGN_END;
2032 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2033 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2034 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2035 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2036 else
2037 fp->rx_frag_size = 0;
2038 }
2039 }
2040
2041 static int bnx2x_init_rss(struct bnx2x *bp)
2042 {
2043 int i;
2044 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2045
2046
2047
2048
2049 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2050 bp->rss_conf_obj.ind_table[i] =
2051 bp->fp->cl_id +
2052 ethtool_rxfh_indir_default(i, num_eth_queues);
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2063 }
2064
2065 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2066 bool config_hash, bool enable)
2067 {
2068 struct bnx2x_config_rss_params params = {NULL};
2069
2070
2071
2072
2073
2074
2075
2076
2077 params.rss_obj = rss_obj;
2078
2079 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2080
2081 if (enable) {
2082 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2083
2084
2085 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2086 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2087 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2088 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2089 if (rss_obj->udp_rss_v4)
2090 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2091 if (rss_obj->udp_rss_v6)
2092 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2093
2094 if (!CHIP_IS_E1x(bp)) {
2095
2096 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2097 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2098
2099
2100 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2101 }
2102 } else {
2103 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2104 }
2105
2106
2107 params.rss_result_mask = MULTI_MASK;
2108
2109 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2110
2111 if (config_hash) {
2112
2113 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2114 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2115 }
2116
2117 if (IS_PF(bp))
2118 return bnx2x_config_rss(bp, &params);
2119 else
2120 return bnx2x_vfpf_config_rss(bp, &params);
2121 }
2122
2123 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2124 {
2125 struct bnx2x_func_state_params func_params = {NULL};
2126
2127
2128 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2129
2130 func_params.f_obj = &bp->func_obj;
2131 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2132
2133 func_params.params.hw_init.load_phase = load_code;
2134
2135 return bnx2x_func_state_change(bp, &func_params);
2136 }
2137
2138
2139
2140
2141
2142 void bnx2x_squeeze_objects(struct bnx2x *bp)
2143 {
2144 int rc;
2145 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2146 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2147 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2148
2149
2150
2151
2152 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2153
2154 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2155
2156
2157 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2158 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2159 &ramrod_flags);
2160 if (rc != 0)
2161 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2162
2163
2164 vlan_mac_flags = 0;
2165 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2166 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2167 &ramrod_flags);
2168 if (rc != 0)
2169 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2170
2171
2172 rparam.mcast_obj = &bp->mcast_obj;
2173 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2174
2175
2176
2177
2178
2179 netif_addr_lock_bh(bp->dev);
2180 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2181 if (rc < 0)
2182 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2183 rc);
2184
2185
2186 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187 while (rc != 0) {
2188 if (rc < 0) {
2189 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2190 rc);
2191 netif_addr_unlock_bh(bp->dev);
2192 return;
2193 }
2194
2195 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2196 }
2197 netif_addr_unlock_bh(bp->dev);
2198 }
2199
2200 #ifndef BNX2X_STOP_ON_ERROR
2201 #define LOAD_ERROR_EXIT(bp, label) \
2202 do { \
2203 (bp)->state = BNX2X_STATE_ERROR; \
2204 goto label; \
2205 } while (0)
2206
2207 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2208 do { \
2209 bp->cnic_loaded = false; \
2210 goto label; \
2211 } while (0)
2212 #else
2213 #define LOAD_ERROR_EXIT(bp, label) \
2214 do { \
2215 (bp)->state = BNX2X_STATE_ERROR; \
2216 (bp)->panic = 1; \
2217 return -EBUSY; \
2218 } while (0)
2219 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2220 do { \
2221 bp->cnic_loaded = false; \
2222 (bp)->panic = 1; \
2223 return -EBUSY; \
2224 } while (0)
2225 #endif
2226
2227 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2228 {
2229 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2230 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2231 return;
2232 }
2233
2234 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2235 {
2236 int num_groups, vf_headroom = 0;
2237 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2238
2239
2240 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2241
2242
2243
2244
2245
2246
2247 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2248
2249
2250
2251
2252
2253
2254 if (IS_SRIOV(bp))
2255 vf_headroom = bnx2x_vf_headroom(bp);
2256
2257
2258
2259
2260
2261
2262 num_groups =
2263 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2264 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2265 1 : 0));
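/* One stats query command group covers STATS_QUERY_CMD_COUNT queries;
 * any remainder needs one extra group. VF queries are accounted for via
 * the headroom computed above.
 */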
2266
2267 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2268 bp->fw_stats_num, vf_headroom, num_groups);
2269 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2270 num_groups * sizeof(struct stats_query_cmd_group);
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2281 sizeof(struct per_pf_stats) +
2282 sizeof(struct fcoe_statistics_params) +
2283 sizeof(struct per_queue_stats) * num_queue_stats +
2284 sizeof(struct stats_counter);
2285
2286 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2287 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2288 if (!bp->fw_stats)
2289 goto alloc_mem_err;
2290
2291
2292 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2293 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2294 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2295 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2296 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2297 bp->fw_stats_req_sz;
2298
2299 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2300 U64_HI(bp->fw_stats_req_mapping),
2301 U64_LO(bp->fw_stats_req_mapping));
2302 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2303 U64_HI(bp->fw_stats_data_mapping),
2304 U64_LO(bp->fw_stats_data_mapping));
2305 return 0;
2306
2307 alloc_mem_err:
2308 bnx2x_free_fw_stats_mem(bp);
2309 BNX2X_ERR("Can't allocate FW stats memory\n");
2310 return -ENOMEM;
2311 }
2312
2313
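/* Ask the management CPU (MCP) for permission to load: latch the current
 * mailbox sequence and driver-pulse counters from shmem, then issue
 * DRV_MSG_CODE_LOAD_REQ (requesting LFA, and forcing it after an UNDI
 * boot in MF-SD mode). Returns -EBUSY if the MCP does not answer or
 * refuses the load.
 */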
2314 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2315 {
2316 u32 param;
2317
2318
2319 bp->fw_seq =
2320 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2321 DRV_MSG_SEQ_NUMBER_MASK);
2322 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2323
2324
2325 bp->fw_drv_pulse_wr_seq =
2326 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2327 DRV_PULSE_SEQ_MASK);
2328 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2329
2330 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2331
2332 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2333 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2334
2335
2336 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2337
2338
2339 if (!(*load_code)) {
2340 BNX2X_ERR("MCP response failure, aborting\n");
2341 return -EBUSY;
2342 }
2343
2344
2345
2346
2347 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2348 BNX2X_ERR("MCP refused load request, aborting\n");
2349 return -EBUSY;
2350 }
2351 return 0;
2352 }
2353
2354
2355
2356
2357
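/* When this function is not the one performing the common init (load_code
 * is neither COMMON nor COMMON_CHIP), verify that the firmware already
 * loaded into the chip (read back from XSEM_REG_PRAM) matches the version
 * this driver was built against; bail out with -EBUSY on a mismatch.
 */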
2358 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2359 {
2360
2361 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2362 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2363
2364 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2365 (BCM_5710_FW_MINOR_VERSION << 8) +
2366 (BCM_5710_FW_REVISION_VERSION << 16) +
2367 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2368
2369
2370 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2371
2372 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2373 loaded_fw, my_fw);
2374
2375
2376 if (my_fw != loaded_fw) {
2377 if (print_err)
2378 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2379 loaded_fw, my_fw);
2380 else
2381 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2382 loaded_fw, my_fw);
2383 return -EBUSY;
2384 }
2385 }
2386 return 0;
2387 }
2388
2389
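/* No-MCP path: emulate the MCP load arbitration with per-path/per-port
 * load counters. The first load on the path plays the COMMON role, the
 * first on the port the PORT role, everything else is a plain FUNCTION.
 */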
2390 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2391 {
2392 int path = BP_PATH(bp);
2393
2394 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2395 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2396 bnx2x_load_count[path][2]);
2397 bnx2x_load_count[path][0]++;
2398 bnx2x_load_count[path][1 + port]++;
2399 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2400 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2401 bnx2x_load_count[path][2]);
2402 if (bnx2x_load_count[path][0] == 1)
2403 return FW_MSG_CODE_DRV_LOAD_COMMON;
2404 else if (bnx2x_load_count[path][1 + port] == 1)
2405 return FW_MSG_CODE_DRV_LOAD_PORT;
2406 else
2407 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2408 }
2409
2410
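/* Mark this function as the port management function (PMF) when it got a
 * COMMON/COMMON_CHIP/PORT load response; the barrier makes sure the new
 * bp->port.pmf value is visible before the code that follows relies on it.
 */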
2411 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2412 {
2413 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2414 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2415 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2416 bp->port.pmf = 1;
2417
2418
2419
2420
2421 smp_mb();
2422 } else {
2423 bp->port.pmf = 0;
2424 }
2425
2426 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2427 }
2428
2429 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2430 {
2431 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2432 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2433 (bp->common.shmem2_base)) {
2434 if (SHMEM2_HAS(bp, dcc_support))
2435 SHMEM2_WR(bp, dcc_support,
2436 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2437 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2438 if (SHMEM2_HAS(bp, afex_driver_support))
2439 SHMEM2_WR(bp, afex_driver_support,
2440 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2441 }
2442
2443
2444 bp->afex_def_vlan_tag = -1;
2445 }
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
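/* Zero a fastpath structure between loads while preserving the members
 * that must survive (napi, tpa_info), then rebuild the back-pointer,
 * max_cos, the txdata pointers and the TPA mode implied by the netdev
 * features (LRO/GRO_HW), disabling TPA for FCoE or when globally off.
 */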
2456 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2457 {
2458 struct bnx2x_fastpath *fp = &bp->fp[index];
2459 int cos;
2460 struct napi_struct orig_napi = fp->napi;
2461 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2462
2463
2464 if (fp->tpa_info)
2465 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2466 sizeof(struct bnx2x_agg_info));
2467 memset(fp, 0, sizeof(*fp));
2468
2469
2470 fp->napi = orig_napi;
2471 fp->tpa_info = orig_tpa_info;
2472 fp->bp = bp;
2473 fp->index = index;
2474 if (IS_ETH_FP(fp))
2475 fp->max_cos = bp->max_cos;
2476 else
2477
2478 fp->max_cos = 1;
2479
2480
2481 if (IS_FCOE_FP(fp))
2482 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2483 if (IS_ETH_FP(fp))
2484 for_each_cos_in_tx_queue(fp, cos)
2485 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2486 BNX2X_NUM_ETH_QUEUES(bp) + index];
2487
2488
2489
2490
2491 if (bp->dev->features & NETIF_F_LRO)
2492 fp->mode = TPA_MODE_LRO;
2493 else if (bp->dev->features & NETIF_F_GRO_HW)
2494 fp->mode = TPA_MODE_GRO;
2495 else
2496 fp->mode = TPA_MODE_DISABLED;
2497
2498
2499
2500
2501 if (bp->disable_tpa || IS_FCOE_FP(fp))
2502 fp->mode = TPA_MODE_DISABLED;
2503 }
2504
2505 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2506 {
2507 u32 cur;
2508
2509 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2510 return;
2511
2512 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2513 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2514 cur, state);
2515
2516 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2517 }
2518
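/* Bring up the CNIC (iSCSI/FCoE offload) side after the main NIC load:
 * allocate the cnic memory and fastpath rings, expose the extra queue,
 * init the cnic HW and IRQ context, set up the cnic queues on a PF and
 * finally hand the updated information to the cnic driver.
 */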
2519 int bnx2x_load_cnic(struct bnx2x *bp)
2520 {
2521 int i, rc, port = BP_PORT(bp);
2522
2523 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2524
2525 mutex_init(&bp->cnic_mutex);
2526
2527 if (IS_PF(bp)) {
2528 rc = bnx2x_alloc_mem_cnic(bp);
2529 if (rc) {
2530 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532 }
2533 }
2534
2535 rc = bnx2x_alloc_fp_mem_cnic(bp);
2536 if (rc) {
2537 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2538 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539 }
2540
2541
2542 rc = bnx2x_set_real_num_queues(bp, 1);
2543 if (rc) {
2544 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2545 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2546 }
2547
2548
2549 bnx2x_add_all_napi_cnic(bp);
2550 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2551 bnx2x_napi_enable_cnic(bp);
2552
2553 rc = bnx2x_init_hw_func_cnic(bp);
2554 if (rc)
2555 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2556
2557 bnx2x_nic_init_cnic(bp);
2558
2559 if (IS_PF(bp)) {
2560
2561 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2562
2563
2564 for_each_cnic_queue(bp, i) {
2565 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2566 if (rc) {
2567 BNX2X_ERR("Queue setup failed\n");
2568 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2569 }
2570 }
2571 }
2572
2573
2574 bnx2x_set_rx_mode_inner(bp);
2575
2576
2577 bnx2x_get_iscsi_info(bp);
2578 bnx2x_setup_cnic_irq_info(bp);
2579 bnx2x_setup_cnic_info(bp);
2580 bp->cnic_loaded = true;
2581 if (bp->state == BNX2X_STATE_OPEN)
2582 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2583
2584 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2585
2586 return 0;
2587
2588 #ifndef BNX2X_STOP_ON_ERROR
2589 load_error_cnic2:
2590
2591 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2592
2593 load_error_cnic1:
2594 bnx2x_napi_disable_cnic(bp);
2595
2596 if (bnx2x_set_real_num_queues(bp, 0))
2597 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2598 load_error_cnic0:
2599 BNX2X_ERR("CNIC-related load failed\n");
2600 bnx2x_free_fp_mem_cnic(bp);
2601 bnx2x_free_mem_cnic(bp);
2602 return rc;
2603 #endif
2604 }
2605
2606
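/* Main load path, must be called with the rtnl lock held: reset the
 * fastpath structures, allocate memory, negotiate the load with the MCP
 * (or emulate it), init HW and IRQs, set up the leading and non-default
 * queues, configure RSS and the MAC, bring up the link and enable Tx.
 * Failures unwind through the load_error* labels below.
 */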
2607 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2608 {
2609 int port = BP_PORT(bp);
2610 int i, rc = 0, load_code = 0;
2611
2612 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2613 DP(NETIF_MSG_IFUP,
2614 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2615
2616 #ifdef BNX2X_STOP_ON_ERROR
2617 if (unlikely(bp->panic)) {
2618 BNX2X_ERR("Can't load NIC when there is panic\n");
2619 return -EPERM;
2620 }
2621 #endif
2622
2623 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2624
2625
2626 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2627 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2628 &bp->last_reported_link.link_report_flags);
2629
2630 if (IS_PF(bp))
2631
2632 bnx2x_ilt_set_info(bp);
2633
2634
2635
2636
2637
2638
2639 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2640 for_each_queue(bp, i)
2641 bnx2x_bz_fp(bp, i);
2642 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2643 bp->num_cnic_queues) *
2644 sizeof(struct bnx2x_fp_txdata));
2645
2646 bp->fcoe_init = false;
2647
2648
2649 bnx2x_set_rx_buf_size(bp);
2650
2651 if (IS_PF(bp)) {
2652 rc = bnx2x_alloc_mem(bp);
2653 if (rc) {
2654 BNX2X_ERR("Unable to allocate bp memory\n");
2655 return rc;
2656 }
2657 }
2658
2659
2660
2661
2662 rc = bnx2x_alloc_fp_mem(bp);
2663 if (rc) {
2664 BNX2X_ERR("Unable to allocate memory for fps\n");
2665 LOAD_ERROR_EXIT(bp, load_error0);
2666 }
2667
2668
2669 if (bnx2x_alloc_fw_stats_mem(bp))
2670 LOAD_ERROR_EXIT(bp, load_error0);
2671
2672
2673 if (IS_VF(bp)) {
2674 rc = bnx2x_vfpf_init(bp);
2675 if (rc)
2676 LOAD_ERROR_EXIT(bp, load_error0);
2677 }
2678
2679
2680
2681
2682
2683 rc = bnx2x_set_real_num_queues(bp, 0);
2684 if (rc) {
2685 BNX2X_ERR("Unable to set real_num_queues\n");
2686 LOAD_ERROR_EXIT(bp, load_error0);
2687 }
2688
2689
2690
2691
2692
2693 bnx2x_setup_tc(bp->dev, bp->max_cos);
2694
2695
2696 bnx2x_add_all_napi(bp);
2697 DP(NETIF_MSG_IFUP, "napi added\n");
2698 bnx2x_napi_enable(bp);
2699
2700 if (IS_PF(bp)) {
2701
2702 bnx2x_set_pf_load(bp);
2703
2704
2705 if (!BP_NOMCP(bp)) {
2706
2707 rc = bnx2x_nic_load_request(bp, &load_code);
2708 if (rc)
2709 LOAD_ERROR_EXIT(bp, load_error1);
2710
2711
2712 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2713 if (rc) {
2714 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2715 LOAD_ERROR_EXIT(bp, load_error2);
2716 }
2717 } else {
2718 load_code = bnx2x_nic_load_no_mcp(bp, port);
2719 }
2720
2721
2722 bnx2x_nic_load_pmf(bp, load_code);
2723
2724
2725 bnx2x__init_func_obj(bp);
2726
2727
2728 rc = bnx2x_init_hw(bp, load_code);
2729 if (rc) {
2730 BNX2X_ERR("HW init failed, aborting\n");
2731 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2732 LOAD_ERROR_EXIT(bp, load_error2);
2733 }
2734 }
2735
2736 bnx2x_pre_irq_nic_init(bp);
2737
2738
2739 rc = bnx2x_setup_irqs(bp);
2740 if (rc) {
2741 BNX2X_ERR("setup irqs failed\n");
2742 if (IS_PF(bp))
2743 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2744 LOAD_ERROR_EXIT(bp, load_error2);
2745 }
2746
2747
2748 if (IS_PF(bp)) {
2749
2750 bnx2x_post_irq_nic_init(bp, load_code);
2751
2752 bnx2x_init_bp_objs(bp);
2753 bnx2x_iov_nic_init(bp);
2754
2755
2756 bp->afex_def_vlan_tag = -1;
2757 bnx2x_nic_load_afex_dcc(bp, load_code);
2758 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2759 rc = bnx2x_func_start(bp);
2760 if (rc) {
2761 BNX2X_ERR("Function start failed!\n");
2762 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2763
2764 LOAD_ERROR_EXIT(bp, load_error3);
2765 }
2766
2767
2768 if (!BP_NOMCP(bp)) {
2769 load_code = bnx2x_fw_command(bp,
2770 DRV_MSG_CODE_LOAD_DONE, 0);
2771 if (!load_code) {
2772 BNX2X_ERR("MCP response failure, aborting\n");
2773 rc = -EBUSY;
2774 LOAD_ERROR_EXIT(bp, load_error3);
2775 }
2776 }
2777
2778
2779 bnx2x_update_coalesce(bp);
2780 }
2781
2782
2783 rc = bnx2x_setup_leading(bp);
2784 if (rc) {
2785 BNX2X_ERR("Setup leading failed!\n");
2786 LOAD_ERROR_EXIT(bp, load_error3);
2787 }
2788
2789
2790 for_each_nondefault_eth_queue(bp, i) {
2791 if (IS_PF(bp))
2792 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2793 else
2794 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2795 if (rc) {
2796 BNX2X_ERR("Queue %d setup failed\n", i);
2797 LOAD_ERROR_EXIT(bp, load_error3);
2798 }
2799 }
2800
2801
2802 rc = bnx2x_init_rss(bp);
2803 if (rc) {
2804 BNX2X_ERR("PF RSS init failed\n");
2805 LOAD_ERROR_EXIT(bp, load_error3);
2806 }
2807
2808
2809 bp->state = BNX2X_STATE_OPEN;
2810
2811
2812 if (IS_PF(bp))
2813 rc = bnx2x_set_eth_mac(bp, true);
2814 else
2815 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2816 true);
2817 if (rc) {
2818 BNX2X_ERR("Setting Ethernet MAC failed\n");
2819 LOAD_ERROR_EXIT(bp, load_error3);
2820 }
2821
2822 if (IS_PF(bp) && bp->pending_max) {
2823 bnx2x_update_max_mf_config(bp, bp->pending_max);
2824 bp->pending_max = 0;
2825 }
2826
2827 bp->force_link_down = false;
2828 if (bp->port.pmf) {
2829 rc = bnx2x_initial_phy_init(bp, load_mode);
2830 if (rc)
2831 LOAD_ERROR_EXIT(bp, load_error3);
2832 }
2833 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2834
2835
2836
2837
2838 rc = bnx2x_vlan_reconfigure_vid(bp);
2839 if (rc)
2840 LOAD_ERROR_EXIT(bp, load_error3);
2841
2842
2843 bnx2x_set_rx_mode_inner(bp);
2844
2845 if (bp->flags & PTP_SUPPORTED) {
2846 bnx2x_register_phc(bp);
2847 bnx2x_init_ptp(bp);
2848 bnx2x_configure_ptp_filters(bp);
2849 }
2850
2851 switch (load_mode) {
2852 case LOAD_NORMAL:
2853
2854 netif_tx_wake_all_queues(bp->dev);
2855 break;
2856
2857 case LOAD_OPEN:
2858 netif_tx_start_all_queues(bp->dev);
2859 smp_mb__after_atomic();
2860 break;
2861
2862 case LOAD_DIAG:
2863 case LOAD_LOOPBACK_EXT:
2864 bp->state = BNX2X_STATE_DIAG;
2865 break;
2866
2867 default:
2868 break;
2869 }
2870
2871 if (bp->port.pmf)
2872 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2873 else
2874 bnx2x__link_status_update(bp);
2875
2876
2877 mod_timer(&bp->timer, jiffies + bp->current_interval);
2878
2879 if (CNIC_ENABLED(bp))
2880 bnx2x_load_cnic(bp);
2881
2882 if (IS_PF(bp))
2883 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2884
2885 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2886
2887 u32 val;
2888 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2889 val &= ~DRV_FLAGS_MTU_MASK;
2890 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2891 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2892 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2893 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2894 }
2895
2896
2897 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2898 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2899 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2900 return -EBUSY;
2901 }
2902
2903
2904 if (IS_PF(bp))
2905 bnx2x_update_mfw_dump(bp);
2906
2907
2908 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2909 bnx2x_dcbx_init(bp, false);
2910
2911 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2912 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2913
2914 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2915
2916 return 0;
2917
2918 #ifndef BNX2X_STOP_ON_ERROR
2919 load_error3:
2920 if (IS_PF(bp)) {
2921 bnx2x_int_disable_sync(bp, 1);
2922
2923
2924 bnx2x_squeeze_objects(bp);
2925 }
2926
2927
2928 bnx2x_free_skbs(bp);
2929 for_each_rx_queue(bp, i)
2930 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2931
2932
2933 bnx2x_free_irq(bp);
2934 load_error2:
2935 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2936 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2938 }
2939
2940 bp->port.pmf = 0;
2941 load_error1:
2942 bnx2x_napi_disable(bp);
2943 bnx2x_del_all_napi(bp);
2944
2945
2946 if (IS_PF(bp))
2947 bnx2x_clear_pf_load(bp);
2948 load_error0:
2949 bnx2x_free_fw_stats_mem(bp);
2950 bnx2x_free_fp_mem(bp);
2951 bnx2x_free_mem(bp);
2952
2953 return rc;
2954 #endif
2955 }
2956
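/* Wait until the transmit completions for every CoS of every Tx queue
 * have been processed; returns non-zero if a queue failed to drain.
 */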
2957 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2958 {
2959 int rc = 0, cos, i;
2960
2961
2962 for_each_tx_queue(bp, i) {
2963 struct bnx2x_fastpath *fp = &bp->fp[i];
2964
2965 for_each_cos_in_tx_queue(fp, cos)
2966 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2967 if (rc)
2968 return rc;
2969 }
2970 return 0;
2971 }
2972
2973
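/* Unload path, must be called with the rtnl lock held: notify the
 * management FW and cnic, stop Tx and the timer, drain the Tx queues
 * (unless this is a recovery unload), clean up the chip or only the
 * driver state, release IRQs/NAPI, free all rings and memory, and update
 * the recovery bookkeeping (leader lock, reset-in-progress flags).
 */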
2974 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2975 {
2976 int i;
2977 bool global = false;
2978
2979 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2980
2981 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2982 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2983
2984
2985 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2986 u32 val;
2987 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2988 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2989 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2990 }
2991
2992 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2993 (bp->state == BNX2X_STATE_CLOSED ||
2994 bp->state == BNX2X_STATE_ERROR)) {
2995
2996
2997
2998
2999
3000
3001
3002 bp->recovery_state = BNX2X_RECOVERY_DONE;
3003 bp->is_leader = 0;
3004 bnx2x_release_leader_lock(bp);
3005 smp_mb();
3006
3007 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3008 BNX2X_ERR("Can't unload in closed or error state\n");
3009 return -EINVAL;
3010 }
3011
3012
3013
3014
3015
3016
3017
3018 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3019 return 0;
3020
3021
3022
3023
3024
3025 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3026 smp_mb();
3027
3028
3029 bnx2x_iov_channel_down(bp);
3030
3031 if (CNIC_LOADED(bp))
3032 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3033
3034
3035 bnx2x_tx_disable(bp);
3036 netdev_reset_tc(bp->dev);
3037
3038 bp->rx_mode = BNX2X_RX_MODE_NONE;
3039
3040 del_timer_sync(&bp->timer);
3041
3042 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3043
3044 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3045 bnx2x_drv_pulse(bp);
3046 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3047 bnx2x_save_statistics(bp);
3048 }
3049
3050
3051
3052
3053
3054 if (unload_mode != UNLOAD_RECOVERY)
3055 bnx2x_drain_tx_queues(bp);
3056
3057
3058
3059
3060 if (IS_VF(bp)) {
3061 bnx2x_clear_vlan_info(bp);
3062 bnx2x_vfpf_close_vf(bp);
3063 } else if (unload_mode != UNLOAD_RECOVERY) {
3064
3065 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3066 } else {
3067
3068 bnx2x_send_unload_req(bp, unload_mode);
3069
3070
3071
3072
3073
3074
3075
3076 if (!CHIP_IS_E1x(bp))
3077 bnx2x_pf_disable(bp);
3078
3079
3080 bnx2x_netif_stop(bp, 1);
3081
3082 bnx2x_del_all_napi(bp);
3083 if (CNIC_LOADED(bp))
3084 bnx2x_del_all_napi_cnic(bp);
3085
3086 bnx2x_free_irq(bp);
3087
3088
3089 bnx2x_send_unload_done(bp, false);
3090 }
3091
3092
3093
3094
3095
3096 if (IS_PF(bp))
3097 bnx2x_squeeze_objects(bp);
3098
3099
3100 bp->sp_state = 0;
3101
3102 bp->port.pmf = 0;
3103
3104
3105 bp->sp_rtnl_state = 0;
3106 smp_mb();
3107
3108
3109 bnx2x_free_skbs(bp);
3110 if (CNIC_LOADED(bp))
3111 bnx2x_free_skbs_cnic(bp);
3112 for_each_rx_queue(bp, i)
3113 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3114
3115 bnx2x_free_fp_mem(bp);
3116 if (CNIC_LOADED(bp))
3117 bnx2x_free_fp_mem_cnic(bp);
3118
3119 if (IS_PF(bp)) {
3120 if (CNIC_LOADED(bp))
3121 bnx2x_free_mem_cnic(bp);
3122 }
3123 bnx2x_free_mem(bp);
3124
3125 bp->state = BNX2X_STATE_CLOSED;
3126 bp->cnic_loaded = false;
3127
3128
3129 if (IS_PF(bp) && !BP_NOMCP(bp))
3130 bnx2x_update_mng_version(bp);
3131
3132
3133
3134
3135 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3136 bnx2x_set_reset_in_progress(bp);
3137
3138
3139 if (global)
3140 bnx2x_set_reset_global(bp);
3141 }
3142
3143
3144
3145
3146 if (IS_PF(bp) &&
3147 !bnx2x_clear_pf_load(bp) &&
3148 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3149 bnx2x_disable_close_the_gate(bp);
3150
3151 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3152
3153 return 0;
3154 }
3155
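/* Program the PCI PM control/status register to move the device to D0 or
 * D3hot (setting PME enable when WoL is configured). D3hot is skipped
 * while other functions still have the device enabled and on emulation
 * parts.
 */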
3156 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3157 {
3158 u16 pmcsr;
3159
3160
3161 if (!bp->pdev->pm_cap) {
3162 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3163 return 0;
3164 }
3165
3166 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3167
3168 switch (state) {
3169 case PCI_D0:
3170 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3171 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3172 PCI_PM_CTRL_PME_STATUS));
3173
3174 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3175
3176 msleep(20);
3177 break;
3178
3179 case PCI_D3hot:
3180
3181
3182 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3183 return 0;
3184
3185 if (CHIP_REV_IS_SLOW(bp))
3186 return 0;
3187
3188 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3189 pmcsr |= 3;
3190
3191 if (bp->wol)
3192 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3193
3194 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3195 pmcsr);
3196
3197
3198
3199
3200 break;
3201
3202 default:
3203 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3204 return -EINVAL;
3205 }
3206 return 0;
3207 }
3208
3209
3210
3211
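/* NAPI poll handler: service Tx completions on every CoS, then up to
 * @budget Rx packets. The interrupt is re-armed through the IGU only when
 * napi_complete_done() succeeds and no new work has shown up after the
 * fastpath status-block index was refreshed.
 */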
3212 static int bnx2x_poll(struct napi_struct *napi, int budget)
3213 {
3214 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3215 napi);
3216 struct bnx2x *bp = fp->bp;
3217 int rx_work_done;
3218 u8 cos;
3219
3220 #ifdef BNX2X_STOP_ON_ERROR
3221 if (unlikely(bp->panic)) {
3222 napi_complete(napi);
3223 return 0;
3224 }
3225 #endif
3226 for_each_cos_in_tx_queue(fp, cos)
3227 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3228 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3229
3230 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3231
3232 if (rx_work_done < budget) {
3233
3234
3235
3236
3237 if (IS_FCOE_FP(fp)) {
3238 napi_complete_done(napi, rx_work_done);
3239 } else {
3240 bnx2x_update_fpsb_idx(fp);
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
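/* Order the status-block index update above against the has_rx_work/
 * has_tx_work checks below: without the barrier the driver could ack a
 * status-block index newer than the work it has actually processed and
 * then miss the interrupt for the remaining work.
 */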
3254 rmb();
3255
3256 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3257 if (napi_complete_done(napi, rx_work_done)) {
3258
3259 DP(NETIF_MSG_RX_STATUS,
3260 "Update index to %d\n", fp->fp_hc_idx);
3261 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3262 le16_to_cpu(fp->fp_hc_idx),
3263 IGU_INT_ENABLE, 1);
3264 }
3265 } else {
3266 rx_work_done = budget;
3267 }
3268 }
3269 }
3270
3271 return rx_work_done;
3272 }
3273
3274
3275
3276
3277
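/* Split the first BD of an LSO packet into a header-only BD of @hlen
 * bytes and a data BD that reuses the same DMA mapping at offset @hlen;
 * flags the sw_tx_bd with BNX2X_TSO_SPLIT_BD so the split is undone on
 * completion, and returns the advanced bd_prod.
 */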
3278 static u16 bnx2x_tx_split(struct bnx2x *bp,
3279 struct bnx2x_fp_txdata *txdata,
3280 struct sw_tx_bd *tx_buf,
3281 struct eth_tx_start_bd **tx_bd, u16 hlen,
3282 u16 bd_prod)
3283 {
3284 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3285 struct eth_tx_bd *d_tx_bd;
3286 dma_addr_t mapping;
3287 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3288
3289
3290 h_tx_bd->nbytes = cpu_to_le16(hlen);
3291
3292 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3293 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3294
3295
3296
3297 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3298 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3299
3300 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3301 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3302
3303 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3304 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3305 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3306
3307
3308 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3309
3310 DP(NETIF_MSG_TX_QUEUED,
3311 "TSO split data size is %d (%x:%x)\n",
3312 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3313
3314
3315 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3316
3317 return bd_prod;
3318 }
3319
3320 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3321 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3322 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3323 {
3324 __sum16 tsum = (__force __sum16) csum;
3325
3326 if (fix > 0)
3327 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3328 csum_partial(t_header - fix, fix, 0)));
3329
3330 else if (fix < 0)
3331 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3332 csum_partial(t_header, -fix, 0)));
3333
3334 return bswab16(tsum);
3335 }
3336
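/* Classify an skb into a bitmask of XMIT_* flags (plain, IPv4/IPv6
 * checksum, TCP, tunnel/encapsulation and GSO variants) that drives the
 * BD and parsing-BD setup in bnx2x_start_xmit().
 */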
3337 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3338 {
3339 u32 rc;
3340 __u8 prot = 0;
3341 __be16 protocol;
3342
3343 if (skb->ip_summed != CHECKSUM_PARTIAL)
3344 return XMIT_PLAIN;
3345
3346 protocol = vlan_get_protocol(skb);
3347 if (protocol == htons(ETH_P_IPV6)) {
3348 rc = XMIT_CSUM_V6;
3349 prot = ipv6_hdr(skb)->nexthdr;
3350 } else {
3351 rc = XMIT_CSUM_V4;
3352 prot = ip_hdr(skb)->protocol;
3353 }
3354
3355 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3356 if (inner_ip_hdr(skb)->version == 6) {
3357 rc |= XMIT_CSUM_ENC_V6;
3358 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3359 rc |= XMIT_CSUM_TCP;
3360 } else {
3361 rc |= XMIT_CSUM_ENC_V4;
3362 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3363 rc |= XMIT_CSUM_TCP;
3364 }
3365 }
3366 if (prot == IPPROTO_TCP)
3367 rc |= XMIT_CSUM_TCP;
3368
3369 if (skb_is_gso(skb)) {
3370 if (skb_is_gso_v6(skb)) {
3371 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3372 if (rc & XMIT_CSUM_ENC)
3373 rc |= XMIT_GSO_ENC_V6;
3374 } else {
3375 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3376 if (rc & XMIT_CSUM_ENC)
3377 rc |= XMIT_GSO_ENC_V4;
3378 }
3379 }
3380
3381 return rc;
3382 }
3383
3384
3385 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3386
3387
3388 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3389
3390 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3391
3392
3393
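/* Decide whether a heavily fragmented skb must be linearized: slide a
 * window of (MAX_FETCH_BD - reserved BDs) descriptors over the fragments
 * and request linearization if any window sums to less than the LSO MSS,
 * or if a non-GSO skb simply has too many fragments for one fetch.
 */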
3394 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3395 u32 xmit_type)
3396 {
3397 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3398 int to_copy = 0, hlen = 0;
3399
3400 if (xmit_type & XMIT_GSO_ENC)
3401 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3402
3403 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3404 if (xmit_type & XMIT_GSO) {
3405 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3406 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3407
3408 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3409 int wnd_idx = 0;
3410 int frag_idx = 0;
3411 u32 wnd_sum = 0;
3412
3413
3414 if (xmit_type & XMIT_GSO_ENC)
3415 hlen = (int)(skb_inner_transport_header(skb) -
3416 skb->data) +
3417 inner_tcp_hdrlen(skb);
3418 else
3419 hlen = (int)(skb_transport_header(skb) -
3420 skb->data) + tcp_hdrlen(skb);
3421
3422
3423 first_bd_sz = skb_headlen(skb) - hlen;
3424
3425 wnd_sum = first_bd_sz;
3426
3427
3428 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3429 wnd_sum +=
3430 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3431
3432
3433 if (first_bd_sz > 0) {
3434 if (unlikely(wnd_sum < lso_mss)) {
3435 to_copy = 1;
3436 goto exit_lbl;
3437 }
3438
3439 wnd_sum -= first_bd_sz;
3440 }
3441
3442
3443
3444 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3445 wnd_sum +=
3446 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3447
3448 if (unlikely(wnd_sum < lso_mss)) {
3449 to_copy = 1;
3450 break;
3451 }
3452 wnd_sum -=
3453 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3454 }
3455 } else {
3456
3457
3458 to_copy = 1;
3459 }
3460 }
3461
3462 exit_lbl:
3463 if (unlikely(to_copy))
3464 DP(NETIF_MSG_TX_QUEUED,
3465 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3466 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3467 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3468
3469 return to_copy;
3470 }
3471 #endif
3472
3473
3474
3475
3476
3477
3478
3479
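/* Fill the E1x parsing BD for an LSO packet: MSS, TCP sequence number and
 * flags, and the pseudo-header checksum (computed without the length, as
 * flagged in global_data) for either IPv4 or IPv6.
 */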
3480 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3481 struct eth_tx_parse_bd_e1x *pbd,
3482 u32 xmit_type)
3483 {
3484 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3485 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3486 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3487
3488 if (xmit_type & XMIT_GSO_V4) {
3489 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3490 pbd->tcp_pseudo_csum =
3491 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3492 ip_hdr(skb)->daddr,
3493 0, IPPROTO_TCP, 0));
3494 } else {
3495 pbd->tcp_pseudo_csum =
3496 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3497 &ipv6_hdr(skb)->daddr,
3498 0, IPPROTO_TCP, 0));
3499 }
3500
3501 pbd->global_data |=
3502 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3503 }
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
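/* Encode inner-header checksum offload info for tunnelled packets on E2+
 * chips: the inner L4 header offset and TCP header length go into
 * @parsing_data; the return value is the offset (from skb->data) of the
 * end of the inner TCP header, or of the inner UDP header for UDP.
 */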
3515 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3516 u32 *parsing_data, u32 xmit_type)
3517 {
3518 *parsing_data |=
3519 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3520 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3521 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3522
3523 if (xmit_type & XMIT_CSUM_TCP) {
3524 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3525 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3526 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3527
3528 return skb_inner_transport_header(skb) +
3529 inner_tcp_hdrlen(skb) - skb->data;
3530 }
3531
3532
3533
3534
3535 return skb_inner_transport_header(skb) +
3536 sizeof(struct udphdr) - skb->data;
3537 }
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3550 u32 *parsing_data, u32 xmit_type)
3551 {
3552 *parsing_data |=
3553 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3554 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3555 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3556
3557 if (xmit_type & XMIT_CSUM_TCP) {
3558 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3559 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3560 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3561
3562 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3563 }
3564
3565
3566
3567 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3568 }
3569
3570
3571 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3572 struct eth_tx_start_bd *tx_start_bd,
3573 u32 xmit_type)
3574 {
3575 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3576
3577 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3578 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3579
3580 if (!(xmit_type & XMIT_CSUM_TCP))
3581 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3582 }
3583
3584
3585
3586
3587
3588
3589
3590
3591
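/* Fill the E1x parsing BD for checksum offload: header offsets/lengths in
 * 16-bit words and the (possibly fixed-up) checksum; returns the total
 * header length in bytes.
 */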
3592 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3593 struct eth_tx_parse_bd_e1x *pbd,
3594 u32 xmit_type)
3595 {
3596 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3597
3598
3599 pbd->global_data =
3600 cpu_to_le16(hlen |
3601 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3602 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3603
3604 pbd->ip_hlen_w = (skb_transport_header(skb) -
3605 skb_network_header(skb)) >> 1;
3606
3607 hlen += pbd->ip_hlen_w;
3608
3609
3610 if (xmit_type & XMIT_CSUM_TCP)
3611 hlen += tcp_hdrlen(skb) / 2;
3612 else
3613 hlen += sizeof(struct udphdr) / 2;
3614
3615 pbd->total_hlen_w = cpu_to_le16(hlen);
3616 hlen = hlen*2;
3617
3618 if (xmit_type & XMIT_CSUM_TCP) {
3619 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3620
3621 } else {
3622 s8 fix = SKB_CS_OFF(skb);
3623
3624 DP(NETIF_MSG_TX_QUEUED,
3625 "hlen %d fix %d csum before fix %x\n",
3626 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3627
3628
3629 pbd->tcp_pseudo_csum =
3630 bnx2x_csum_fix(skb_transport_header(skb),
3631 SKB_CS(skb), fix);
3632
3633 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3634 pbd->tcp_pseudo_csum);
3635 }
3636
3637 return hlen;
3638 }
3639
3640 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3641 struct eth_tx_parse_bd_e2 *pbd_e2,
3642 struct eth_tx_parse_2nd_bd *pbd2,
3643 u16 *global_data,
3644 u32 xmit_type)
3645 {
3646 u16 hlen_w = 0;
3647 u8 outerip_off, outerip_len = 0;
3648
3649
3650 hlen_w = (skb_inner_transport_header(skb) -
3651 skb_network_header(skb)) >> 1;
3652
3653
3654 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3655
3656 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3657
3658
3659 if (xmit_type & XMIT_CSUM_V4) {
3660 struct iphdr *iph = ip_hdr(skb);
3661 u32 csum = (__force u32)(~iph->check) -
3662 (__force u32)iph->tot_len -
3663 (__force u32)iph->frag_off;
3664
3665 outerip_len = iph->ihl << 1;
3666
3667 pbd2->fw_ip_csum_wo_len_flags_frag =
3668 bswab16(csum_fold((__force __wsum)csum));
3669 } else {
3670 pbd2->fw_ip_hdr_to_payload_w =
3671 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3672 pbd_e2->data.tunnel_data.flags |=
3673 ETH_TUNNEL_DATA_IPV6_OUTER;
3674 }
3675
3676 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3677
3678 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3679
3680
3681 if (xmit_type & XMIT_CSUM_ENC_V4) {
3682 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3683
3684 pbd_e2->data.tunnel_data.pseudo_csum =
3685 bswab16(~csum_tcpudp_magic(
3686 inner_ip_hdr(skb)->saddr,
3687 inner_ip_hdr(skb)->daddr,
3688 0, IPPROTO_TCP, 0));
3689 } else {
3690 pbd_e2->data.tunnel_data.pseudo_csum =
3691 bswab16(~csum_ipv6_magic(
3692 &inner_ipv6_hdr(skb)->saddr,
3693 &inner_ipv6_hdr(skb)->daddr,
3694 0, IPPROTO_TCP, 0));
3695 }
3696
3697 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3698
3699 *global_data |=
3700 outerip_off |
3701 (outerip_len <<
3702 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3703 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3704 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3705
3706 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3707 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3708 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3709 }
3710 }
3711
3712 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3713 u32 xmit_type)
3714 {
3715 struct ipv6hdr *ipv6;
3716
3717 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3718 return;
3719
3720 if (xmit_type & XMIT_GSO_ENC_V6)
3721 ipv6 = inner_ipv6_hdr(skb);
3722 else
3723 ipv6 = ipv6_hdr(skb);
3724
3725 if (ipv6->nexthdr == NEXTHDR_IPV6)
3726 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3727 }
3728
3729
3730
3731
3732
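/* Transmit entry point, called with the Tx queue lock held by the stack:
 * check ring space, optionally linearize, map the header and fragments,
 * build the start/parsing/data BDs (VLAN, checksum, LSO and tunnel
 * offloads, PTP timestamping), then publish the producer through the
 * doorbell. The queue is stopped when fewer than MAX_DESC_PER_TX_PKT
 * descriptors remain.
 */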
3733 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3734 {
3735 struct bnx2x *bp = netdev_priv(dev);
3736
3737 struct netdev_queue *txq;
3738 struct bnx2x_fp_txdata *txdata;
3739 struct sw_tx_bd *tx_buf;
3740 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3741 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3742 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3743 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3744 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3745 u32 pbd_e2_parsing_data = 0;
3746 u16 pkt_prod, bd_prod;
3747 int nbd, txq_index;
3748 dma_addr_t mapping;
3749 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3750 int i;
3751 u8 hlen = 0;
3752 __le16 pkt_size = 0;
3753 struct ethhdr *eth;
3754 u8 mac_type = UNICAST_ADDRESS;
3755
3756 #ifdef BNX2X_STOP_ON_ERROR
3757 if (unlikely(bp->panic))
3758 return NETDEV_TX_BUSY;
3759 #endif
3760
3761 txq_index = skb_get_queue_mapping(skb);
3762 txq = netdev_get_tx_queue(dev, txq_index);
3763
3764 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3765
3766 txdata = &bp->bnx2x_txq[txq_index];
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3778 skb_shinfo(skb)->nr_frags +
3779 BDS_PER_TX_PKT +
3780 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3781
3782 if (txdata->tx_ring_size == 0) {
3783 struct bnx2x_eth_q_stats *q_stats =
3784 bnx2x_fp_qstats(bp, txdata->parent_fp);
3785 q_stats->driver_filtered_tx_pkt++;
3786 dev_kfree_skb(skb);
3787 return NETDEV_TX_OK;
3788 }
3789 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3790 netif_tx_stop_queue(txq);
3791 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3792
3793 return NETDEV_TX_BUSY;
3794 }
3795
3796 DP(NETIF_MSG_TX_QUEUED,
3797 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3798 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3799 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3800 skb->len);
3801
3802 eth = (struct ethhdr *)skb->data;
3803
3804
3805 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3806 if (is_broadcast_ether_addr(eth->h_dest))
3807 mac_type = BROADCAST_ADDRESS;
3808 else
3809 mac_type = MULTICAST_ADDRESS;
3810 }
3811
3812 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3813
3814
3815
3816 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3817
3818 bp->lin_cnt++;
3819 if (skb_linearize(skb) != 0) {
3820 DP(NETIF_MSG_TX_QUEUED,
3821 "SKB linearization failed - silently dropping this SKB\n");
3822 dev_kfree_skb_any(skb);
3823 return NETDEV_TX_OK;
3824 }
3825 }
3826 #endif
3827
3828 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3829 skb_headlen(skb), DMA_TO_DEVICE);
3830 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3831 DP(NETIF_MSG_TX_QUEUED,
3832 "SKB mapping failed - silently dropping this SKB\n");
3833 dev_kfree_skb_any(skb);
3834 return NETDEV_TX_OK;
3835 }
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848 pkt_prod = txdata->tx_pkt_prod;
3849 bd_prod = TX_BD(txdata->tx_bd_prod);
3850
3851
3852
3853
3854
3855 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3856 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3857 first_bd = tx_start_bd;
3858
3859 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3860
3861 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3862 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3863 bp->eth_stats.ptp_skip_tx_ts++;
3864 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3865 } else if (bp->ptp_tx_skb) {
3866 bp->eth_stats.ptp_skip_tx_ts++;
3867 netdev_err_once(bp->dev,
3868 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3869 } else {
3870 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3871
3872 bp->ptp_tx_skb = skb_get(skb);
3873 bp->ptp_tx_start = jiffies;
3874 schedule_work(&bp->ptp_task);
3875 }
3876 }
3877
3878
3879 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3880
3881
3882 tx_buf->first_bd = txdata->tx_bd_prod;
3883 tx_buf->skb = skb;
3884 tx_buf->flags = 0;
3885
3886 DP(NETIF_MSG_TX_QUEUED,
3887 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3888 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3889
3890 if (skb_vlan_tag_present(skb)) {
3891 tx_start_bd->vlan_or_ethertype =
3892 cpu_to_le16(skb_vlan_tag_get(skb));
3893 tx_start_bd->bd_flags.as_bitfield |=
3894 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3895 } else {
3896
3897
3898
3899 u16 vlan_tci = 0;
3900 #ifndef BNX2X_STOP_ON_ERROR
3901 if (IS_VF(bp)) {
3902 #endif
3903
3904 if (__vlan_get_tag(skb, &vlan_tci)) {
3905 tx_start_bd->vlan_or_ethertype =
3906 cpu_to_le16(ntohs(eth->h_proto));
3907 } else {
3908 tx_start_bd->bd_flags.as_bitfield |=
3909 (X_ETH_INBAND_VLAN <<
3910 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3911 tx_start_bd->vlan_or_ethertype =
3912 cpu_to_le16(vlan_tci);
3913 }
3914 #ifndef BNX2X_STOP_ON_ERROR
3915 } else {
3916
3917 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3918 }
3919 #endif
3920 }
3921
3922 nbd = 2;
3923
3924
3925 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3926
3927 if (xmit_type & XMIT_CSUM)
3928 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3929
3930 if (!CHIP_IS_E1x(bp)) {
3931 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3932 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3933
3934 if (xmit_type & XMIT_CSUM_ENC) {
3935 u16 global_data = 0;
3936
3937
3938 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3939 &pbd_e2_parsing_data,
3940 xmit_type);
3941
3942
3943 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3944
3945 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3946
3947 memset(pbd2, 0, sizeof(*pbd2));
3948
3949 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3950 (skb_inner_network_header(skb) -
3951 skb->data) >> 1;
3952
3953 if (xmit_type & XMIT_GSO_ENC)
3954 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3955 &global_data,
3956 xmit_type);
3957
3958 pbd2->global_data = cpu_to_le16(global_data);
3959
3960
3961 SET_FLAG(tx_start_bd->general_data,
3962 ETH_TX_START_BD_PARSE_NBDS, 1);
3963
3964 SET_FLAG(tx_start_bd->general_data,
3965 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3966
3967 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3968
3969 nbd++;
3970 } else if (xmit_type & XMIT_CSUM) {
3971
3972 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3973 &pbd_e2_parsing_data,
3974 xmit_type);
3975 }
3976
3977 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3978
3979
3980
3981 if (IS_VF(bp)) {
3982
3983 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3984 &pbd_e2->data.mac_addr.src_mid,
3985 &pbd_e2->data.mac_addr.src_lo,
3986 eth->h_source);
3987
3988 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3989 &pbd_e2->data.mac_addr.dst_mid,
3990 &pbd_e2->data.mac_addr.dst_lo,
3991 eth->h_dest);
3992 } else {
3993 if (bp->flags & TX_SWITCHING)
3994 bnx2x_set_fw_mac_addr(
3995 &pbd_e2->data.mac_addr.dst_hi,
3996 &pbd_e2->data.mac_addr.dst_mid,
3997 &pbd_e2->data.mac_addr.dst_lo,
3998 eth->h_dest);
3999 #ifdef BNX2X_STOP_ON_ERROR
4000
4001
4002
4003 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4004 &pbd_e2->data.mac_addr.src_mid,
4005 &pbd_e2->data.mac_addr.src_lo,
4006 eth->h_source);
4007 #endif
4008 }
4009
4010 SET_FLAG(pbd_e2_parsing_data,
4011 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4012 } else {
4013 u16 global_data = 0;
4014 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4015 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4016
4017 if (xmit_type & XMIT_CSUM)
4018 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4019
4020 SET_FLAG(global_data,
4021 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4022 pbd_e1x->global_data |= cpu_to_le16(global_data);
4023 }
4024
4025
4026 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4027 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4028 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4029 pkt_size = tx_start_bd->nbytes;
4030
4031 DP(NETIF_MSG_TX_QUEUED,
4032 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4033 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4034 le16_to_cpu(tx_start_bd->nbytes),
4035 tx_start_bd->bd_flags.as_bitfield,
4036 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4037
4038 if (xmit_type & XMIT_GSO) {
4039
4040 DP(NETIF_MSG_TX_QUEUED,
4041 "TSO packet len %d hlen %d total len %d tso size %d\n",
4042 skb->len, hlen, skb_headlen(skb),
4043 skb_shinfo(skb)->gso_size);
4044
4045 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4046
4047 if (unlikely(skb_headlen(skb) > hlen)) {
4048 nbd++;
4049 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4050 &tx_start_bd, hlen,
4051 bd_prod);
4052 }
4053 if (!CHIP_IS_E1x(bp))
4054 pbd_e2_parsing_data |=
4055 (skb_shinfo(skb)->gso_size <<
4056 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4057 ETH_TX_PARSE_BD_E2_LSO_MSS;
4058 else
4059 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4060 }
4061
4062
4063
4064
4065 if (pbd_e2_parsing_data)
4066 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4067
4068 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4069
4070
4071 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4072 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4073
4074 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4075 skb_frag_size(frag), DMA_TO_DEVICE);
4076 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4077 unsigned int pkts_compl = 0, bytes_compl = 0;
4078
4079 DP(NETIF_MSG_TX_QUEUED,
4080 "Unable to map page - dropping packet...\n");
4081
4082
4083
4084
4085
4086
4087 first_bd->nbd = cpu_to_le16(nbd);
4088 bnx2x_free_tx_pkt(bp, txdata,
4089 TX_BD(txdata->tx_pkt_prod),
4090 &pkts_compl, &bytes_compl);
4091 return NETDEV_TX_OK;
4092 }
4093
4094 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4095 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4096 if (total_pkt_bd == NULL)
4097 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4098
4099 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4100 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4101 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4102 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4103 nbd++;
4104
4105 DP(NETIF_MSG_TX_QUEUED,
4106 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4107 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4108 le16_to_cpu(tx_data_bd->nbytes));
4109 }
4110
4111 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4112
4113
4114 first_bd->nbd = cpu_to_le16(nbd);
4115
4116 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4117
4118
4119
4120
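/* If the chain reaches into the next BD page, the "next page" BD is
 * consumed as well, so count it in nbd before ringing the doorbell.
 */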
4121 if (TX_BD_POFF(bd_prod) < nbd)
4122 nbd++;
4123
4124
4125
4126
4127
4128
4129
4130
4131 if (total_pkt_bd != NULL)
4132 total_pkt_bd->total_pkt_bytes = pkt_size;
4133
4134 if (pbd_e1x)
4135 DP(NETIF_MSG_TX_QUEUED,
4136 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4137 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4138 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4139 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4140 le16_to_cpu(pbd_e1x->total_hlen_w));
4141 if (pbd_e2)
4142 DP(NETIF_MSG_TX_QUEUED,
4143 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4144 pbd_e2,
4145 pbd_e2->data.mac_addr.dst_hi,
4146 pbd_e2->data.mac_addr.dst_mid,
4147 pbd_e2->data.mac_addr.dst_lo,
4148 pbd_e2->data.mac_addr.src_hi,
4149 pbd_e2->data.mac_addr.src_mid,
4150 pbd_e2->data.mac_addr.src_lo,
4151 pbd_e2->parsing_data);
4152 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4153
4154 netdev_tx_sent_queue(txq, skb->len);
4155
4156 skb_tx_timestamp(skb);
4157
4158 txdata->tx_pkt_prod++;
4159
4160
4161
4162
4163
4164
4165
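/* Make sure all BD writes are visible before the producer is updated
 * below; the chip may start fetching BDs as soon as it sees the new
 * producer value (this matters on weakly-ordered architectures).
 */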
4166 wmb();
4167
4168 txdata->tx_db.data.prod += nbd;
4169
4170 wmb();
4171
4172 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4173
4174 txdata->tx_bd_prod += nbd;
4175
4176 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4177 netif_tx_stop_queue(txq);
4178
4179
4180
4181
4182 smp_mb();
4183
4184 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4185 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4186 netif_tx_wake_queue(txq);
4187 }
4188 txdata->tx_pkt++;
4189
4190 return NETDEV_TX_OK;
4191 }
4192
4193 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4194 {
4195 int mfw_vn = BP_FW_MB_IDX(bp);
4196 u32 tmp;
4197
4198
4199 if (!IS_MF_BD(bp)) {
4200 int i;
4201
4202 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4203 c2s_map[i] = i;
4204 *c2s_default = 0;
4205
4206 return;
4207 }
4208
4209 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4210 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4211 c2s_map[0] = tmp & 0xff;
4212 c2s_map[1] = (tmp >> 8) & 0xff;
4213 c2s_map[2] = (tmp >> 16) & 0xff;
4214 c2s_map[3] = (tmp >> 24) & 0xff;
4215
4216 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4217 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4218 c2s_map[4] = tmp & 0xff;
4219 c2s_map[5] = (tmp >> 8) & 0xff;
4220 c2s_map[6] = (tmp >> 16) & 0xff;
4221 c2s_map[7] = (tmp >> 24) & 0xff;
4222
4223 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4224 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4225 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4226 }
4227
4228
4229
4230
4231
4232
4233
4234
4235
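/* Configure the net_device for @num_tc traffic classes: validate against
 * the chip's max_cos, map each priority to a TC through the CoS-to-outer
 * priority map read from the management FW, and give every TC a
 * contiguous block of BNX2X_NUM_ETH_QUEUES Tx queues.
 */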
4236 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4237 {
4238 struct bnx2x *bp = netdev_priv(dev);
4239 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4240 int cos, prio, count, offset;
4241
4242
4243 ASSERT_RTNL();
4244
4245
4246 if (!num_tc) {
4247 netdev_reset_tc(dev);
4248 return 0;
4249 }
4250
4251
4252 if (num_tc > bp->max_cos) {
4253 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4254 num_tc, bp->max_cos);
4255 return -EINVAL;
4256 }
4257
4258
4259 if (netdev_set_num_tc(dev, num_tc)) {
4260 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4261 return -EINVAL;
4262 }
4263
4264 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4265
4266
4267 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4268 int outer_prio = c2s_map[prio];
4269
4270 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4271 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4272 "mapping priority %d to tc %d\n",
4273 outer_prio, bp->prio_to_cos[outer_prio]);
4274 }
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287 for (cos = 0; cos < bp->max_cos; cos++) {
4288 count = BNX2X_NUM_ETH_QUEUES(bp);
4289 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4290 netdev_set_tc_queue(dev, cos, count, offset);
4291 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4292 "mapping tc %d to offset %d count %d\n",
4293 cos, offset, count);
4294 }
4295
4296 return 0;
4297 }
4298
4299 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4300 void *type_data)
4301 {
4302 struct tc_mqprio_qopt *mqprio = type_data;
4303
4304 if (type != TC_SETUP_QDISC_MQPRIO)
4305 return -EOPNOTSUPP;
4306
4307 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4308
4309 return bnx2x_setup_tc(dev, mqprio->num_tc);
4310 }
4311
4312
4313 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4314 {
4315 struct sockaddr *addr = p;
4316 struct bnx2x *bp = netdev_priv(dev);
4317 int rc = 0;
4318
4319 if (!is_valid_ether_addr(addr->sa_data)) {
4320 BNX2X_ERR("Requested MAC address is not valid\n");
4321 return -EINVAL;
4322 }
4323
4324 if (IS_MF_STORAGE_ONLY(bp)) {
4325 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4326 return -EINVAL;
4327 }
4328
4329 if (netif_running(dev)) {
4330 rc = bnx2x_set_eth_mac(bp, false);
4331 if (rc)
4332 return rc;
4333 }
4334
4335 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4336
4337 if (netif_running(dev))
4338 rc = bnx2x_set_eth_mac(bp, true);
4339
4340 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4341 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4342
4343 return rc;
4344 }
4345
4346 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4347 {
4348 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4349 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4350 u8 cos;
4351
4352
4353
4354 if (IS_FCOE_IDX(fp_index)) {
4355 memset(sb, 0, sizeof(union host_hc_status_block));
4356 fp->status_blk_mapping = 0;
4357 } else {
4358
4359 if (!CHIP_IS_E1x(bp))
4360 BNX2X_PCI_FREE(sb->e2_sb,
4361 bnx2x_fp(bp, fp_index,
4362 status_blk_mapping),
4363 sizeof(struct host_hc_status_block_e2));
4364 else
4365 BNX2X_PCI_FREE(sb->e1x_sb,
4366 bnx2x_fp(bp, fp_index,
4367 status_blk_mapping),
4368 sizeof(struct host_hc_status_block_e1x));
4369 }
4370
4371
4372 if (!skip_rx_queue(bp, fp_index)) {
4373 bnx2x_free_rx_bds(fp);
4374
4375
4376 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4377 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4378 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4379 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4380
4381 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4382 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4383 sizeof(struct eth_fast_path_rx_cqe) *
4384 NUM_RCQ_BD);
4385
4386
4387 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4388 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4389 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4390 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4391 }
4392
4393
4394 if (!skip_tx_queue(bp, fp_index)) {
4395
4396 for_each_cos_in_tx_queue(fp, cos) {
4397 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4398
4399 DP(NETIF_MSG_IFDOWN,
4400 "freeing tx memory of fp %d cos %d cid %d\n",
4401 fp_index, cos, txdata->cid);
4402
4403 BNX2X_FREE(txdata->tx_buf_ring);
4404 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4405 txdata->tx_desc_mapping,
4406 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4407 }
4408 }
4409
4410 }
4411
4412 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4413 {
4414 int i;
4415 for_each_cnic_queue(bp, i)
4416 bnx2x_free_fp_mem_at(bp, i);
4417 }
4418
4419 void bnx2x_free_fp_mem(struct bnx2x *bp)
4420 {
4421 int i;
4422 for_each_eth_queue(bp, i)
4423 bnx2x_free_fp_mem_at(bp, i);
4424 }
4425
4426 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4427 {
4428 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4429 if (!CHIP_IS_E1x(bp)) {
4430 bnx2x_fp(bp, index, sb_index_values) =
4431 (__le16 *)status_blk.e2_sb->sb.index_values;
4432 bnx2x_fp(bp, index, sb_running_index) =
4433 (__le16 *)status_blk.e2_sb->sb.running_index;
4434 } else {
4435 bnx2x_fp(bp, index, sb_index_values) =
4436 (__le16 *)status_blk.e1x_sb->sb.index_values;
4437 bnx2x_fp(bp, index, sb_running_index) =
4438 (__le16 *)status_blk.e1x_sb->sb.running_index;
4439 }
4440 }
4441
4442
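/* Pre-fill the Rx BD ring with up to @rx_ring_size buffers, tolerating
 * individual allocation failures (they are only counted in the queue
 * stats); returns the number of buffers actually posted.
 */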
4443 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4444 int rx_ring_size)
4445 {
4446 struct bnx2x *bp = fp->bp;
4447 u16 ring_prod, cqe_ring_prod;
4448 int i, failure_cnt = 0;
4449
4450 fp->rx_comp_cons = 0;
4451 cqe_ring_prod = ring_prod = 0;
4452
4453
4454
4455
4456 for (i = 0; i < rx_ring_size; i++) {
4457 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4458 failure_cnt++;
4459 continue;
4460 }
4461 ring_prod = NEXT_RX_IDX(ring_prod);
4462 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4463 WARN_ON(ring_prod <= (i - failure_cnt));
4464 }
4465
4466 if (failure_cnt)
4467 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4468 i - failure_cnt, fp->index);
4469
4470 fp->rx_bd_prod = ring_prod;
4471
4472 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4473 cqe_ring_prod);
4474
4475 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4476
4477 return i - failure_cnt;
4478 }
4479
4480 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4481 {
4482 int i;
4483
4484 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4485 struct eth_rx_cqe_next_page *nextpg;
4486
4487 nextpg = (struct eth_rx_cqe_next_page *)
4488 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4489 nextpg->addr_hi =
4490 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4491 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4492 nextpg->addr_lo =
4493 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4494 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4495 }
4496 }
4497
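/* Allocate everything a single fastpath needs: its status block, the Tx
 * rings for every CoS and the Rx bd/completion/SGE rings, then seed the
 * Rx ring. A partially filled Rx ring is tolerated as long as it meets
 * the minimal size for the current TPA mode.
 */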
4498 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4499 {
4500 union host_hc_status_block *sb;
4501 struct bnx2x_fastpath *fp = &bp->fp[index];
4502 int ring_size = 0;
4503 u8 cos;
4504 int rx_ring_size = 0;
4505
4506 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4507 rx_ring_size = MIN_RX_SIZE_NONTPA;
4508 bp->rx_ring_size = rx_ring_size;
4509 } else if (!bp->rx_ring_size) {
4510 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4511
4512 if (CHIP_IS_E3(bp)) {
4513 u32 cfg = SHMEM_RD(bp,
4514 dev_info.port_hw_config[BP_PORT(bp)].
4515 default_cfg);
4516
4517
4518 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4519 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4520 rx_ring_size /= 10;
4521 }
4522
4523
4524 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4525 MIN_RX_SIZE_TPA, rx_ring_size);
4526
4527 bp->rx_ring_size = rx_ring_size;
4528 } else
4529 rx_ring_size = bp->rx_ring_size;
4530
4531 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4532
4533
4534 sb = &bnx2x_fp(bp, index, status_blk);
4535
4536 if (!IS_FCOE_IDX(index)) {
4537
4538 if (!CHIP_IS_E1x(bp)) {
4539 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4540 sizeof(struct host_hc_status_block_e2));
4541 if (!sb->e2_sb)
4542 goto alloc_mem_err;
4543 } else {
4544 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4545 sizeof(struct host_hc_status_block_e1x));
4546 if (!sb->e1x_sb)
4547 goto alloc_mem_err;
4548 }
4549 }
4550
4551
4552
4553
4554 if (!IS_FCOE_IDX(index))
4555 set_sb_shortcuts(bp, index);
4556
4557
4558 if (!skip_tx_queue(bp, index)) {
4559
4560 for_each_cos_in_tx_queue(fp, cos) {
4561 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4562
4563 DP(NETIF_MSG_IFUP,
4564 "allocating tx memory of fp %d cos %d\n",
4565 index, cos);
4566
4567 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4568 sizeof(struct sw_tx_bd),
4569 GFP_KERNEL);
4570 if (!txdata->tx_buf_ring)
4571 goto alloc_mem_err;
4572 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4573 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4574 if (!txdata->tx_desc_ring)
4575 goto alloc_mem_err;
4576 }
4577 }
4578
4579
4580 if (!skip_rx_queue(bp, index)) {
4581
4582 bnx2x_fp(bp, index, rx_buf_ring) =
4583 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4584 if (!bnx2x_fp(bp, index, rx_buf_ring))
4585 goto alloc_mem_err;
4586 bnx2x_fp(bp, index, rx_desc_ring) =
4587 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4588 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4589 if (!bnx2x_fp(bp, index, rx_desc_ring))
4590 goto alloc_mem_err;
4591
4592
4593 bnx2x_fp(bp, index, rx_comp_ring) =
4594 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4595 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4596 if (!bnx2x_fp(bp, index, rx_comp_ring))
4597 goto alloc_mem_err;
4598
4599
4600 bnx2x_fp(bp, index, rx_page_ring) =
4601 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4602 GFP_KERNEL);
4603 if (!bnx2x_fp(bp, index, rx_page_ring))
4604 goto alloc_mem_err;
4605 bnx2x_fp(bp, index, rx_sge_ring) =
4606 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4607 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4608 if (!bnx2x_fp(bp, index, rx_sge_ring))
4609 goto alloc_mem_err;
4610
4611 bnx2x_set_next_page_rx_bd(fp);
4612
4613
4614 bnx2x_set_next_page_rx_cq(fp);
4615
4616
4617 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4618 if (ring_size < rx_ring_size)
4619 goto alloc_mem_err;
4620 }
4621
4622 return 0;
4623
4624
4625 alloc_mem_err:
4626 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4627 index, ring_size);
4628
4629 /* a queue smaller than the minimum (TPA vs. non-TPA) is unusable,
4630  * so release its memory and fail; otherwise keep the smaller ring
4631  */
4632 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4633 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4634
4635 bnx2x_free_fp_mem_at(bp, index);
4636 return -ENOMEM;
4637 }
4638 return 0;
4639 }
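
The sizing logic at the top of bnx2x_alloc_fp_mem_at() splits MAX_RX_AVAIL across the RX queues, shrinks the result for SGMII (1G) interfaces and clamps it to the applicable minimum. Below is a minimal userspace sketch of that policy; the constant values are illustrative stand-ins, not the driver's real ones.

/* A minimal sketch of the ring-size policy above. All of the constants
 * below (MAX_RX_AVAIL, MIN_RX_SIZE_*) are illustrative stand-ins.
 */
#include <stdio.h>

#define MAX_RX_AVAIL       4096
#define MIN_RX_SIZE_TPA      72   /* assumed minimum with TPA enabled  */
#define MIN_RX_SIZE_NONTPA   10   /* assumed minimum with TPA disabled */

static int calc_rx_ring_size(int num_rx_queues, int is_sgmii, int tpa_disabled)
{
        int min = tpa_disabled ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;
        int size = MAX_RX_AVAIL / num_rx_queues;

        if (is_sgmii)           /* 1G SerDes interface: shrink the ring */
                size /= 10;

        return size < min ? min : size;  /* never go below the minimum */
}

int main(void)
{
        printf("8 queues, 10G, TPA on : %d BDs\n", calc_rx_ring_size(8, 0, 0));
        printf("8 queues, SGMII, no TPA: %d BDs\n", calc_rx_ring_size(8, 1, 1));
        return 0;
}
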
4640
4641 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4642 {
4643 if (!NO_FCOE(bp))
4644
4645 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4646 
4647 /* fail the whole load if the FCoE queue cannot be allocated */
4648 
4649 return -ENOMEM;
4650
4651 return 0;
4652 }
4653
4654 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4655 {
4656 int i;
4657
4658
4659 /* the leading queue (index 0) is allocated first and is fatal on
4660  * failure; the remaining RSS queues are best-effort and the queue
4661  * count is shrunk below if any of them fails
4662  */
4663 if (bnx2x_alloc_fp_mem_at(bp, 0))
4664 return -ENOMEM;
4665
4666
4667 for_each_nondefault_eth_queue(bp, i)
4668 if (bnx2x_alloc_fp_mem_at(bp, i))
4669 break;
4670
4671
4672 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4673 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4674
4675 WARN_ON(delta < 0);
4676 bnx2x_shrink_eth_fp(bp, delta);
4677 if (CNIC_SUPPORT(bp))
4678
4679 /* slide the FCoE fastpath down by delta so it stays
4680  * adjacent to the last remaining ETH queue
4681  */
4682
4683
4684 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4685 bp->num_ethernet_queues -= delta;
4686 bp->num_queues = bp->num_ethernet_queues +
4687 bp->num_cnic_queues;
4688 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4689 bp->num_queues + delta, bp->num_queues);
4690 }
4691
4692 return 0;
4693 }
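
When one of the non-leading ETH queues fails to allocate, the code above shrinks the ETH queue count by delta and slides the FCoE fastpath down so it stays next to the last surviving ETH queue. A small sketch of that index arithmetic follows; the initial layout (8 ETH queues, FCoE right after them) is hypothetical.

/* Sketch of the queue-shrink bookkeeping: indices and counts only,
 * no real fastpath structures.
 */
#include <stdio.h>

int main(void)
{
        int num_eth_queues = 8;                /* queues requested          */
        int allocated      = 5;                /* queues that got memory    */
        int fcoe_idx       = num_eth_queues;   /* FCoE follows the ETH ones */

        int delta = num_eth_queues - allocated;

        /* shrink the ETH count and slide FCoE down so it stays adjacent */
        num_eth_queues -= delta;
        fcoe_idx       -= delta;

        printf("eth queues %d, fcoe index %d (delta %d)\n",
               num_eth_queues, fcoe_idx, delta);
        return 0;
}
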
4694
4695 void bnx2x_free_mem_bp(struct bnx2x *bp)
4696 {
4697 int i;
4698
4699 for (i = 0; i < bp->fp_array_size; i++)
4700 kfree(bp->fp[i].tpa_info);
4701 kfree(bp->fp);
4702 kfree(bp->sp_objs);
4703 kfree(bp->fp_stats);
4704 kfree(bp->bnx2x_txq);
4705 kfree(bp->msix_table);
4706 kfree(bp->ilt);
4707 }
4708
4709 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4710 {
4711 struct bnx2x_fastpath *fp;
4712 struct msix_entry *tbl;
4713 struct bnx2x_ilt *ilt;
4714 int msix_table_size = 0;
4715 int fp_array_size, txq_array_size;
4716 int i;
4717
4718
4719 /* the MSI-X table must cover all fast-path IGU status blocks,
4720  * plus one slot for the default status block on a PF
4721  */
4722 msix_table_size = bp->igu_sb_cnt;
4723 if (IS_PF(bp))
4724 msix_table_size++;
4725 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4726
4727
4728 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4729 bp->fp_array_size = fp_array_size;
4730 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4731
4732 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4733 if (!fp)
4734 goto alloc_err;
4735 for (i = 0; i < bp->fp_array_size; i++) {
4736 fp[i].tpa_info =
4737 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4738 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4739 if (!(fp[i].tpa_info))
4740 goto alloc_err;
4741 }
4742
4743 bp->fp = fp;
4744
4745
4746 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4747 GFP_KERNEL);
4748 if (!bp->sp_objs)
4749 goto alloc_err;
4750
4751
4752 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4753 GFP_KERNEL);
4754 if (!bp->fp_stats)
4755 goto alloc_err;
4756
4757
4758 txq_array_size =
4759 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4760 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4761
4762 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4763 GFP_KERNEL);
4764 if (!bp->bnx2x_txq)
4765 goto alloc_err;
4766
4767
4768 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4769 if (!tbl)
4770 goto alloc_err;
4771 bp->msix_table = tbl;
4772
4773
4774 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4775 if (!ilt)
4776 goto alloc_err;
4777 bp->ilt = ilt;
4778
4779 return 0;
4780 alloc_err:
4781 bnx2x_free_mem_bp(bp);
4782 return -ENOMEM;
4783 }
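
bnx2x_alloc_mem_bp() sizes three bookkeeping arrays: the MSI-X table (one entry per fast-path IGU status block, plus one for the default status block on a PF), the fastpath array (RSS queues plus the CNIC L2 queue), and the TX-queue array (RSS queues times the number of traffic classes, plus CNIC). A sketch of that arithmetic with hypothetical counts:

/* Sketch of the sizing arithmetic used above for the bookkeeping arrays.
 * Every input value here is a hypothetical example, not a real chip count.
 */
#include <stdio.h>

int main(void)
{
        int igu_sb_cnt   = 16;  /* fast-path IGU status blocks (assumed)     */
        int is_pf        = 1;   /* PF gets one extra slot for the default SB */
        int max_rss      = 16;  /* stand-in for BNX2X_MAX_RSS_COUNT()        */
        int cnic_support = 1;   /* one extra L2 queue when CNIC is compiled  */
        int multi_tx_cos = 3;   /* stand-in for BNX2X_MULTI_TX_COS           */

        int msix_table_size = igu_sb_cnt + (is_pf ? 1 : 0);
        int fp_array_size   = max_rss + cnic_support;
        int txq_array_size  = max_rss * multi_tx_cos + cnic_support;

        printf("msix %d, fp %d, txq %d\n",
               msix_table_size, fp_array_size, txq_array_size);
        return 0;
}
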
4784
4785 int bnx2x_reload_if_running(struct net_device *dev)
4786 {
4787 struct bnx2x *bp = netdev_priv(dev);
4788
4789 if (unlikely(!netif_running(dev)))
4790 return 0;
4791
4792 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4793 return bnx2x_nic_load(bp, LOAD_NORMAL);
4794 }
4795
4796 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4797 {
4798 u32 sel_phy_idx = 0;
4799 if (bp->link_params.num_phys <= 1)
4800 return INT_PHY;
4801
4802 if (bp->link_vars.link_up) {
4803 sel_phy_idx = EXT_PHY1;
4804 /* prefer the second external PHY when the SerDes link is up on a fibre-capable PHY */
4805 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4806 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4807 sel_phy_idx = EXT_PHY2;
4808 } else {
4809 /* link is down: fall back to the configured PHY selection */
4810 switch (bnx2x_phy_selection(&bp->link_params)) {
4811 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4812 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4813 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4814 sel_phy_idx = EXT_PHY1;
4815 break;
4816 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4817 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4818 sel_phy_idx = EXT_PHY2;
4819 break;
4820 }
4821 }
4822
4823 return sel_phy_idx;
4824 }
4825 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4826 {
4827 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4828
4829
4830 /* the current PHY index is reported after swapping, so when PHY
4831  * swapping is enabled it has to be reversed to get the link
4832  * configuration index
4833  */
4834 if (bp->link_params.multi_phy_config &
4835 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4836 if (sel_phy_idx == EXT_PHY1)
4837 sel_phy_idx = EXT_PHY2;
4838 else if (sel_phy_idx == EXT_PHY2)
4839 sel_phy_idx = EXT_PHY1;
4840 }
4841 return LINK_CONFIG_IDX(sel_phy_idx);
4842 }
4843
4844 #ifdef NETDEV_FCOE_WWNN
4845 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4846 {
4847 struct bnx2x *bp = netdev_priv(dev);
4848 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4849
4850 switch (type) {
4851 case NETDEV_FCOE_WWNN:
4852 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4853 cp->fcoe_wwn_node_name_lo);
4854 break;
4855 case NETDEV_FCOE_WWPN:
4856 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4857 cp->fcoe_wwn_port_name_lo);
4858 break;
4859 default:
4860 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4861 return -EINVAL;
4862 }
4863
4864 return 0;
4865 }
4866 #endif
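
HILO_U64() above presumably just concatenates the 32-bit high and low halves of the WWN exported by the CNIC device into one 64-bit value. A hedged userspace equivalent follows; the helper name and the sample halves below are made up.

/* Sketch: combine two 32-bit halves into a 64-bit WWN the way HILO_U64()
 * is assumed to work. The sample values are fabricated.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        uint32_t wwnn_hi = 0x20000000, wwnn_lo = 0x11223344; /* made up */

        printf("WWNN 0x%016llx\n",
               (unsigned long long)hilo_u64(wwnn_hi, wwnn_lo));
        return 0;
}
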
4867
4868 /* called with rtnl_lock */
4869 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4870 {
4871 struct bnx2x *bp = netdev_priv(dev);
4872
4873 if (pci_num_vf(bp->pdev)) {
4874 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4875 return -EPERM;
4876 }
4877
4878 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4879 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4880 return -EAGAIN;
4881 }
4882
4883
4884 /* this does not race with packet allocation: the actual buffer
4885  * size is only updated as part of the reload below
4886  */
4887 dev->mtu = new_mtu;
4888
4889 if (!bnx2x_mtu_allows_gro(new_mtu))
4890 dev->features &= ~NETIF_F_GRO_HW;
4891
4892 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4893 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4894
4895 return bnx2x_reload_if_running(dev);
4896 }
4897
4898 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4899 netdev_features_t features)
4900 {
4901 struct bnx2x *bp = netdev_priv(dev);
4902
4903 if (pci_num_vf(bp->pdev)) {
4904 netdev_features_t changed = dev->features ^ features;
4905
4906 /* revert any requested change that would require an internal
4907  * reload of the PF in bnx2x_set_features()
4908  */
4909 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4910 features &= ~NETIF_F_RXCSUM;
4911 features |= dev->features & NETIF_F_RXCSUM;
4912 }
4913
4914 if (changed & NETIF_F_LOOPBACK) {
4915 features &= ~NETIF_F_LOOPBACK;
4916 features |= dev->features & NETIF_F_LOOPBACK;
4917 }
4918 }
4919
4920 /* TPA/LRO requires RX checksum offloading */
4921 if (!(features & NETIF_F_RXCSUM))
4922 features &= ~NETIF_F_LRO;
4923
4924 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4925 features &= ~NETIF_F_GRO_HW;
4926 if (features & NETIF_F_GRO_HW)
4927 features &= ~NETIF_F_LRO;
4928
4929 return features;
4930 }
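
The last three checks in bnx2x_fix_features() encode the feature dependencies: LRO needs RX checksum offload, hardware GRO needs software GRO plus a GRO-capable MTU, and when hardware GRO stays on, LRO is dropped. A compact sketch of that resolution order follows; the flag bits are illustrative, not the kernel's NETIF_F_* values.

/* Sketch of the feature-dependency fixup; flag bit positions are invented. */
#include <stdio.h>

#define F_RXCSUM  (1u << 0)
#define F_LRO     (1u << 1)
#define F_GRO     (1u << 2)
#define F_GRO_HW  (1u << 3)

static unsigned int fix_features(unsigned int features, int mtu_allows_gro)
{
        if (!(features & F_RXCSUM))         /* LRO needs RX checksumming    */
                features &= ~F_LRO;
        if (!(features & F_GRO) || !mtu_allows_gro)
                features &= ~F_GRO_HW;      /* HW GRO needs SW GRO + MTU ok */
        if (features & F_GRO_HW)
                features &= ~F_LRO;         /* HW GRO wins over LRO         */
        return features;
}

int main(void)
{
        unsigned int f = fix_features(F_RXCSUM | F_LRO | F_GRO | F_GRO_HW, 1);

        printf("resolved features 0x%x\n", f);  /* LRO dropped, HW GRO kept */
        return 0;
}
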
4931
4932 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4933 {
4934 struct bnx2x *bp = netdev_priv(dev);
4935 netdev_features_t changes = features ^ dev->features;
4936 bool bnx2x_reload = false;
4937 int rc;
4938
4939
4940 if (!pci_num_vf(bp->pdev)) {
4941 if (features & NETIF_F_LOOPBACK) {
4942 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4943 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4944 bnx2x_reload = true;
4945 }
4946 } else {
4947 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4948 bp->link_params.loopback_mode = LOOPBACK_NONE;
4949 bnx2x_reload = true;
4950 }
4951 }
4952 }
4953
4954 /* a GRO change on its own does not require a reload */
4955 changes &= ~NETIF_F_GRO;
4956
4957 if (changes)
4958 bnx2x_reload = true;
4959
4960 if (bnx2x_reload) {
4961 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4962 dev->features = features;
4963 rc = bnx2x_reload_if_running(dev);
4964 return rc ? rc : 1;
4965 }
4966 /* else: the reload will happen once parity recovery completes */
4967 }
4968
4969 return 0;
4970 }
4971
4972 void bnx2x_tx_timeout(struct net_device *dev)
4973 {
4974 struct bnx2x *bp = netdev_priv(dev);
4975
4976 /* dump the device state for debugging; bnx2x_panic() is used only
4977  * when BNX2X_STOP_ON_ERROR is set, as it kills any chance of recovery
4978  */
4979 if (!bp->panic)
4980 #ifndef BNX2X_STOP_ON_ERROR
4981 bnx2x_panic_dump(bp, false);
4982 #else
4983 bnx2x_panic();
4984 #endif
4985
4986 /* this allows the netif to be shut down gracefully before resetting */
4987 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4988 }
4989
4990 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4991 {
4992 struct net_device *dev = pci_get_drvdata(pdev);
4993 struct bnx2x *bp;
4994
4995 if (!dev) {
4996 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4997 return -ENODEV;
4998 }
4999 bp = netdev_priv(dev);
5000
5001 rtnl_lock();
5002
5003 pci_save_state(pdev);
5004
5005 if (!netif_running(dev)) {
5006 rtnl_unlock();
5007 return 0;
5008 }
5009
5010 netif_device_detach(dev);
5011
5012 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5013
5014 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5015
5016 rtnl_unlock();
5017
5018 return 0;
5019 }
5020
5021 int bnx2x_resume(struct pci_dev *pdev)
5022 {
5023 struct net_device *dev = pci_get_drvdata(pdev);
5024 struct bnx2x *bp;
5025 int rc;
5026
5027 if (!dev) {
5028 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5029 return -ENODEV;
5030 }
5031 bp = netdev_priv(dev);
5032
5033 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5034 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5035 return -EAGAIN;
5036 }
5037
5038 rtnl_lock();
5039
5040 pci_restore_state(pdev);
5041
5042 if (!netif_running(dev)) {
5043 rtnl_unlock();
5044 return 0;
5045 }
5046
5047 bnx2x_set_power_state(bp, PCI_D0);
5048 netif_device_attach(dev);
5049
5050 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5051
5052 rtnl_unlock();
5053
5054 return rc;
5055 }
5056
5057 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5058 u32 cid)
5059 {
5060 if (!cxt) {
5061 BNX2X_ERR("bad context pointer %p\n", cxt);
5062 return;
5063 }
5064
5065 /* ustorm context validation */
5066 cxt->ustorm_ag_context.cdu_usage =
5067 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5068 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5069 /* xstorm context validation */
5070 cxt->xstorm_ag_context.cdu_reserved =
5071 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5072 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5073 }
5074
5075 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5076 u8 fw_sb_id, u8 sb_index,
5077 u8 ticks)
5078 {
5079 u32 addr = BAR_CSTRORM_INTMEM +
5080 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5081 REG_WR8(bp, addr, ticks);
5082 DP(NETIF_MSG_IFUP,
5083 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5084 port, fw_sb_id, sb_index, ticks);
5085 }
5086
5087 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5088 u16 fw_sb_id, u8 sb_index,
5089 u8 disable)
5090 {
5091 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5092 u32 addr = BAR_CSTRORM_INTMEM +
5093 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5094 u8 flags = REG_RD8(bp, addr);
5095
5096 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5097 flags |= enable_flag;
5098 REG_WR8(bp, addr, flags);
5099 DP(NETIF_MSG_IFUP,
5100 "port %x fw_sb_id %d sb_index %d disable %d\n",
5101 port, fw_sb_id, sb_index, disable);
5102 }
5103
5104 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5105 u8 sb_index, u8 disable, u16 usec)
5106 {
5107 int port = BP_PORT(bp);
5108 u8 ticks = usec / BNX2X_BTR;
5109
5110 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5111
5112 disable = disable ? 1 : (usec ? 0 : 1);
5113 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5114 }
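
bnx2x_update_coalesce_sb_index() converts the requested interval from microseconds into BNX2X_BTR-sized ticks and disables host coalescing for the index when the caller asks for that or passes a zero interval. A sketch of the conversion follows; the tick granularity used below is an assumed placeholder, not necessarily BNX2X_BTR's real value.

/* Sketch: usec -> tick conversion plus the enable/disable decision.
 * BTR_USEC stands in for BNX2X_BTR; its value here is assumed.
 */
#include <stdio.h>

#define BTR_USEC 4   /* assumed tick granularity in microseconds */

static void update_coalesce(unsigned int usec, int disable)
{
        unsigned int ticks = usec / BTR_USEC;

        /* a zero interval also disables coalescing for this index */
        disable = disable ? 1 : (usec ? 0 : 1);

        printf("ticks %u, host coalescing %s\n",
               ticks, disable ? "disabled" : "enabled");
}

int main(void)
{
        update_coalesce(24, 0);  /* 24 usec -> 6 ticks, enabled */
        update_coalesce(0, 0);   /* zero interval -> disabled   */
        return 0;
}
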
5115
5116 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5117 u32 verbose)
5118 {
5119 smp_mb__before_atomic();
5120 set_bit(flag, &bp->sp_rtnl_state);
5121 smp_mb__after_atomic();
5122 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5123 flag);
5124 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5125 }