This source file includes the following definitions:
- aq_nic_rss_init
- aq_nic_cfg_start
- aq_nic_update_link_status
- aq_linkstate_threaded_isr
- aq_nic_service_task
- aq_nic_service_timer_cb
- aq_nic_polling_timer_cb
- aq_nic_ndev_register
- aq_nic_ndev_init
- aq_nic_set_tx_ring
- aq_nic_get_ndev
- aq_nic_init
- aq_nic_start
- aq_nic_map_skb
- aq_nic_xmit
- aq_nic_update_interrupt_moderation_settings
- aq_nic_set_packet_filter
- aq_nic_set_multicast_list
- aq_nic_set_mtu
- aq_nic_set_mac
- aq_nic_get_link_speed
- aq_nic_get_regs
- aq_nic_get_regs_count
- aq_nic_get_stats
- aq_nic_update_ndev_stats
- aq_nic_get_link_ksettings
- aq_nic_set_link_ksettings
- aq_nic_get_cfg
- aq_nic_get_fw_version
- aq_nic_stop
- aq_nic_deinit
- aq_nic_free_vectors
- aq_nic_change_pm_state
- aq_nic_shutdown
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

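/* Seed the RSS hash key and spread the indirection table evenly across
 * the RSS queues. The bitmask works because cfg->vecs is always rounded
 * down to a power of two in aq_nic_cfg_start().
 */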
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

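/* Derive the runtime NIC configuration from hardware capabilities,
 * module parameters, and compile-time defaults.
 */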
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If no - we'll know link state from
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;
}

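/* Poll firmware for the current link state and propagate changes to the
 * net_device carrier state and TX queues.
 */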
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

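/* Threaded handler for the dedicated link-state interrupt; re-arms the
 * link IRQ vector once the link state has been processed.
 */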
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));
	return IRQ_HANDLED;
}

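/* Periodic service work: refresh link state and pull firmware statistics
 * under fwreq_mutex.
 */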
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

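/* One-time netdev bring-up: probe firmware, read the permanent MAC
 * address, allocate per-queue vectors, and register the net_device.
 */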
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

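/* Bring the datapath up: program filters, start rings and hardware,
 * arm the service/polling timers, and request IRQs when not polling.
 */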
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

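/* Map an skb into TX ring descriptors: an optional context descriptor
 * for TSO/VLAN offload, the linear part, then each paged fragment
 * (split at AQ_CFG_TX_FRAME_MAX). Returns the number of descriptors
 * used, or 0 after unwinding on DMA mapping failure.
 */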
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
	bool need_context_tag = false;

	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_gso = 1U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;
		need_context_tag = true;
	}

	if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

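/* Rebuild the combined unicast/multicast filter list. Falls back to
 * IFF_PROMISC or IFF_ALLMULTI when the address count exceeds
 * AQ_HW_MULTICAST_ADDRESS_MAX.
 */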
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev)
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev)
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

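/* Fill the ethtool statistics buffer: firmware/MAC counters first,
 * followed by the software stats of each vector.
 */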
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

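/* Report supported/advertised link modes to ethtool based on the
 * hardware capability mask and the current configuration.
 */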
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;

	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

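/* Tear the datapath down in reverse order of aq_nic_start(): queues,
 * timers, interrupts, vectors, then the hardware itself.
 */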
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol) {
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

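/* PM transition helper: suspend/freeze stops and deinits the NIC,
 * resume re-initializes and restarts it; both run under rtnl_lock.
 */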
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}