This source file includes following definitions.
- hns_get_mac_cb
- hns_ae_get_dsaf_dev
- hns_get_ppe_cb
- hns_ae_get_q_num_per_vf
- hns_ae_get_vf_num_per_port
- hns_ae_get_base_ring_pair
- hns_ae_get_ring_pair
- hns_ae_get_handle
- hns_ae_put_handle
- hns_ae_wait_flow_down
- hns_ae_ring_enable_all
- hns_ae_init_queue
- hns_ae_fini_queue
- hns_ae_set_mac_address
- hns_ae_add_uc_address
- hns_ae_rm_uc_address
- hns_ae_set_multicast_one
- hns_ae_clr_multicast
- hns_ae_set_mtu
- hns_ae_set_tso_stats
- hns_ae_start
- hns_ae_stop
- hns_ae_reset
- hns_ae_toggle_ring_irq
- hns_aev2_toggle_ring_irq
- hns_ae_get_link_status
- hns_ae_get_mac_info
- hns_ae_need_adjust_link
- hns_ae_adjust_link
- hns_ae_get_ring_bdnum_limit
- hns_ae_get_pauseparam
- hns_ae_set_autoneg
- hns_ae_set_promisc_mode
- hns_ae_get_autoneg
- hns_ae_set_pauseparam
- hns_ae_get_coalesce_usecs
- hns_ae_get_max_coalesced_frames
- hns_ae_set_coalesce_usecs
- hns_ae_set_coalesce_frames
- hns_ae_get_coalesce_range
- hns_ae_update_stats
- hns_ae_get_stats
- hns_ae_get_strings
- hns_ae_get_sset_count
- hns_ae_config_loopback
- hns_ae_update_led_status
- hns_ae_cpld_set_led_id
- hns_ae_get_regs
- hns_ae_get_regs_len
- hns_ae_get_rss_key_size
- hns_ae_get_rss_indir_size
- hns_ae_get_rss
- hns_ae_set_rss
- hns_dsaf_ae_init
- hns_dsaf_ae_uninit
1
2
3
4
5
6 #include <linux/etherdevice.h>
7 #include <linux/netdevice.h>
8 #include <linux/spinlock.h>
9
10 #include "hnae.h"
11 #include "hns_dsaf_mac.h"
12 #include "hns_dsaf_main.h"
13 #include "hns_dsaf_ppe.h"
14 #include "hns_dsaf_rcb.h"
15
16 #define AE_NAME_PORT_ID_IDX 6
17
18 static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
19 {
20 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
21
22 return vf_cb->mac_cb;
23 }
24
/* Map a generic AE device back to its enclosing DSAF device. */
static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
{
	return container_of(dev, struct dsaf_device, ae_dev);
}
29
30 static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
31 {
32 int ppe_index;
33 struct ppe_common_cb *ppe_comm;
34 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
35
36 ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
37 ppe_index = vf_cb->port_index;
38
39 return &ppe_comm->ppe_cb[ppe_index];
40 }
41
/*
 * Number of queues each VF owns, read from RCB common block 0.
 * @port is unused; kept for interface symmetry with the other getters.
 */
static int hns_ae_get_q_num_per_vf(
	struct dsaf_device *dsaf_dev, int port)
{
	return dsaf_dev->rcb_common[0]->max_q_per_vf;
}
47
/*
 * Number of VFs supported per physical port, read from RCB common
 * block 0.  @port is unused; kept for interface symmetry.
 */
static int hns_ae_get_vf_num_per_port(
	struct dsaf_device *dsaf_dev, int port)
{
	return dsaf_dev->rcb_common[0]->max_vfn;
}
53
54 static struct ring_pair_cb *hns_ae_get_base_ring_pair(
55 struct dsaf_device *dsaf_dev, int port)
56 {
57 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
58 int q_num = rcb_comm->max_q_per_vf;
59 int vf_num = rcb_comm->max_vfn;
60
61 return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
62 }
63
/* Recover the ring_pair_cb that embeds queue @q. */
static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
{
	return container_of(q, struct ring_pair_cb, q);
}
68
/*
 * Allocate and initialize an AE handle for physical port @port_id.
 *
 * Scans the port's VF slots for one whose ring pairs are free, claims
 * those ring pairs and records their queue pointers in the freshly
 * allocated vf_cb.  Returns the embedded hnae_handle, or an ERR_PTR()
 * when allocation fails or no free VF slot exists.
 */
static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
					     u32 port_id)
{
	int vfnum_per_port;
	int qnum_per_vf;
	int i;
	struct dsaf_device *dsaf_dev;
	struct hnae_handle *ae_handle;
	struct ring_pair_cb *ring_pair_cb;
	struct hnae_vf_cb *vf_cb;

	dsaf_dev = hns_ae_get_dsaf_dev(dev);

	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);

	/* trailing space holds the per-queue pointer array wired up below */
	vf_cb = kzalloc(sizeof(*vf_cb) +
			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
	if (unlikely(!vf_cb)) {
		dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
		ae_handle = ERR_PTR(-ENOMEM);
		goto handle_err;
	}
	ae_handle = &vf_cb->ae_handle;

	ae_handle->owner_dev = dsaf_dev->dev;
	ae_handle->dev = dev;
	ae_handle->q_num = qnum_per_vf;
	ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;

	/* find the first VF slot whose leading ring pair is unclaimed;
	 * a port's ring pairs are contiguous, qnum_per_vf per VF
	 */
	for (ae_handle->vf_id = 0;
	     ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
		if (!ring_pair_cb->used_by_vf)
			break;
		ring_pair_cb += qnum_per_vf;
	}
	if (ae_handle->vf_id >= vfnum_per_port) {
		dev_err(dsaf_dev->dev, "malloc queue fail!\n");
		ae_handle = ERR_PTR(-EINVAL);
		goto vf_id_err;
	}

	/* qs points at the storage directly behind the qs member --
	 * assumes qs is the last field of hnae_handle so the extra
	 * kzalloc space above lands right after it; TODO(review)
	 * confirm against the hnae_handle/hnae_vf_cb layout in hnae.h
	 */
	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
	for (i = 0; i < qnum_per_vf; i++) {
		ae_handle->qs[i] = &ring_pair_cb->q;
		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];

		ring_pair_cb->used_by_vf = 1;	/* claim the ring pair */
		ring_pair_cb++;
	}

	vf_cb->dsaf_dev = dsaf_dev;
	vf_cb->port_index = port_id;
	vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];

	/* mirror the MAC's link properties into the handle */
	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
	ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
	ae_handle->if_support = vf_cb->mac_cb->if_support;
	ae_handle->port_type = vf_cb->mac_cb->mac_type;
	ae_handle->media_type = vf_cb->mac_cb->media_type;
	ae_handle->dport_id = port_id;

	return ae_handle;
vf_id_err:
	kfree(vf_cb);
handle_err:
	return ae_handle;
}
140
141 static void hns_ae_put_handle(struct hnae_handle *handle)
142 {
143 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
144 int i;
145
146 for (i = 0; i < handle->q_num; i++)
147 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
148
149 kfree(vf_cb);
150 }
151
/*
 * Wait for all in-flight traffic on @handle to drain out of the
 * hardware pipeline, stage by stage: TX rings, PPE TX FIFO, DSAF
 * fabric, then the MAC FIFO.  Returns 0 when everything is clean,
 * or the first non-zero error from a stage that did not drain.
 */
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
{
	struct dsaf_device *dsaf_dev;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_vf_cb *vf_cb;
	int ret;
	int i;

	/* 1) every TX ring must be empty */
	for (i = 0; i < handle->q_num; i++) {
		ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
		if (ret)
			return ret;
	}

	/* 2) PPE TX FIFO */
	ppe_cb = hns_get_ppe_cb(handle);
	ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
	if (ret)
		return ret;

	/* 3) DSAF fabric for this destination port */
	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return -EINVAL;
	ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
	if (ret)
		return ret;

	/* 4) MAC FIFO */
	vf_cb = hns_ae_get_vf_cb(handle);
	ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
	if (ret)
		return ret;

	/* settle time before the caller reconfigures the hardware */
	mdelay(10);
	return 0;
}
186
187 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
188 {
189 int q_num = handle->q_num;
190 int i;
191
192 for (i = 0; i < q_num; i++)
193 hns_rcb_ring_enable_hw(handle->qs[i], val);
194 }
195
/* Initialize the RCB hardware for the ring pair that embeds @q. */
static void hns_ae_init_queue(struct hnae_queue *q)
{
	hns_rcb_init_hw(hns_ae_get_ring_pair(q));
}
203
204 static void hns_ae_fini_queue(struct hnae_queue *q)
205 {
206 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
207
208 if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
209 hns_rcb_reset_ring_hw(q);
210 }
211
212 static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
213 {
214 int ret;
215 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
216
217 if (!p || !is_valid_ether_addr((const u8 *)p)) {
218 dev_err(handle->owner_dev, "is not valid ether addr !\n");
219 return -EADDRNOTAVAIL;
220 }
221
222 ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
223 if (ret != 0) {
224 dev_err(handle->owner_dev,
225 "set_mac_address fail, ret=%d!\n", ret);
226 return ret;
227 }
228
229 return 0;
230 }
231
/*
 * Add a unicast filter entry for this VF.  Non-service (debug) ports
 * take no extra unicast entries, hence -ENOSPC.
 */
static int hns_ae_add_uc_address(struct hnae_handle *handle,
				 const unsigned char *addr)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return -ENOSPC;

	return hns_mac_add_uc_addr(mac_cb, handle->vf_id, addr);
}
242
/*
 * Remove a unicast filter entry for this VF.  Mirrors
 * hns_ae_add_uc_address(): non-service ports return -ENOSPC.
 */
static int hns_ae_rm_uc_address(struct hnae_handle *handle,
				const unsigned char *addr)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return -ENOSPC;

	return hns_mac_rm_uc_addr(mac_cb, handle->vf_id, addr);
}
253
254 static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
255 {
256 int ret;
257 char *mac_addr = (char *)addr;
258 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
259 u8 port_num;
260
261 assert(mac_cb);
262
263 if (mac_cb->mac_type != HNAE_PORT_SERVICE)
264 return 0;
265
266 ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true);
267 if (ret) {
268 dev_err(handle->owner_dev,
269 "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
270 mac_addr, mac_cb->mac_id, ret);
271 return ret;
272 }
273
274 ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
275 if (ret)
276 return ret;
277
278 ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
279 if (ret)
280 dev_err(handle->owner_dev,
281 "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
282 mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
283
284 return ret;
285 }
286
/*
 * Drop all multicast filter entries for this VF.  Debug ports have
 * no multicast filtering, so they succeed as a no-op.
 */
static int hns_ae_clr_multicast(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return 0;

	return hns_mac_clr_multicast(mac_cb, handle->vf_id);
}
296
297 static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
298 {
299 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
300 struct hnae_queue *q;
301 u32 rx_buf_size;
302 int i, ret;
303
304
305 if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
306 if (new_mtu <= BD_SIZE_2048_MAX_MTU)
307 rx_buf_size = 2048;
308 else
309 rx_buf_size = 4096;
310 } else {
311 rx_buf_size = mac_cb->dsaf_dev->buf_size;
312 }
313
314 ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
315
316 if (!ret) {
317
318 for (i = 0; i < handle->q_num; i++) {
319 q = handle->qs[i];
320 q->rx_ring.buf_size = rx_buf_size;
321 hns_rcb_set_rx_ring_bs(q, rx_buf_size);
322 }
323 }
324
325 return ret;
326 }
327
/* Toggle TSO statistics/processing in the PPE for @handle's port. */
static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
{
	hns_ppe_set_tso_enable(hns_get_ppe_cb(handle), enable);
}
334
/*
 * Bring the port up: enable broadcast reception for the VF, clear
 * any stale ring interrupts, enable all rings, then start the MAC.
 */
static int hns_ae_start(struct hnae_handle *handle)
{
	int ret;
	int k;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
	if (ret)
		return ret;

	/* the interrupt-clear register block differs between IP versions */
	for (k = 0; k < handle->q_num; k++) {
		if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
			hns_rcb_int_clr_hw(handle->qs[k],
					   RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
		else
			hns_rcbv2_int_clr_hw(handle->qs[k],
					     RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
	}
	hns_ae_ring_enable_all(handle, 1);
	/* let the rings settle before the MAC starts passing traffic */
	msleep(100);

	hns_mac_start(mac_cb);

	return 0;
}
360
/*
 * Bring the port down in the reverse order of hns_ae_start():
 * drain pending TX descriptors, stop the MAC, disable the rings,
 * drain RX descriptors, and finally turn off VF broadcast reception.
 */
static void hns_ae_stop(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	/* wait until the in-flight TX descriptors are consumed */
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);

	msleep(20);

	hns_mac_stop(mac_cb);

	/* give the MAC time to quiesce before disabling the rings */
	usleep_range(10000, 20000);

	hns_ae_ring_enable_all(handle, 0);

	/* now drain whatever RX descriptors were still outstanding */
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);

	/* best-effort: failure to clear broadcast is deliberately ignored */
	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}
381
382 static void hns_ae_reset(struct hnae_handle *handle)
383 {
384 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
385
386 if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
387 hns_mac_reset(vf_cb->mac_cb);
388 hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
389 }
390 }
391
392 static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
393 {
394 u32 flag;
395
396 if (is_tx_ring(ring))
397 flag = RCB_INT_FLAG_TX;
398 else
399 flag = RCB_INT_FLAG_RX;
400
401 hns_rcb_int_ctrl_hw(ring->q, flag, mask);
402 }
403
404 static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
405 {
406 u32 flag;
407
408 if (is_tx_ring(ring))
409 flag = RCB_INT_FLAG_TX;
410 else
411 flag = RCB_INT_FLAG_RX;
412
413 hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
414 }
415
416 static int hns_ae_get_link_status(struct hnae_handle *handle)
417 {
418 u32 link_status;
419 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
420
421 hns_mac_get_link_status(mac_cb, &link_status);
422
423 return !!link_status;
424 }
425
426 static int hns_ae_get_mac_info(struct hnae_handle *handle,
427 u8 *auto_neg, u16 *speed, u8 *duplex)
428 {
429 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
430
431 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
432 }
433
434 static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
435 int duplex)
436 {
437 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
438
439 return hns_mac_need_adjust_link(mac_cb, speed, duplex);
440 }
441
442 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
443 int duplex)
444 {
445 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
446
447 switch (mac_cb->dsaf_dev->dsaf_ver) {
448 case AE_VERSION_1:
449 hns_mac_adjust_link(mac_cb, speed, duplex);
450 break;
451
452 case AE_VERSION_2:
453
454 hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
455 if (hns_ae_wait_flow_down(handle)) {
456 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
457 break;
458 }
459
460 hns_mac_adjust_link(mac_cb, speed, duplex);
461 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
462 break;
463
464 default:
465 break;
466 }
467
468 return;
469 }
470
/* Report the maximum number of pending BDs a ring may be sized to. */
static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
					u32 *uplimit)
{
	*uplimit = HNS_RCB_RING_MAX_PENDING_BD;
}
476
477 static void hns_ae_get_pauseparam(struct hnae_handle *handle,
478 u32 *auto_neg, u32 *rx_en, u32 *tx_en)
479 {
480 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
481 struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
482
483 hns_mac_get_autoneg(mac_cb, auto_neg);
484
485 hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
486
487
488 if (handle->port_type == HNAE_PORT_SERVICE)
489 hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
490 }
491
492 static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
493 {
494 assert(handle);
495
496 return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
497 }
498
499 static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
500 {
501 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
502
503 hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
504 hns_mac_set_promisc(mac_cb, (u8)!!en);
505 }
506
507 static int hns_ae_get_autoneg(struct hnae_handle *handle)
508 {
509 u32 auto_neg;
510
511 assert(handle);
512
513 hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
514
515 return auto_neg;
516 }
517
/*
 * Program autoneg plus RX/TX pause.  On service ports RX pause is
 * enforced at the DSAF level; after programming it there, rx_en is
 * cleared so the MAC is not asked to enable RX pause a second time.
 */
static int hns_ae_set_pauseparam(struct hnae_handle *handle,
				 u32 autoneg, u32 rx_en, u32 tx_en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
	int ret;

	ret = hns_mac_set_autoneg(mac_cb, autoneg);
	if (ret)
		return ret;

	/* service ports: RX pause lives in the DSAF, not the MAC */
	if (handle->port_type == HNAE_PORT_SERVICE) {
		ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
						   mac_cb->mac_id, rx_en);
		if (ret)
			return ret;
		rx_en = 0;	/* already handled above */
	}
	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}
539
540 static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
541 u32 *tx_usecs, u32 *rx_usecs)
542 {
543 struct ring_pair_cb *ring_pair =
544 container_of(handle->qs[0], struct ring_pair_cb, q);
545
546 *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
547 ring_pair->port_id_in_comm);
548 *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
549 ring_pair->port_id_in_comm);
550 }
551
/*
 * Report the frame-count coalescing thresholds.  V1 hardware and
 * debug ports have no separate TX counter, so the RX value is
 * reported for TX as well.
 */
static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
					    u32 *tx_frames, u32 *rx_frames)
{
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG)
		*tx_frames = hns_rcb_get_rx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	else
		*tx_frames = hns_rcb_get_tx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	*rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
						     ring_pair->port_id_in_comm);
}
569
570 static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
571 u32 timeout)
572 {
573 struct ring_pair_cb *ring_pair =
574 container_of(handle->qs[0], struct ring_pair_cb, q);
575
576 return hns_rcb_set_coalesce_usecs(
577 ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
578 }
579
/*
 * Program the frame-count coalescing thresholds.  V1 hardware and
 * debug ports share one counter, so tx_frames must equal rx_frames;
 * on V2 service ports the TX threshold is fixed at 1 and only the RX
 * threshold is programmable.
 */
static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
				      u32 tx_frames, u32 rx_frames)
{
	int ret;
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG) {
		/* single shared counter: both directions must agree */
		if (tx_frames != rx_frames)
			return -EINVAL;
		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	} else {
		/* V2 service ports only support a TX threshold of 1 */
		if (tx_frames != 1)
			return -EINVAL;
		ret = hns_rcb_set_tx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, tx_frames);
		if (ret)
			return ret;

		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	}
}
609
/*
 * Report the valid ranges for the coalescing parameters.  The frame
 * highs are capped at one less than the ring descriptor count; V2
 * service ports pin the TX frame threshold to exactly 1 (see
 * hns_ae_set_coalesce_frames()).
 */
static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
				      u32 *tx_frames_low, u32 *rx_frames_low,
				      u32 *tx_frames_high, u32 *rx_frames_high,
				      u32 *tx_usecs_low, u32 *rx_usecs_low,
				      u32 *tx_usecs_high, u32 *rx_usecs_high)
{
	struct dsaf_device *dsaf_dev;

	assert(handle);

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	*tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
	*rx_frames_low = HNS_RCB_RX_FRAMES_LOW;

	/* high bound = min(desc_num - 1, HNS_RCB_TX_FRAMES_HIGH) */
	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG)
		*tx_frames_high =
			(dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
			HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
	else
		*tx_frames_high = 1;

	/* high bound = min(desc_num - 1, HNS_RCB_RX_FRAMES_HIGH) */
	*rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
		HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
	*tx_usecs_low = HNS_RCB_TX_USECS_LOW;
	*rx_usecs_low = HNS_RCB_RX_USECS_LOW;
	*tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
	*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
640
/*
 * Aggregate hardware statistics from the RCB rings, PPE, DSAF
 * (service ports only) and MAC into @net_stats.
 */
static void hns_ae_update_stats(struct hnae_handle *handle,
				struct net_device_stats *net_stats)
{
	int port;
	int idx;
	struct dsaf_device *dsaf_dev;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_queue *queue;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
	u64 rx_missed_errors = 0;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return;
	port = vf_cb->port_index;
	ppe_cb = hns_get_ppe_cb(handle);
	mac_cb = hns_get_mac_cb(handle);

	/* per-ring byte/packet counters plus RX error sources */
	for (idx = 0; idx < handle->q_num; idx++) {
		queue = handle->qs[idx];
		hns_rcb_update_stats(queue);

		tx_bytes += queue->tx_ring.stats.tx_bytes;
		tx_packets += queue->tx_ring.stats.tx_pkts;
		rx_bytes += queue->rx_ring.stats.rx_bytes;
		rx_packets += queue->rx_ring.stats.rx_pkts;

		rx_errors += queue->rx_ring.stats.err_pkt_len
				+ queue->rx_ring.stats.l2_err
				+ queue->rx_ring.stats.l3l4_csum_err;
	}

	/* PPE-level buffer drops and TX errors */
	hns_ppe_update_stats(ppe_cb);
	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
	tx_errors += ppe_cb->hw_stats.tx_err_checksum
		+ ppe_cb->hw_stats.tx_err_fifo_empty;

	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
		/* ingress-side DSAF drops count as missed RX */
		hns_dsaf_update_stats(dsaf_dev, port);

		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;

		/* egress-side (PPE inode) DSAF drops count as dropped TX */
		port = port + DSAF_PPE_INODE_BASE;
		hns_dsaf_update_stats(dsaf_dev, port);
		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
	}

	/* MAC-level error counters */
	hns_mac_update_stats(mac_cb);
	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;

	tx_errors += mac_cb->hw_stats.tx_bad_pkts
		+ mac_cb->hw_stats.tx_fragment_err
		+ mac_cb->hw_stats.tx_jabber_err
		+ mac_cb->hw_stats.tx_underrun_err
		+ mac_cb->hw_stats.tx_crc_err;

	/* publish the aggregated values */
	net_stats->tx_bytes = tx_bytes;
	net_stats->tx_packets = tx_packets;
	net_stats->rx_bytes = rx_bytes;
	net_stats->rx_dropped = 0;
	net_stats->rx_packets = rx_packets;
	net_stats->rx_errors = rx_errors;
	net_stats->tx_errors = tx_errors;
	net_stats->tx_dropped = tx_dropped;
	net_stats->rx_missed_errors = rx_missed_errors;
	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
723
/*
 * Fill the ethtool statistics buffer @data.  The layout (rings, then
 * PPE, then MAC, then DSAF for service ports) must match
 * hns_ae_get_strings() and hns_ae_get_sset_count().
 */
static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u64 *p = data;
	struct hnae_vf_cb *vf_cb;

	if (!handle || !data) {
		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
		return;
	}

	vf_cb = hns_ae_get_vf_cb(handle);
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	/* per-ring counters first */
	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_stats(handle->qs[idx], p);
		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
	}

	hns_ppe_get_stats(ppe_cb, p);
	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);

	hns_mac_get_stats(mac_cb, p);
	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);

	/* DSAF counters exist only for service ports */
	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
755
/*
 * Fill the ethtool stat-name buffer @data.  The ordering must match
 * hns_ae_get_stats() and hns_ae_get_sset_count() exactly.
 */
static void hns_ae_get_strings(struct hnae_handle *handle,
			       u32 stringset, u8 *data)
{
	int port;
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	u8 *p = data;
	struct hnae_vf_cb *vf_cb;

	assert(handle);

	vf_cb = hns_ae_get_vf_cb(handle);
	port = vf_cb->port_index;
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	/* per-ring names first */
	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_strings(stringset, p, idx);
		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
	}

	hns_ppe_get_strings(ppe_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);

	hns_mac_get_strings(mac_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);

	/* DSAF names exist only for service ports */
	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
788
789 static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
790 {
791 u32 sset_count = 0;
792 struct hns_mac_cb *mac_cb;
793 struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
794
795 assert(handle);
796
797 mac_cb = hns_get_mac_cb(handle);
798
799 sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
800 sset_count += hns_ppe_get_sset_count(stringset);
801 sset_count += hns_mac_get_sset_count(mac_cb, stringset);
802
803 if (mac_cb->mac_type == HNAE_PORT_SERVICE)
804 sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
805
806 return sset_count;
807 }
808
809 static int hns_ae_config_loopback(struct hnae_handle *handle,
810 enum hnae_loop loop, int en)
811 {
812 int ret;
813 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
814 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
815 struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
816
817 switch (loop) {
818 case MAC_INTERNALLOOP_PHY:
819 ret = 0;
820 break;
821 case MAC_INTERNALLOOP_SERDES:
822 ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
823 !!en);
824 break;
825 case MAC_INTERNALLOOP_MAC:
826 ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
827 break;
828 default:
829 ret = -EINVAL;
830 }
831
832 return ret;
833 }
834
835 static void hns_ae_update_led_status(struct hnae_handle *handle)
836 {
837 struct hns_mac_cb *mac_cb;
838
839 assert(handle);
840 mac_cb = hns_get_mac_cb(handle);
841 if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
842 return;
843
844 hns_set_led_opt(mac_cb);
845 }
846
847 static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
848 enum hnae_led_state status)
849 {
850 struct hns_mac_cb *mac_cb;
851
852 assert(handle);
853
854 mac_cb = hns_get_mac_cb(handle);
855
856 return hns_cpld_led_set_id(mac_cb, status);
857 }
858
/*
 * Dump all hardware registers into @data: PPE, RCB common, per-ring,
 * MAC, and (service ports only) DSAF.  The layout must mirror the
 * sizes reported by hns_ae_get_regs_len().
 */
static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
	u32 *p = data;
	int i;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	hns_ppe_get_regs(ppe_cb, p);
	p += hns_ppe_get_regs_count();

	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
	p += hns_rcb_get_common_regs_count();

	for (i = 0; i < handle->q_num; i++) {
		hns_rcb_get_ring_regs(handle->qs[i], p);
		p += hns_rcb_get_ring_regs_count();
	}

	hns_mac_get_regs(vf_cb->mac_cb, p);
	p += hns_mac_get_regs_count(vf_cb->mac_cb);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
883
884 static int hns_ae_get_regs_len(struct hnae_handle *handle)
885 {
886 u32 total_num;
887 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
888
889 total_num = hns_ppe_get_regs_count();
890 total_num += hns_rcb_get_common_regs_count();
891 total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
892 total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
893
894 if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
895 total_num += hns_dsaf_get_regs_count();
896
897 return total_num;
898 }
899
/* ethtool get_rxfh_key_size: fixed PPEv2 RSS hash key length. */
static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle)
{
	return HNS_PPEV2_RSS_KEY_SIZE;
}
904
/* ethtool get_rxfh_indir_size: fixed PPEv2 RSS indirection table length. */
static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle)
{
	return HNS_PPEV2_RSS_IND_TBL_SIZE;
}
909
/*
 * ethtool get_rxfh: report the hash function (always Toeplitz), the
 * hash key and the indirection table from the PPE's cached copies.
 * Any output pointer may be NULL to skip that item.
 */
static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* only the Toeplitz hash is supported */
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (key)
		memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);

	if (indir)
		memcpy(indir, ppe_cb->rss_indir_table,
		       HNS_PPEV2_RSS_IND_TBL_SIZE  * sizeof(*indir));

	return 0;
}
930
/*
 * ethtool set_rxfh: update the cached RSS key and/or indirection
 * table and program them into the PPE hardware.  @hfunc is accepted
 * unchecked here -- NOTE(review): presumably the ethtool core only
 * passes ETH_RSS_HASH_TOP/NO_CHANGE; confirm validation upstream.
 */
static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* new hash key: cache it, then push to hardware */
	if (key) {
		memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
		hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
	}

	if (indir) {
		/* update the cached shadow table */
		memcpy(ppe_cb->rss_indir_table, indir,
		       HNS_PPEV2_RSS_IND_TBL_SIZE  * sizeof(*indir));

		/* push it to hardware */
		hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
	}

	return 0;
}
953
/*
 * AE operations exported to the HNAE framework.  toggle_ring_irq is
 * patched to the chip-specific variant in hns_dsaf_ae_init().
 */
static struct hnae_ae_ops hns_dsaf_ops = {
	.get_handle = hns_ae_get_handle,
	.put_handle = hns_ae_put_handle,
	.init_queue = hns_ae_init_queue,
	.fini_queue = hns_ae_fini_queue,
	.start = hns_ae_start,
	.stop = hns_ae_stop,
	.reset = hns_ae_reset,
	.toggle_ring_irq = hns_ae_toggle_ring_irq,
	.get_status = hns_ae_get_link_status,
	.get_info = hns_ae_get_mac_info,
	.adjust_link = hns_ae_adjust_link,
	.need_adjust_link = hns_ae_need_adjust_link,
	.set_loopback = hns_ae_config_loopback,
	.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
	.get_pauseparam = hns_ae_get_pauseparam,
	.set_autoneg = hns_ae_set_autoneg,
	.get_autoneg = hns_ae_get_autoneg,
	.set_pauseparam = hns_ae_set_pauseparam,
	.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
	.get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
	.set_coalesce_frames = hns_ae_set_coalesce_frames,
	.get_coalesce_range = hns_ae_get_coalesce_range,
	.set_promisc_mode = hns_ae_set_promisc_mode,
	.set_mac_addr = hns_ae_set_mac_address,
	.add_uc_addr = hns_ae_add_uc_address,
	.rm_uc_addr = hns_ae_rm_uc_address,
	.set_mc_addr = hns_ae_set_multicast_one,
	.clr_mc_addr = hns_ae_clr_multicast,
	.set_mtu = hns_ae_set_mtu,
	.update_stats = hns_ae_update_stats,
	.set_tso_stats = hns_ae_set_tso_stats,
	.get_stats = hns_ae_get_stats,
	.get_strings = hns_ae_get_strings,
	.get_sset_count = hns_ae_get_sset_count,
	.update_led_status = hns_ae_update_led_status,
	.set_led_id = hns_ae_cpld_set_led_id,
	.get_regs = hns_ae_get_regs,
	.get_regs_len = hns_ae_get_regs_len,
	.get_rss_key_size = hns_ae_get_rss_key_size,
	.get_rss_indir_size = hns_ae_get_rss_indir_size,
	.get_rss = hns_ae_get_rss,
	.set_rss = hns_ae_set_rss
};
999
/*
 * Register this DSAF device with the HNAE framework: select the
 * version-specific ring-IRQ toggle op, assign a unique device name
 * and hook up the shared ops table.
 */
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
{
	struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
	/* monotonically increasing index for unique AE device names */
	static atomic_t id = ATOMIC_INIT(-1);

	/* NOTE(review): hns_dsaf_ops is shared between devices; mixing
	 * V1 and V2 devices would leave toggle_ring_irq set by whichever
	 * registered last -- presumably never mixed in practice.
	 */
	switch (dsaf_dev->dsaf_ver) {
	case AE_VERSION_1:
		hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
		break;
	case AE_VERSION_2:
		hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
		break;
	default:
		break;
	}

	snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
		 (int)atomic_inc_return(&id));
	ae_dev->ops = &hns_dsaf_ops;
	ae_dev->dev = dsaf_dev->dev;

	return hnae_ae_register(ae_dev, THIS_MODULE);
}
1023
/* Unregister the AE device registered by hns_dsaf_ae_init(). */
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
{
	hnae_ae_unregister(&dsaf_dev->ae_dev);
}