1/*
2 * Copyright (c) 2014-2015 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/netdevice.h>
12#include <linux/spinlock.h>
13
14#include "hnae.h"
15#include "hns_dsaf_mac.h"
16#include "hns_dsaf_main.h"
17#include "hns_dsaf_ppe.h"
18#include "hns_dsaf_rcb.h"
19
20#define AE_NAME_PORT_ID_IDX 6
21#define ETH_STATIC_REG	 1
22#define ETH_DUMP_REG	 5
23#define ETH_GSTRING_LEN	32
24
25static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
26{
27	struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
28
29	return vf_cb->mac_cb;
30}
31
32/**
33 * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
34 * @port_id: enet port id
35 *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
36 * return: dsaf port id
37 *: service ports 0 - 5, debug port 6-7
38 **/
39static int hns_ae_map_eport_to_dport(u32 port_id)
40{
41	int port_index;
42
43	if (port_id < DSAF_DEBUG_NW_NUM)
44		port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
45	else
46		port_index = port_id - DSAF_DEBUG_NW_NUM;
47
48	return port_index;
49}
50
51static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
52{
53	return container_of(dev, struct dsaf_device, ae_dev);
54}
55
56static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
57{
58	int ppe_index;
59	int ppe_common_index;
60	struct ppe_common_cb *ppe_comm;
61	struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
62
63	if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
64		ppe_index = vf_cb->port_index;
65		ppe_common_index = 0;
66	} else {
67		ppe_index = 0;
68		ppe_common_index =
69			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
70	}
71	ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
72	return &ppe_comm->ppe_cb[ppe_index];
73}
74
75static int hns_ae_get_q_num_per_vf(
76	struct dsaf_device *dsaf_dev, int port)
77{
78	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
79
80	return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
81}
82
83static int hns_ae_get_vf_num_per_port(
84	struct dsaf_device *dsaf_dev, int port)
85{
86	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
87
88	return dsaf_dev->rcb_common[common_idx]->max_vfn;
89}
90
91static struct ring_pair_cb *hns_ae_get_base_ring_pair(
92	struct dsaf_device *dsaf_dev, int port)
93{
94	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
95	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
96	int q_num = rcb_comm->max_q_per_vf;
97	int vf_num = rcb_comm->max_vfn;
98
99	if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
100		return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
101	else
102		return &rcb_comm->ring_pair_cb[0];
103}
104
105static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
106{
107	return container_of(q, struct ring_pair_cb, q);
108}
109
/**
 * hns_ae_get_handle - allocate a handle and claim one VF's ring pairs
 * @dev: acceleration engine device
 * @port_id: enet port id (debug ports 0-1, service ports 2-7)
 *
 * Allocates a vf_cb (which embeds the returned hnae_handle plus the
 * trailing qs[] pointer array), finds the first VF slot on the port
 * whose base ring pair is unused, and marks all of that VF's ring
 * pairs as taken.
 *
 * Return: the new handle, or ERR_PTR(-ENOMEM) if allocation fails,
 * or ERR_PTR(-EINVAL) if every VF slot on the port is already in use.
 */
struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
				      u32 port_id)
{
	int port_idx;
	int vfnum_per_port;
	int qnum_per_vf;
	int i;
	struct dsaf_device *dsaf_dev;
	struct hnae_handle *ae_handle;
	struct ring_pair_cb *ring_pair_cb;
	struct hnae_vf_cb *vf_cb;

	dsaf_dev = hns_ae_get_dsaf_dev(dev);
	port_idx = hns_ae_map_eport_to_dport(port_id);

	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);

	/* Extra space past vf_cb holds the qs[] pointer array. */
	vf_cb = kzalloc(sizeof(*vf_cb) +
			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
	if (unlikely(!vf_cb)) {
		dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
		ae_handle = ERR_PTR(-ENOMEM);
		goto handle_err;
	}
	ae_handle = &vf_cb->ae_handle;
	/* ae_handle Init  */
	ae_handle->owner_dev = dsaf_dev->dev;
	ae_handle->dev = dev;
	ae_handle->q_num = qnum_per_vf;

	/* find ring pair, and set vf id*/
	/* Each VF owns qnum_per_vf consecutive ring pairs; a VF slot is
	 * free iff its first ring pair is not used_by_vf.
	 */
	for (ae_handle->vf_id = 0;
		ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
		if (!ring_pair_cb->used_by_vf)
			break;
		ring_pair_cb += qnum_per_vf;
	}
	if (ae_handle->vf_id >= vfnum_per_port) {
		dev_err(dsaf_dev->dev, "malloc queue fail!\n");
		ae_handle = ERR_PTR(-EINVAL);
		goto vf_id_err;
	}

	/* Point qs[] at the space allocated past the handle.
	 * NOTE(review): this relies on ae_handle being the last member
	 * of vf_cb and qs the last member of hnae_handle -- confirm
	 * against hnae.h if either struct changes.
	 */
	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
	for (i = 0; i < qnum_per_vf; i++) {
		ae_handle->qs[i] = &ring_pair_cb->q;
		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];

		ring_pair_cb->used_by_vf = 1;
		/* Debug ports report dsa port id 0. */
		if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
			ring_pair_cb->port_id_in_dsa = port_idx;
		else
			ring_pair_cb->port_id_in_dsa = 0;

		ring_pair_cb++;
	}

	vf_cb->dsaf_dev = dsaf_dev;
	vf_cb->port_index = port_idx;
	vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];

	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
	ae_handle->phy_node = vf_cb->mac_cb->phy_node;
	ae_handle->if_support = vf_cb->mac_cb->if_support;
	ae_handle->port_type = vf_cb->mac_cb->mac_type;

	return ae_handle;
vf_id_err:
	kfree(vf_cb);
handle_err:
	return ae_handle;
}
185
186static void hns_ae_put_handle(struct hnae_handle *handle)
187{
188	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
189	int i;
190
191	vf_cb->mac_cb	 = NULL;
192
193	kfree(vf_cb);
194
195	for (i = 0; i < handle->q_num; i++)
196		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
197}
198
199static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
200{
201	int q_num = handle->q_num;
202	int i;
203
204	for (i = 0; i < q_num; i++)
205		hns_rcb_ring_enable_hw(handle->qs[i], val);
206}
207
208static void hns_ae_init_queue(struct hnae_queue *q)
209{
210	struct ring_pair_cb *ring =
211		container_of(q, struct ring_pair_cb, q);
212
213	hns_rcb_init_hw(ring);
214}
215
216static void hns_ae_fini_queue(struct hnae_queue *q)
217{
218	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
219
220	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
221		hns_rcb_reset_ring_hw(q);
222}
223
224static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
225{
226	int ret;
227	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
228
229	if (!p || !is_valid_ether_addr((const u8 *)p)) {
230		dev_err(handle->owner_dev, "is not valid ether addr !\n");
231		return -EADDRNOTAVAIL;
232	}
233
234	ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
235	if (ret != 0) {
236		dev_err(handle->owner_dev,
237			"set_mac_address fail, ret=%d!\n", ret);
238		return ret;
239	}
240
241	return 0;
242}
243
244static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
245{
246	int ret;
247	char *mac_addr = (char *)addr;
248	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
249
250	assert(mac_cb);
251
252	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
253		return 0;
254
255	ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE);
256	if (ret) {
257		dev_err(handle->owner_dev,
258			"mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
259			mac_addr, mac_cb->mac_id, ret);
260		return ret;
261	}
262
263	ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
264				mac_addr, ENABLE);
265	if (ret)
266		dev_err(handle->owner_dev,
267			"mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
268			mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
269
270	return ret;
271}
272
/* Forward an MTU change to the port's MAC layer. */
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
	return hns_mac_set_mtu(hns_get_mac_cb(handle), new_mtu);
}
279
280static int hns_ae_start(struct hnae_handle *handle)
281{
282	int ret;
283	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
284
285	ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE);
286	if (ret)
287		return ret;
288
289	hns_ae_ring_enable_all(handle, 1);
290	msleep(100);
291
292	hns_mac_start(mac_cb);
293
294	return 0;
295}
296
297void hns_ae_stop(struct hnae_handle *handle)
298{
299	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
300
301	/* just clean tx fbd, neednot rx fbd*/
302	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
303
304	msleep(20);
305
306	hns_mac_stop(mac_cb);
307
308	usleep_range(10000, 20000);
309
310	hns_ae_ring_enable_all(handle, 0);
311
312	(void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE);
313}
314
315static void hns_ae_reset(struct hnae_handle *handle)
316{
317	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
318
319	if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
320		u8 ppe_common_index =
321			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
322
323		hns_mac_reset(vf_cb->mac_cb);
324		hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
325	}
326}
327
328void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
329{
330	u32 flag;
331
332	if (is_tx_ring(ring))
333		flag = RCB_INT_FLAG_TX;
334	else
335		flag = RCB_INT_FLAG_RX;
336
337	hns_rcb_int_clr_hw(ring->q, flag);
338	hns_rcb_int_ctrl_hw(ring->q, flag, mask);
339}
340
341static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
342{
343	hns_rcb_start(queue, val);
344}
345
346static int hns_ae_get_link_status(struct hnae_handle *handle)
347{
348	u32 link_status;
349	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
350
351	hns_mac_get_link_status(mac_cb, &link_status);
352
353	return !!link_status;
354}
355
356static int hns_ae_get_mac_info(struct hnae_handle *handle,
357			       u8 *auto_neg, u16 *speed, u8 *duplex)
358{
359	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
360
361	return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
362}
363
/* Push a PHY-negotiated speed/duplex change down to the MAC. */
static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
			       int duplex)
{
	hns_mac_adjust_link(hns_get_mac_cb(handle), speed, duplex);
}
371
372static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
373					u32 *uplimit)
374{
375	*uplimit = HNS_RCB_RING_MAX_PENDING_BD;
376}
377
378static void hns_ae_get_pauseparam(struct hnae_handle *handle,
379				  u32 *auto_neg, u32 *rx_en, u32 *tx_en)
380{
381	assert(handle);
382
383	hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
384
385	hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
386}
387
388static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
389{
390	assert(handle);
391
392	return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
393}
394
395static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
396{
397	hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
398}
399
400static int hns_ae_get_autoneg(struct hnae_handle *handle)
401{
402	u32     auto_neg;
403
404	assert(handle);
405
406	hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
407
408	return auto_neg;
409}
410
411static int hns_ae_set_pauseparam(struct hnae_handle *handle,
412				 u32 autoneg, u32 rx_en, u32 tx_en)
413{
414	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
415	int ret;
416
417	ret = hns_mac_set_autoneg(mac_cb, autoneg);
418	if (ret)
419		return ret;
420
421	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
422}
423
424static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
425				      u32 *tx_usecs, u32 *rx_usecs)
426{
427	int port;
428
429	port = hns_ae_map_eport_to_dport(handle->eport_id);
430
431	*tx_usecs = hns_rcb_get_coalesce_usecs(
432		hns_ae_get_dsaf_dev(handle->dev),
433		hns_dsaf_get_comm_idx_by_port(port));
434	*rx_usecs = hns_rcb_get_coalesce_usecs(
435		hns_ae_get_dsaf_dev(handle->dev),
436		hns_dsaf_get_comm_idx_by_port(port));
437}
438
439static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
440					       u32 *tx_frames, u32 *rx_frames)
441{
442	int port;
443
444	assert(handle);
445
446	port = hns_ae_map_eport_to_dport(handle->eport_id);
447
448	*tx_frames = hns_rcb_get_coalesced_frames(
449		hns_ae_get_dsaf_dev(handle->dev), port);
450	*rx_frames = hns_rcb_get_coalesced_frames(
451		hns_ae_get_dsaf_dev(handle->dev), port);
452}
453
454static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
455				      u32 timeout)
456{
457	int port;
458
459	assert(handle);
460
461	port = hns_ae_map_eport_to_dport(handle->eport_id);
462
463	hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
464				   port, timeout);
465}
466
467static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
468				       u32 coalesce_frames)
469{
470	int port;
471	int ret;
472
473	assert(handle);
474
475	port = hns_ae_map_eport_to_dport(handle->eport_id);
476
477	ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
478					   port, coalesce_frames);
479	return ret;
480}
481
/**
 * hns_ae_update_stats - refresh hardware counters and fill net stats
 * @handle: the handle whose port is being queried
 * @net_stats: output structure in netdev format
 *
 * Aggregates ring (RCB), PPE, DSAF (service ports only) and MAC
 * counters into @net_stats.  Reads hardware via the *_update_stats
 * helpers before accumulating.
 */
void hns_ae_update_stats(struct hnae_handle *handle,
			 struct net_device_stats *net_stats)
{
	int port;
	int idx;
	struct dsaf_device *dsaf_dev;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_queue *queue;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
	u64 rx_missed_errors = 0;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return;
	port = vf_cb->port_index;
	ppe_cb = hns_get_ppe_cb(handle);
	mac_cb = hns_get_mac_cb(handle);

	/* Sum per-ring byte/packet/error counters across the VF's queues. */
	for (idx = 0; idx < handle->q_num; idx++) {
		queue = handle->qs[idx];
		hns_rcb_update_stats(queue);

		tx_bytes += queue->tx_ring.stats.tx_bytes;
		tx_packets += queue->tx_ring.stats.tx_pkts;
		rx_bytes += queue->rx_ring.stats.rx_bytes;
		rx_packets += queue->rx_ring.stats.rx_pkts;

		rx_errors += queue->rx_ring.stats.err_pkt_len
				+ queue->rx_ring.stats.l2_err
				+ queue->rx_ring.stats.l3l4_csum_err;
	}

	/* Fold in PPE drop and checksum/FIFO error counters. */
	hns_ppe_update_stats(ppe_cb);
	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
	tx_errors += ppe_cb->hw_stats.tx_err_checksum
		+ ppe_cb->hw_stats.tx_err_fifo_empty;

	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
		hns_dsaf_update_stats(dsaf_dev, port);
		/* for port upline direction, i.e., rx. */
		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;

		/* for port downline direction, i.e., tx. */
		/* NOTE: port is intentionally re-used as the inode index
		 * (port + DSAF_PPE_INODE_BASE) from here on.
		 */
		port = port + DSAF_PPE_INODE_BASE;
		hns_dsaf_update_stats(dsaf_dev, port);
		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
	}

	/* Fold in MAC-level error counters. */
	hns_mac_update_stats(mac_cb);
	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;

	tx_errors += mac_cb->hw_stats.tx_bad_pkts
		+ mac_cb->hw_stats.tx_fragment_err
		+ mac_cb->hw_stats.tx_jabber_err
		+ mac_cb->hw_stats.tx_underrun_err
		+ mac_cb->hw_stats.tx_crc_err;

	net_stats->tx_bytes = tx_bytes;
	net_stats->tx_packets = tx_packets;
	net_stats->rx_bytes = rx_bytes;
	net_stats->rx_dropped = 0;
	net_stats->rx_packets = rx_packets;
	net_stats->rx_errors = rx_errors;
	net_stats->tx_errors = tx_errors;
	net_stats->tx_dropped = tx_dropped;
	net_stats->rx_missed_errors = rx_missed_errors;
	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
564
565void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
566{
567	int idx;
568	struct hns_mac_cb *mac_cb;
569	struct hns_ppe_cb *ppe_cb;
570	u64 *p = data;
571	struct  hnae_vf_cb *vf_cb;
572
573	if (!handle || !data) {
574		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
575		return;
576	}
577
578	vf_cb = hns_ae_get_vf_cb(handle);
579	mac_cb = hns_get_mac_cb(handle);
580	ppe_cb = hns_get_ppe_cb(handle);
581
582	for (idx = 0; idx < handle->q_num; idx++) {
583		hns_rcb_get_stats(handle->qs[idx], p);
584		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
585	}
586
587	hns_ppe_get_stats(ppe_cb, p);
588	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
589
590	hns_mac_get_stats(mac_cb, p);
591	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
592
593	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
594		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
595}
596
597void hns_ae_get_strings(struct hnae_handle *handle,
598			u32 stringset, u8 *data)
599{
600	int port;
601	int idx;
602	struct hns_mac_cb *mac_cb;
603	struct hns_ppe_cb *ppe_cb;
604	u8 *p = data;
605	struct	hnae_vf_cb *vf_cb;
606
607	assert(handle);
608
609	vf_cb = hns_ae_get_vf_cb(handle);
610	port = vf_cb->port_index;
611	mac_cb = hns_get_mac_cb(handle);
612	ppe_cb = hns_get_ppe_cb(handle);
613
614	for (idx = 0; idx < handle->q_num; idx++) {
615		hns_rcb_get_strings(stringset, p, idx);
616		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
617	}
618
619	hns_ppe_get_strings(ppe_cb, stringset, p);
620	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
621
622	hns_mac_get_strings(mac_cb, stringset, p);
623	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
624
625	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
626		hns_dsaf_get_strings(stringset, p, port);
627}
628
629int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
630{
631	u32 sset_count = 0;
632	struct hns_mac_cb *mac_cb;
633
634	assert(handle);
635
636	mac_cb = hns_get_mac_cb(handle);
637
638	sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
639	sset_count += hns_ppe_get_sset_count(stringset);
640	sset_count += hns_mac_get_sset_count(mac_cb, stringset);
641
642	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
643		sset_count += hns_dsaf_get_sset_count(stringset);
644
645	return sset_count;
646}
647
648static int hns_ae_config_loopback(struct hnae_handle *handle,
649				  enum hnae_loop loop, int en)
650{
651	int ret;
652	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
653
654	switch (loop) {
655	case MAC_INTERNALLOOP_SERDES:
656		ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
657		break;
658	case MAC_INTERNALLOOP_MAC:
659		ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
660		break;
661	default:
662		ret = -EINVAL;
663	}
664	return ret;
665}
666
667void hns_ae_update_led_status(struct hnae_handle *handle)
668{
669	struct hns_mac_cb *mac_cb;
670
671	assert(handle);
672	mac_cb = hns_get_mac_cb(handle);
673	if (!mac_cb->cpld_vaddr)
674		return;
675	hns_set_led_opt(mac_cb);
676}
677
678int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
679			   enum hnae_led_state status)
680{
681	struct hns_mac_cb *mac_cb;
682
683	assert(handle);
684
685	mac_cb = hns_get_mac_cb(handle);
686
687	return hns_cpld_led_set_id(mac_cb, status);
688}
689
690void hns_ae_get_regs(struct hnae_handle *handle, void *data)
691{
692	u32 *p = data;
693	u32 rcb_com_idx;
694	int i;
695	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
696	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
697
698	hns_ppe_get_regs(ppe_cb, p);
699	p += hns_ppe_get_regs_count();
700
701	rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
702	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
703	p += hns_rcb_get_common_regs_count();
704
705	for (i = 0; i < handle->q_num; i++) {
706		hns_rcb_get_ring_regs(handle->qs[i], p);
707		p += hns_rcb_get_ring_regs_count();
708	}
709
710	hns_mac_get_regs(vf_cb->mac_cb, p);
711	p += hns_mac_get_regs_count(vf_cb->mac_cb);
712
713	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
714		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
715}
716
717int hns_ae_get_regs_len(struct hnae_handle *handle)
718{
719	u32 total_num;
720	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
721
722	total_num = hns_ppe_get_regs_count();
723	total_num += hns_rcb_get_common_regs_count();
724	total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
725	total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
726
727	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
728		total_num += hns_dsaf_get_regs_count();
729
730	return total_num;
731}
732
/* hnae acceleration-engine operations exposed by the DSAF backend;
 * registered with the hnae framework in hns_dsaf_ae_init().
 */
static struct hnae_ae_ops hns_dsaf_ops = {
	.get_handle = hns_ae_get_handle,
	.put_handle = hns_ae_put_handle,
	.init_queue = hns_ae_init_queue,
	.fini_queue = hns_ae_fini_queue,
	.start = hns_ae_start,
	.stop = hns_ae_stop,
	.reset = hns_ae_reset,
	.toggle_ring_irq = hns_ae_toggle_ring_irq,
	.toggle_queue_status = hns_ae_toggle_queue_status,
	.get_status = hns_ae_get_link_status,
	.get_info = hns_ae_get_mac_info,
	.adjust_link = hns_ae_adjust_link,
	.set_loopback = hns_ae_config_loopback,
	.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
	.get_pauseparam = hns_ae_get_pauseparam,
	.set_autoneg = hns_ae_set_autoneg,
	.get_autoneg = hns_ae_get_autoneg,
	.set_pauseparam = hns_ae_set_pauseparam,
	.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
	.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
	.set_coalesce_frames = hns_ae_set_coalesce_frames,
	.set_promisc_mode = hns_ae_set_promisc_mode,
	.set_mac_addr = hns_ae_set_mac_address,
	.set_mc_addr = hns_ae_set_multicast_one,
	.set_mtu = hns_ae_set_mtu,
	.update_stats = hns_ae_update_stats,
	.get_stats = hns_ae_get_stats,
	.get_strings = hns_ae_get_strings,
	.get_sset_count = hns_ae_get_sset_count,
	.update_led_status = hns_ae_update_led_status,
	.set_led_id = hns_ae_cpld_set_led_id,
	.get_regs = hns_ae_get_regs,
	.get_regs_len = hns_ae_get_regs_len
};
769
770int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
771{
772	struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
773
774	ae_dev->ops = &hns_dsaf_ops;
775	ae_dev->dev = dsaf_dev->dev;
776
777	return hnae_ae_register(ae_dev, THIS_MODULE);
778}
779
780void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
781{
782	hnae_ae_unregister(&dsaf_dev->ae_dev);
783}
784