This source file includes the following definitions:
- ring_dist
- ring_space
- is_ring_empty
- hnae_reserve_buffer_map
- hnae_alloc_buffer_attach
- hnae_buffer_detach
- hnae_free_buffer_detach
- hnae_replace_buffer
- hnae_reuse_buffer
- hnae_reinit_all_ring_desc
- hnae_reinit_all_ring_page_off
#ifndef __HNAE_H
#define __HNAE_H

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/phy.h>
#include <linux/types.h>

#define HNAE_DRIVER_VERSION "2.0"
#define HNAE_DRIVER_NAME "hns"
#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"

#ifdef DEBUG

#ifndef assert
#define assert(expr) \
do { \
	if (!(expr)) { \
		pr_err("Assertion failed! %s, %s, %s, line %d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
#endif

#else

#ifndef assert
#define assert(expr)
#endif

#endif

#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16

#define BD_SIZE_2048_MAX_MTU 6000

#define RCB_REG_BASEADDR_L 0x00
#define RCB_REG_BASEADDR_H 0x04
#define RCB_REG_BD_NUM 0x08
#define RCB_REG_BD_LEN 0x0C
#define RCB_REG_PKTLINE 0x10
#define RCB_REG_TAIL 0x18
#define RCB_REG_HEAD 0x1C
#define RCB_REG_FBDNUM 0x20
#define RCB_REG_OFFSET 0x24
#define RCB_REG_PKTNUM_RECORD 0x2C

#define HNS_RX_HEAD_SIZE 256

#define HNAE_AE_REGISTER 0x1

#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)

#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
#define HNAE_LOW_LATENCY_COAL_PARAM 80
#define HNAE_BULK_LATENCY_COAL_PARAM 150

enum hnae_led_state {
	HNAE_LED_INACTIVE,
	HNAE_LED_ACTIVE,
	HNAE_LED_ON,
	HNAE_LED_OFF
};

#define HNS_RX_FLAG_VLAN_PRESENT 0x1
#define HNS_RX_FLAG_L3ID_IPV4 0x0
#define HNS_RX_FLAG_L3ID_IPV6 0x1
#define HNS_RX_FLAG_L4ID_UDP 0x0
#define HNS_RX_FLAG_L4ID_TCP 0x1
#define HNS_RX_FLAG_L4ID_SCTP 0x3

#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
#define HNS_TXD_BUFNUM_S 8
#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
#define HNS_TXD_PORTID_S 10
#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)

#define HNS_TXD_RA_B 8
#define HNS_TXD_RI_B 9
#define HNS_TXD_L4CS_B 10
#define HNS_TXD_L3CS_B 11
#define HNS_TXD_FE_B 12
#define HNS_TXD_VLD_B 13
#define HNS_TXD_IPOFFSET_S 14
#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)

#define HNS_RXD_IPOFFSET_S 0
#define HNS_RXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
#define HNS_RXD_BUFNUM_S 8
#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
#define HNS_RXD_PORTID_S 10
#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
#define HNS_RXD_DMAC_S 13
#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
#define HNS_RXD_VLAN_S 15
#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
#define HNS_RXD_L3ID_S 17
#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
#define HNS_RXD_L4ID_S 21
#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
#define HNS_RXD_FE_B 25
#define HNS_RXD_FRAG_B 26
#define HNS_RXD_VLD_B 27
#define HNS_RXD_L2E_B 28
#define HNS_RXD_L3E_B 29
#define HNS_RXD_L4E_B 30
#define HNS_RXD_DROP_B 31

#define HNS_RXD_VLANID_S 8
#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
#define HNS_RXD_CFI_B 20
#define HNS_RXD_PRI_S 21
#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
#define HNS_RXD_ASID_S 24
#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)

#define HNSV2_TXD_BUFNUM_S 0
#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
#define HNSV2_TXD_PORTID_S 4
#define HNSV2_TXD_PORTID_M (0x7 << HNSV2_TXD_PORTID_S)
#define HNSV2_TXD_RI_B 1
#define HNSV2_TXD_L4CS_B 2
#define HNSV2_TXD_L3CS_B 3
#define HNSV2_TXD_FE_B 4
#define HNSV2_TXD_VLD_B 5

#define HNSV2_TXD_TSE_B 0
#define HNSV2_TXD_VLAN_EN_B 1
#define HNSV2_TXD_SNAP_B 2
#define HNSV2_TXD_IPV6_B 3
#define HNSV2_TXD_SCTP_B 4

struct __packed hnae_desc {
	__le64 addr;
	union {
		struct {
			union {
				__le16 asid_bufnum_pid;
				__le16 asid;
			};
			__le16 send_size;
			union {
				__le32 flag_ipoffset;
				struct {
					__u8 bn_pid;
					__u8 ra_ri_cs_fe_vld;
					__u8 ip_offset;
					__u8 tse_vlan_snap_v6_sctp_nth;
				};
			};
			__le16 mss;
			__u8 l4_len;
			__u8 reserved1;
			__le16 paylen;
			__u8 vmid;
			__u8 qid;
			__le32 reserved2[2];
		} tx;

		struct {
			__le32 ipoff_bnum_pid_flag;
			__le16 pkt_len;
			__le16 size;
			union {
				__le32 vlan_pri_asid;
				struct {
					__le16 asid;
					__le16 vlan_cfi_pri;
				};
			};
			__le32 rss_hash;
			__le32 reserved_1[2];
		} rx;
	};
};

struct hnae_desc_cb {
	dma_addr_t dma;
	void *buf;

	void *priv;
	u32 page_offset;
	u32 length;

	u16 reuse_flag;

	u16 type;
};

#define setflags(flags, bits) ((flags) |= (bits))
#define unsetflags(flags, bits) ((flags) &= ~(bits))

#define RINGF_DIR 0x1
#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
#define is_rx_ring(ring) (!is_tx_ring(ring))
#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

struct ring_stats {
	u64 io_err_cnt;
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_err_cnt;
			u64 restart_queue;
			u64 tx_busy;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 non_vld_descs;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
		};
	};
};

struct hnae_queue;

struct hnae_ring {
	u8 __iomem *io_base;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	struct hnae_queue *q;
	int irq;
	char ring_name[RCB_RING_NAME_LEN];

	struct ring_stats stats;

	dma_addr_t desc_dma_addr;
	u32 buf_size;
	u16 desc_num;
	u16 max_desc_num_per_pkt;
	u16 max_raw_data_sz_per_desc;
	u16 max_pkt_size;
	int next_to_use;

	int next_to_clean;

	int flags;
	int irq_init_flag;

	u64 coal_last_rx_bytes;
	unsigned long coal_last_jiffies;
	u32 coal_param;
	u32 coal_rx_rate;
};

#define ring_ptr_move_fw(ring, p) \
	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)

enum hns_desc_type {
	DESC_TYPE_SKB,
	DESC_TYPE_PAGE,
};

#define assert_is_ring_idx(ring, idx) \
	assert((idx) >= 0 && (idx) < (ring)->desc_num)

static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
{
	assert_is_ring_idx(ring, begin);
	assert_is_ring_idx(ring, end);

	return (end - begin + ring->desc_num) % ring->desc_num;
}

static inline int ring_space(struct hnae_ring *ring)
{
	return ring->desc_num -
		ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
}

static inline int is_ring_empty(struct hnae_ring *ring)
{
	assert_is_ring_idx(ring, ring->next_to_use);
	assert_is_ring_idx(ring, ring->next_to_clean);

	return ring->next_to_use == ring->next_to_clean;
}
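
/* Example (illustrative sketch): a transmit path would typically check
 * ring_space() before consuming descriptors and only then advance the
 * producer index with ring_ptr_move_fw().  example_tx_reserve() and its
 * buf_num parameter are hypothetical names, not part of the hnae API.
 */
static inline int example_tx_reserve(struct hnae_ring *ring, int buf_num)
{
	if (ring_space(ring) < buf_num)
		return -EBUSY;	/* caller would stop the netdev tx queue */

	while (buf_num--)
		ring_ptr_move_fw(ring, next_to_use);

	return 0;
}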

#define hnae_buf_size(_ring) ((_ring)->buf_size)
#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
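
/* Worked example (illustrative, assuming 4 KiB pages): for a buf_size of
 * 2048, get_order(2048) is 0, so hnae_page_order() is 0 and
 * hnae_page_size() evaluates to a single 4096-byte page.
 */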

struct hnae_handle;

struct hnae_buf_ops {
	int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
};

struct hnae_queue {
	u8 __iomem *io_base;
	phys_addr_t phy_base;
	struct hnae_ae_dev *dev;
	struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
	struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp;
	struct hnae_handle *handle;
};

enum hnae_loop {
	MAC_INTERNALLOOP_MAC = 0,
	MAC_INTERNALLOOP_SERDES,
	MAC_INTERNALLOOP_PHY,
	MAC_LOOP_PHY_NONE,
	MAC_LOOP_NONE,
};

enum hnae_port_type {
	HNAE_PORT_SERVICE = 0,
	HNAE_PORT_DEBUG
};

enum hnae_media_type {
	HNAE_MEDIA_TYPE_UNKNOWN = 0,
	HNAE_MEDIA_TYPE_FIBER,
	HNAE_MEDIA_TYPE_COPPER,
	HNAE_MEDIA_TYPE_BACKPLANE,
};

struct hnae_ae_ops {
	struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
					  u32 port_id);
	void (*put_handle)(struct hnae_handle *handle);
	void (*init_queue)(struct hnae_queue *q);
	void (*fini_queue)(struct hnae_queue *q);
	int (*start)(struct hnae_handle *handle);
	void (*stop)(struct hnae_handle *handle);
	void (*reset)(struct hnae_handle *handle);
	int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
	int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
	int (*get_status)(struct hnae_handle *handle);
	int (*get_info)(struct hnae_handle *handle,
			u8 *auto_neg, u16 *speed, u8 *duplex);
	void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
	void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
	bool (*need_adjust_link)(struct hnae_handle *handle,
				 int speed, int duplex);
	int (*set_loopback)(struct hnae_handle *handle,
			    enum hnae_loop loop_mode, int en);
	void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
				     u32 *uplimit);
	void (*get_pauseparam)(struct hnae_handle *handle,
			       u32 *auto_neg, u32 *rx_en, u32 *tx_en);
	int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
	int (*get_autoneg)(struct hnae_handle *handle);
	int (*set_pauseparam)(struct hnae_handle *handle,
			      u32 auto_neg, u32 rx_en, u32 tx_en);
	void (*get_coalesce_usecs)(struct hnae_handle *handle,
				   u32 *tx_usecs, u32 *rx_usecs);
	void (*get_max_coalesced_frames)(struct hnae_handle *handle,
					 u32 *tx_frames, u32 *rx_frames);
	int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
	int (*set_coalesce_frames)(struct hnae_handle *handle,
				   u32 tx_frames, u32 rx_frames);
	void (*get_coalesce_range)(struct hnae_handle *handle,
				   u32 *tx_frames_low, u32 *rx_frames_low,
				   u32 *tx_frames_high, u32 *rx_frames_high,
				   u32 *tx_usecs_low, u32 *rx_usecs_low,
				   u32 *tx_usecs_high, u32 *rx_usecs_high);
	void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
	int (*get_mac_addr)(struct hnae_handle *handle, void **p);
	int (*set_mac_addr)(struct hnae_handle *handle, void *p);
	int (*add_uc_addr)(struct hnae_handle *handle,
			   const unsigned char *addr);
	int (*rm_uc_addr)(struct hnae_handle *handle,
			  const unsigned char *addr);
	int (*clr_mc_addr)(struct hnae_handle *handle);
	int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
	int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
	void (*set_tso_stats)(struct hnae_handle *handle, int enable);
	void (*update_stats)(struct hnae_handle *handle,
			     struct net_device_stats *net_stats);
	void (*get_stats)(struct hnae_handle *handle, u64 *data);
	void (*get_strings)(struct hnae_handle *handle,
			    u32 stringset, u8 *data);
	int (*get_sset_count)(struct hnae_handle *handle, int stringset);
	void (*update_led_status)(struct hnae_handle *handle);
	int (*set_led_id)(struct hnae_handle *handle,
			  enum hnae_led_state status);
	void (*get_regs)(struct hnae_handle *handle, void *data);
	int (*get_regs_len)(struct hnae_handle *handle);
	u32 (*get_rss_key_size)(struct hnae_handle *handle);
	u32 (*get_rss_indir_size)(struct hnae_handle *handle);
	int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key,
		       u8 *hfunc);
	int (*set_rss)(struct hnae_handle *handle, const u32 *indir,
		       const u8 *key, const u8 hfunc);
};

struct hnae_ae_dev {
	struct device cls_dev;
	struct device *dev;
	struct hnae_ae_ops *ops;
	struct list_head node;
	struct module *owner;
	int id;
	char name[AE_NAME_SIZE];
	struct list_head handle_list;
	spinlock_t lock;
};

struct hnae_handle {
	struct device *owner_dev;
	struct hnae_ae_dev *dev;
	struct phy_device *phy_dev;
	phy_interface_t phy_if;
	u32 if_support;
	int q_num;
	int vf_id;
	unsigned long coal_last_jiffies;
	u32 coal_param;

	u32 coal_ring_idx;
	u32 eport_id;
	u32 dport_id;
	bool coal_adapt_en;
	enum hnae_port_type port_type;
	enum hnae_media_type media_type;
	struct list_head node;
	struct hnae_buf_ops *bops;
	struct hnae_queue **qs;
};

#define ring_to_dev(ring) ((ring)->q->dev->dev)
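
/* Example (illustrative sketch): the shape of a minimal hnae_buf_ops pair
 * for page-backed RX buffers.  All names below are hypothetical,
 * <linux/dma-mapping.h> is assumed to be available, and the real callbacks
 * live in the hns enet driver, not in this header.
 *
 *	static int example_alloc_buffer(struct hnae_ring *ring,
 *					struct hnae_desc_cb *cb)
 *	{
 *		struct page *p = alloc_pages(GFP_ATOMIC, hnae_page_order(ring));
 *
 *		if (!p)
 *			return -ENOMEM;
 *		cb->priv = p;
 *		cb->buf = page_address(p);
 *		cb->length = hnae_page_size(ring);
 *		return 0;
 *	}
 *
 *	static int example_map_buffer(struct hnae_ring *ring,
 *				      struct hnae_desc_cb *cb)
 *	{
 *		cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
 *				       cb->length, ring_to_dma_dir(ring));
 *		return dma_mapping_error(ring_to_dev(ring), cb->dma) ? -EIO : 0;
 *	}
 */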

struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops);

void hnae_put_handle(struct hnae_handle *handle);
int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
void hnae_ae_unregister(struct hnae_ae_dev *dev);

int hnae_register_notifier(struct notifier_block *nb);
void hnae_unregister_notifier(struct notifier_block *nb);
int hnae_reinit_handle(struct hnae_handle *handle);
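
/* Example (illustrative sketch): the two sides of this interface.  An AE
 * (acceleration engine) driver registers itself, while the ethernet driver
 * looks its handle up by fwnode and port id.  All variable names below are
 * hypothetical.
 *
 *	// AE driver probe:
 *	ae_dev->ops = &my_ae_ops;
 *	ae_dev->dev = &pdev->dev;
 *	ret = hnae_ae_register(ae_dev, THIS_MODULE);
 *
 *	// ethernet driver probe:
 *	handle = hnae_get_handle(&netdev->dev, ae_fwnode, port_id, &my_bops);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 */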

#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
	(q)->tx_ring.io_base + RCB_REG_TAIL)

#ifndef assert
#define assert(cond)
#endif

static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
					  struct hnae_desc_cb *cb)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;
	int ret;

	ret = bops->alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = bops->map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	bops->free_buffer(ring, cb);
out:
	return ret;
}

static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
{
	int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}
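
/* Example (illustrative sketch): filling every descriptor of an RX ring
 * with freshly allocated and mapped buffers.  example_fill_rx_ring() is a
 * hypothetical name; error unwinding is left to the caller.
 */
static inline int example_fill_rx_ring(struct hnae_ring *ring)
{
	int i, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			return ret;
	}

	return 0;
}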

static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
{
	ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;
	struct hnae_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hnae_buffer_detach(ring, i);
	bops->free_buffer(ring, cb);
}

static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
				       struct hnae_desc_cb *res_cb)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;

	bops->unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}

static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
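
/* Example (illustrative sketch): how an RX clean routine might recycle a
 * descriptor, either re-posting the same page via hnae_reuse_buffer() or
 * mapping a brand-new buffer and swapping it in.  example_rx_recycle() is
 * a hypothetical name; the real logic lives in the hns enet driver.
 */
static inline void example_rx_recycle(struct hnae_ring *ring, int i)
{
	struct hnae_desc_cb new_cb = {};

	if (ring->desc_cb[i].reuse_flag)
		hnae_reuse_buffer(ring, i);
	else if (!hnae_reserve_buffer_map(ring, &new_cb))
		hnae_replace_buffer(ring, i, &new_cb);
	/* on allocation failure the old buffer is simply left in place */
}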

static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
{
	int i, j;
	struct hnae_ring *ring;

	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		for (j = 0; j < ring->desc_num; j++)
			ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
	}

	wmb();
}

static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
{
	int i, j;
	struct hnae_ring *ring;

	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		for (j = 0; j < ring->desc_num; j++) {
			ring->desc_cb[j].page_offset = 0;
			if (ring->desc[j].addr !=
			    cpu_to_le64(ring->desc_cb[j].dma))
				ring->desc[j].addr =
					cpu_to_le64(ring->desc_cb[j].dma);
		}
	}

	wmb();
}

#define hnae_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

#define hnae_set_bit(origin, shift, val) \
	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))

#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define hnae_get_bit(origin, shift) \
	hnae_get_field((origin), (0x1 << (shift)), (shift))
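
/* Example (illustrative sketch): how the bitfield helpers above are
 * typically combined with the HNS_TXD_* masks when building a descriptor
 * flag word.  example_build_tx_flag() and its parameters are hypothetical
 * names, not part of the hnae API.
 */
static inline u32 example_build_tx_flag(u8 buf_num, u8 port_id)
{
	u32 flag = 0;

	hnae_set_field(flag, HNS_TXD_BUFNUM_M, HNS_TXD_BUFNUM_S, buf_num);
	hnae_set_field(flag, HNS_TXD_PORTID_M, HNS_TXD_PORTID_S, port_id);
	hnae_set_bit(flag, HNS_TXD_VLD_B, 1);

	return flag;
}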

#endif