This source file includes following definitions.
- nfp_net_fw_ver_eq
- nn_readb
- nn_writeb
- nn_readw
- nn_writew
- nn_readl
- nn_writel
- nn_readq
- nn_writeq
- nn_pci_flush
- _nfp_qcp_ptr_add
- nfp_qcp_rd_ptr_add
- nfp_qcp_wr_ptr_add
- _nfp_qcp_read
- nfp_qcp_rd_ptr_read
- nfp_qcp_wr_ptr_read
- nfp_net_is_data_vnic
- nfp_net_running
- nfp_net_name
- nfp_ctrl_lock
- nfp_ctrl_unlock
- nn_ctrl_bar_lock
- nn_ctrl_bar_trylock
- nn_ctrl_bar_unlock
- nfp_netdev_is_nfp_net
- nfp_net_debugfs_create
- nfp_net_debugfs_destroy
- nfp_net_debugfs_device_add
- nfp_net_debugfs_vnic_add
- nfp_net_debugfs_dir_clean
1
2
3
4
5
6
7
8
9
10
11
12 #ifndef _NFP_NET_H_
13 #define _NFP_NET_H_
14
15 #include <linux/atomic.h>
16 #include <linux/interrupt.h>
17 #include <linux/list.h>
18 #include <linux/netdevice.h>
19 #include <linux/pci.h>
20 #include <linux/io-64-nonatomic-hi-lo.h>
21 #include <linux/semaphore.h>
22 #include <linux/workqueue.h>
23 #include <net/xdp.h>
24
25 #include "nfp_net_ctrl.h"
26
/* Print a message for vNIC @nn at kernel log level @lvl.  Routes to
 * netdev_printk() when a netdev is attached; control vNICs have no
 * netdev, so those messages go to dev_printk() with a "ctrl: " prefix.
 */
#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})
36
/* Per-level convenience wrappers around nn_pr() */
#define nn_err(nn, fmt, args...)  nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...)  nn_pr(nn, KERN_DEBUG, fmt, ## args)
41
/* Rate-limited warning taking a datapath pointer rather than the full
 * nfp_net — usable from fast-path code which only holds a struct
 * nfp_net_dp.  Falls back to dev_warn() when no netdev is attached.
 */
#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {			\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})
53
54
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4	/* OBSOLETE */

/* Max bits in DMA address */
#define NFP_NET_MAX_DMA_BITS	40

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500U

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions */
#define NFP_NET_NON_Q_VECTORS	2	/* LSC + EXN, on top of ring vecs */
#define NFP_NET_IRQ_LSC_IDX	0	/* Link State Change interrupt */
#define NFP_NET_IRQ_EXN_IDX	1	/* Exception interrupt */
#define NFP_NET_MIN_VNIC_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_MIN_TX_DESCS	256	/* Min. # of Tx descs per ring */
#define NFP_NET_MIN_RX_DESCS	256	/* Min. # of Rx descs per ring */
#define NFP_NET_MAX_TX_DESCS	(256 * 1024) /* Max. # of Tx descs per ring */
#define NFP_NET_MAX_RX_DESCS	(256 * 1024) /* Max. # of Rx descs per ring */

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH	16	/* Add freelist buffers in this batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048	/* XDP bufs to reclaim in NAPI poll */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

/* Per-buffer overhead on RX: reserved headroom plus skb_shared_info */
#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
104
105
/* Forward declarations */
struct nfp_cpp;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;

/* Convert a free-running index into a ring-local descriptor index;
 * relies on ring sizes being powers of two.
 */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))

/* Write a DMA address into a descriptor's split lo/hi address fields.
 * Only the low 8 bits of the upper 32 bits are kept, i.e. addresses are
 * limited to 40 bits (cf. NFP_NET_MAX_DMA_BITS).
 */
#define nfp_desc_set_dma_addr(desc, dma_addr)				\
	do {								\
		__typeof(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)
124
125
126
/* TX descriptor format */

#define PCIE_DESC_TX_EOP		BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK	GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK		GENMASK(13, 0)

/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM		BIT(7)
#define PCIE_DESC_TX_IP4_CSUM		BIT(6)
#define PCIE_DESC_TX_TCP_CSUM		BIT(5)
#define PCIE_DESC_TX_UDP_CSUM		BIT(4)
#define PCIE_DESC_TX_VLAN		BIT(3)
#define PCIE_DESC_TX_LSO		BIT(2)
#define PCIE_DESC_TX_ENCAP		BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM	BIT(0)

struct nfp_net_tx_desc {
	union {
		struct {
			u8 dma_addr_hi; /* High bits of host buf address */
			__le16 dma_len;	/* Length to DMA for this desc */
			u8 offset_eop;	/* Offset in buf where pkt starts +
					 * highest bit is eop flag.
					 */
			__le32 dma_addr_lo; /* Low 32bit of host buf addr */

			__le16 mss;	/* MSS to be used for LSO */
			u8 lso_hdrlen;	/* LSO, TCP payload offset */
			u8 flags;	/* TX Flags, see @PCIE_DESC_TX_* */
			union {
				struct {
					u8 l3_offset; /* L3 header offset */
					u8 l4_offset; /* L4 header offset */
				};
				__le16 vlan; /* VLAN tag to add if indicated */
			};
			__le16 data_len; /* Length of frame + meta data */
		} __packed;
		__le32 vals[4];
		__le64 vals8[2];
	};
};
167
168
169
170
171
172
173
174
175
176
177
178
179
/**
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:	normal ring, sk_buff associated with this buffer
 * @frag:	XDP ring, page frag associated with this buffer
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
 * @pkt_cnt:	Number of packets to be produced out of the skb associated
 *		with this buffer (valid only on the head's buffer)
 * @real_len:	Number of bytes which to be produced out of the skb (valid only
 *		on the head's buffer)
 */
struct nfp_net_tx_buf {
	union {
		struct sk_buff *skb;
		void *frag;
	};
	dma_addr_t dma_addr;
	short int fidx;
	u16 pkt_cnt;
	u32 real_len;
};
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add:	Accumulated number of buffers to add to QCP write pointer
 *		(used for .xmit_more delayed kick)
 * @txbufs:     Array of transmitted TX buffers, to free on transmit
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:	Is this a XDP TX ring?
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 idx;
	int qcidx;
	u8 __iomem *qcp_q;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	struct nfp_net_tx_buf *txbufs;
	struct nfp_net_tx_desc *txds;

	dma_addr_t dma;
	size_t size;
	bool is_xdp;
} ____cacheline_aligned;
230
231
232
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM	cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM	cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_DECRYPTED		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
/* Each *_CSUM_OK bit sits one position below the matching *_CSUM bit,
 * hence the mask of all "OK" bits is the "all" mask shifted by one.
 */
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)
264
/* RX descriptor: @fld is the host-written freelist view, @rxd is the
 * device-written receive view of the same 8 bytes.
 */
struct nfp_net_rx_desc {
	union {
		struct {
			u8 dma_addr_hi;	/* High bits of the buf address */
			__le16 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd;	/* Length of meta data prepended +
					 * descriptor done flag.
					 */
			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* VLAN if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};
289
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)

/**
 * struct nfp_meta_parsed - buffer meta data parsed from packet prepend
 * @hash_type:	type of hash the device computed (see skb_set_hash())
 * @csum_type:	type of checksum reported in @csum
 * @hash:	RSS hash value
 * @mark:	packet mark
 * @portid:	port id the packet arrived on
 * @csum:	checksum value reported by the device
 */
struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
};

/* Hash prepended by the device: type followed by the hash value,
 * both big-endian on the wire.
 */
struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};
305
306
307
308
309
310
/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:	page fragment buffer
 * @dma_addr:	DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of transmitted FL/RX buffers
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @xdp_rxq:    RX-ring info avail for XDP
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;

	dma_addr_t dma;
	size_t size;
} ____cacheline_aligned;
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @xdp_ring:       Pointer to an extra TX ring for XDP
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @hw_tls_rx:      Number of packets with TLS decrypted by hardware
 * @hw_csum_rx_error:	 Counter of packets with bad checksums
 * @rx_replace_buf_alloc_fail:	Counter of RX buffer allocation failures
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:	 Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @hw_tls_tx:      Counter of TLS packets sent with crypto offloaded to HW
 * @tls_tx_fallback:	Counter of TLS packets sent which had to be encrypted
 *			by the fallback path
 * @tls_tx_no_fallback:	Counter of TLS packets not sent because the fallback
 *			path could not encrypt them
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context (or, for control vNICs, a tasklet + skb queue).
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			spinlock_t lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_complete;
	u64 hw_tls_rx;

	u64 hw_csum_rx_error;
	u64 rx_replace_buf_alloc_fail;

	struct nfp_net_tx_ring *xdp_ring;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;

	/* Cold TX stats start on a fresh cache line to keep the hot
	 * counters above densely packed.
	 */
	u64 ____cacheline_aligned_in_smp hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;
	u64 hw_tls_tx;

	u64 tls_tx_fallback;
	u64 tls_tx_no_fallback;
	u64 tx_errors;
	u64 tx_busy;

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;
449
450
/* Firmware version as it is written in the 32bit value in the BAR;
 * note the minor/major byte order.
 */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;
	u8 resv;
} __packed;
457
458 static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
459 u8 resv, u8 class, u8 major, u8 minor)
460 {
461 return fw_ver->resv == resv &&
462 fw_ver->class == class &&
463 fw_ver->major == major &&
464 fw_ver->minor == minor;
465 }
466
/**
 * struct nfp_stat_pair - Pair of packet and byte counters
 * @pkts:	Number of packets
 * @bytes:	Number of bytes
 */
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure
 * @is_vf:		Is the driver attached to a VF?
 * @chained_metadata_format:  Firmware will use new metadata format
 * @ktls_tx:		Is kTLS TX enabled?
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @rx_dma_off:		Offset at which DMA packets (for XDP headroom)
 * @ctrl:		Local copy of the control register/word.
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 * @txd_cnt:		Size of the TX ring in number of descriptors
 * @rxd_cnt:		Size of the RX ring in number of descriptors
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 chained_metadata_format:1;
	u8 ktls_tx:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Fields below this point are not used on the fast path */

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;
};
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @fw_ver:		Firmware version
 * @id:			vNIC id within the PF (for debug messages)
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum support MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:		RSS configuration
 * @rss_key:		RSS secret key
 * @rss_itbl:		RSS indirection table
 * @xdp:		Information about the driver XDP program
 * @xdp_hw:		Information about the HW XDP program
 * @max_tx_rings:	Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:	Maximum number of RX rings supported by the Firmware
 * @stride_tx:		Queue controller TX queue spacing
 * @stride_rx:		Queue controller RX queue spacing
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @r_vecs:		Pre-allocated array of ring vectors
 * @irq_entries:	Pre-allocated array of MSI-X entries
 * @lsc_handler:	Handler for Link State Change interrupt
 * @lsc_name:		Name for Link State Change interrupt
 * @exn_handler:	Handler for Exception interrupt
 * @exn_name:		Name for Exception interrupt
 * @shared_handler:	Handler for shared interrupts
 * @shared_name:	Name for shared interrupt
 * @me_freq_mhz:	ME clock_freq (MHz)
 * @link_up:		Is the link up?
 * @link_status_lock:	Protects @link_up and ensures atomicity with BAR reading
 * @reconfig_lock:	Protects @reconfig_posted, @reconfig_timer_active,
 *			@reconfig_sync_present and HW reconfiguration request
 *			regs/machinery
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @reconfig_in_progress_update:	Update FW is processing now (debug only)
 * @bar_lock:		vNIC config BAR access lock, protects: update,
 *			mailbox area, crypto TLV
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:	VXLAN ports for RX inner csum offload communicated to HW
 * @vxlan_usecnt:	IPv4/IPv6 VXLAN port use counts
 * @qcp_cfg:		Pointer to QCP queue used for configuration notification
 * @tx_bar:		Pointer to mapped TX queues
 * @rx_bar:		Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @ktls_tx_conn_cnt:	Number of offloaded kTLS TX connections
 * @ktls_rx_conn_cnt:	Number of offloaded kTLS RX connections
 * @ktls_conn_id_gen:	Trivial generator for kTLS connection ids (for TX)
 * @ktls_no_space:	Counter of firmware rejecting kTLS connection due to
 *			lack of space
 * @mbox_cmsg:		Common Control Message via vNIC mailbox state
 * @mbox_cmsg.queue:	CCM mbox queue of pending messages
 * @mbox_cmsg.wq:	CCM mbox wait queue of waiting processes
 * @mbox_cmsg.workq:	CCM mbox work queue for @wait_work and @runq_work
 * @mbox_cmsg.wait_work:    CCM mbox posted msg reconfig wait work
 * @mbox_cmsg.runq_work:    CCM mbox posted msg queue runner work
 * @mbox_cmsg.tag:	CCM mbox message tag allocator
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @vnic_no_name:	For non-port PF vNIC make ndo_get_phys_port_name return
 *			-EOPNOTSUPP to keep backwards compatibility (set by app)
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	struct xdp_attachment_info xdp;
	struct xdp_attachment_info xdp_hw;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	u32 me_freq_mhz;

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;
	u32 reconfig_in_progress_update;

	struct semaphore bar_lock;

	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	__be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
	u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct nfp_net_tlv_caps tlv_caps;

	unsigned int ktls_tx_conn_cnt;
	unsigned int ktls_rx_conn_cnt;

	atomic64_t ktls_conn_id_gen;

	atomic_t ktls_no_space;

	struct {
		struct sk_buff_head queue;
		wait_queue_head_t wq;
		struct workqueue_struct *workq;
		struct work_struct wait_work;
		struct work_struct runq_work;
		u16 tag;
	} mbox_cmsg;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	bool vnic_no_name;

	struct nfp_port *port;

	void *app_priv;
};
700
701
702
703
704 static inline u16 nn_readb(struct nfp_net *nn, int off)
705 {
706 return readb(nn->dp.ctrl_bar + off);
707 }
708
709 static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
710 {
711 writeb(val, nn->dp.ctrl_bar + off);
712 }
713
714 static inline u16 nn_readw(struct nfp_net *nn, int off)
715 {
716 return readw(nn->dp.ctrl_bar + off);
717 }
718
719 static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
720 {
721 writew(val, nn->dp.ctrl_bar + off);
722 }
723
724 static inline u32 nn_readl(struct nfp_net *nn, int off)
725 {
726 return readl(nn->dp.ctrl_bar + off);
727 }
728
729 static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
730 {
731 writel(val, nn->dp.ctrl_bar + off);
732 }
733
734 static inline u64 nn_readq(struct nfp_net *nn, int off)
735 {
736 return readq(nn->dp.ctrl_bar + off);
737 }
738
739 static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
740 {
741 writeq(val, nn->dp.ctrl_bar + off);
742 }
743
744
745 static inline void nn_pci_flush(struct nfp_net *nn)
746 {
747 nn_readl(nn, NFP_NET_CFG_VERSION);
748 }
749
750
751
752
753
754
755
756
757
758
/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  Each queue
 * occupies NFP_QCP_QUEUE_ADDR_SZ bytes; read and write pointers are
 * advanced via the ADD_RPTR/ADD_WPTR registers and read back (masked)
 * from the STS_LO/STS_HI registers.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_AREA_SZ			0x80000
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* Nearly identical to NFP_QCP_QUEUE_OFF, but with a queue-count wrap */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - which QCP pointer (read or write) to operate on */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};
777
778
779
780
781
782 #define NFP_QCP_MAX_ADD 0x3f
783
784 static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
785 enum nfp_qcp_ptr ptr, u32 val)
786 {
787 u32 off;
788
789 if (ptr == NFP_QCP_READ_PTR)
790 off = NFP_QCP_QUEUE_ADD_RPTR;
791 else
792 off = NFP_QCP_QUEUE_ADD_WPTR;
793
794 while (val > NFP_QCP_MAX_ADD) {
795 writel(NFP_QCP_MAX_ADD, q + off);
796 val -= NFP_QCP_MAX_ADD;
797 }
798
799 writel(val, q + off);
800 }
801
802
803
804
805
806
807
808
809
/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}
827
828 static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
829 {
830 u32 off;
831 u32 val;
832
833 if (ptr == NFP_QCP_READ_PTR)
834 off = NFP_QCP_QUEUE_STS_LO;
835 else
836 off = NFP_QCP_QUEUE_STS_HI;
837
838 val = readl(q + off);
839
840 if (ptr == NFP_QCP_READ_PTR)
841 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
842 else
843 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
844 }
845
846
847
848
849
850
851
/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
867
868 static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
869 {
870 WARN_ON_ONCE(!nn->dp.netdev && nn->port);
871 return !!nn->dp.netdev;
872 }
873
874 static inline bool nfp_net_running(struct nfp_net *nn)
875 {
876 return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
877 }
878
879 static inline const char *nfp_net_name(struct nfp_net *nn)
880 {
881 return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
882 }
883
/* Lock/unlock the control vNIC, which is serviced from r_vec 0;
 * its r_vec lock protects the ctrl message queue.
 */
static inline void nfp_ctrl_lock(struct nfp_net *nn)
	__acquires(&nn->r_vecs[0].lock)
{
	spin_lock_bh(&nn->r_vecs[0].lock);
}

static inline void nfp_ctrl_unlock(struct nfp_net *nn)
	__releases(&nn->r_vecs[0].lock)
{
	spin_unlock_bh(&nn->r_vecs[0].lock);
}
895
/* Acquire the config BAR semaphore (see @bar_lock in struct nfp_net). */
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
	down(&nn->bar_lock);
}

/* Try to acquire the config BAR semaphore without sleeping.
 * Returns true on success (note: down_trylock() returns 0 on success,
 * hence the negation).
 */
static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
{
	return !down_trylock(&nn->bar_lock);
}

/* Release the config BAR semaphore. */
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
	up(&nn->bar_lock);
}
910
911
/* Globals */
extern const char nfp_driver_version[];

extern const struct net_device_ops nfp_net_netdev_ops;

/* Identify a netdev as belonging to this driver by its ops pointer. */
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_net_netdev_ops;
}
920
921
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar);

/* vNIC lifetime */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);

int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);

int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);

/* Configuration and reconfiguration */
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);

/* Interrupt management */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);

/* Datapath (ring) reconfiguration */
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
			  struct netlink_ext_ack *extack);
961
/* Debugfs support — real implementations when CONFIG_NFP_DEBUG is set,
 * empty inline stubs otherwise.
 */
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif
991
992 #endif