root/drivers/net/ethernet/cavium/thunder/nic.h

DEFINITIONS

This source file includes the following definitions:
  1. nic_get_node_id
  2. pass1_silicon
  3. pass2_silicon

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NIC_H
#define NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF            0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF      0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF            0xA034
#define PCI_DEVICE_ID_THUNDER_BGX               0xA026

/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF            0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF            0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF            0xA31E

#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF      0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF            0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF            0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF            0xA334


/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM             0
#define PCI_MSIX_REG_BAR_NUM            4

/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED           128
#define DEFAULT_NUM_VF_ENABLED          8

#define NIC_TNS_BYPASS_MODE             0
#define NIC_TNS_MODE                    1

/* NIC priv flags */
#define NIC_SRIOV_ENABLED               BIT(0)

/* Min/Max packet size */
#define NIC_HW_MIN_FRS                  64
#define NIC_HW_MAX_FRS                  9190 /* Excluding L2 header and FCS */

/* Max pkinds */
#define NIC_MAX_PKIND                   16

/* Max when CPI_ALG is IP diffserv */
#define NIC_MAX_CPI_PER_LMAC            64

/* NIC VF Interrupts */
#define NICVF_INTR_CQ                   0
#define NICVF_INTR_SQ                   1
#define NICVF_INTR_RBDR                 2
#define NICVF_INTR_PKT_DROP             3
#define NICVF_INTR_TCP_TIMER            4
#define NICVF_INTR_MBOX                 5
#define NICVF_INTR_QS_ERR               6

#define NICVF_INTR_CQ_SHIFT             0
#define NICVF_INTR_SQ_SHIFT             8
#define NICVF_INTR_RBDR_SHIFT           16
#define NICVF_INTR_PKT_DROP_SHIFT       20
#define NICVF_INTR_TCP_TIMER_SHIFT      21
#define NICVF_INTR_MBOX_SHIFT           22
#define NICVF_INTR_QS_ERR_SHIFT         23

#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK        BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK       BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK            BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK          BIT(NICVF_INTR_QS_ERR_SHIFT)
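
/* Illustrative note, not part of the original header: for the queue-indexed
 * interrupt types above (CQ, SQ, RBDR) the enable/status bit of a specific
 * queue instance is typically computed as BIT(shift + queue index) within
 * that type's mask, e.g. completion queue 3 maps to
 * BIT(NICVF_INTR_CQ_SHIFT + 3), which lies inside NICVF_INTR_CQ_MASK.
 */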

/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS             10
#define NIC_VF_MSIX_VECTORS             20

#define NIC_PF_INTR_ID_ECC0_SBE         0
#define NIC_PF_INTR_ID_ECC0_DBE         1
#define NIC_PF_INTR_ID_ECC1_SBE         2
#define NIC_PF_INTR_ID_ECC1_DBE         3
#define NIC_PF_INTR_ID_ECC2_SBE         4
#define NIC_PF_INTR_ID_ECC2_DBE         5
#define NIC_PF_INTR_ID_ECC3_SBE         6
#define NIC_PF_INTR_ID_ECC3_DBE         7
#define NIC_PF_INTR_ID_MBOX0            8
#define NIC_PF_INTR_ID_MBOX1            9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to non-availability
 * of a CQ descriptor. A HW erratum mandates this value to be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL       0x100

/* Global timer for CQ timer threshold interrupts
 * Calculated for an SCLK of 700 MHz; the value written
 * should be 1/16th of what is expected.
 *
 * 1 tick per 0.025 usec
 */
#define NICPF_CLK_PER_INT_TICK          1
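
/* Worked example of the tick arithmetic above (illustrative, not part of
 * the original header): with the 1/16th scaling, one timer tick appears to
 * correspond to 16 SCLK cycles, i.e. 16 / 700 MHz ~= 22.9 ns, which the
 * comment above rounds to 0.025 usec.  A desired interval of 50 usec would
 * then be roughly 50 / 0.025 = 2000 ticks.
 */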

/* Time to wait before we decide that an SQ is stuck.
 *
 * Since both pkt rx and tx notifications are done on the same CQ,
 * when packets are being received at a very high rate (e.g. L2 forwarding)
 * freeing transmitted skbs will be delayed and the watchdog
 * will kick in, resetting the interface. Hence this value is kept high.
 */
#define NICVF_TX_TIMEOUT                (50 * HZ)

struct nicvf_cq_poll {
        struct  nicvf *nicvf;
        u8      cq_idx;         /* Completion queue index */
        struct  napi_struct napi;
};

#define NIC_MAX_RSS_HASH_BITS           8
#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE               5 /* 320 bit key */

struct nicvf_rss_info {
        bool enable;
#define RSS_L2_EXTENDED_HASH_ENA        BIT(0)
#define RSS_IP_HASH_ENA                 BIT(1)
#define RSS_TCP_HASH_ENA                BIT(2)
#define RSS_TCP_SYN_DIS                 BIT(3)
#define RSS_UDP_HASH_ENA                BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA        BIT(5)
#define RSS_ROCE_ENA                    BIT(6)
#define RSS_L3_BI_DIRECTION_ENA         BIT(7)
#define RSS_L4_BI_DIRECTION_ENA         BIT(8)
        u64 cfg;
        u8  hash_bits;
        u16 rss_size;
        u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
        u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
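
/* Illustrative sketch, not part of the original driver: how an RSS hash is
 * typically folded into a receive-queue index through the indirection table
 * above.  The helper name is hypothetical and assumes rss_size is the
 * power-of-two size of the programmed table.
 */
static inline u8 nicvf_rss_example_rq_idx(struct nicvf_rss_info *rss, u32 hash)
{
        /* The low bits of the hash pick an indirection-table slot; the slot
         * holds the destination receive-queue index.
         */
        return rss->ind_tbl[hash & (rss->rss_size - 1)];
}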

struct nicvf_pfc {
        u8    autoneg;
        u8    fc_rx;
        u8    fc_tx;
};

enum rx_stats_reg_offset {
        RX_OCTS = 0x0,
        RX_UCAST = 0x1,
        RX_BCAST = 0x2,
        RX_MCAST = 0x3,
        RX_RED = 0x4,
        RX_RED_OCTS = 0x5,
        RX_ORUN = 0x6,
        RX_ORUN_OCTS = 0x7,
        RX_FCS = 0x8,
        RX_L2ERR = 0x9,
        RX_DRP_BCAST = 0xa,
        RX_DRP_MCAST = 0xb,
        RX_DRP_L3BCAST = 0xc,
        RX_DRP_L3MCAST = 0xd,
        RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
        TX_OCTS = 0x0,
        TX_UCAST = 0x1,
        TX_BCAST = 0x2,
        TX_MCAST = 0x3,
        TX_DROP = 0x4,
        TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
        u64 rx_bytes;
        u64 rx_frames;
        u64 rx_ucast_frames;
        u64 rx_bcast_frames;
        u64 rx_mcast_frames;
        u64 rx_drops;
        u64 rx_drop_red;
        u64 rx_drop_red_bytes;
        u64 rx_drop_overrun;
        u64 rx_drop_overrun_bytes;
        u64 rx_drop_bcast;
        u64 rx_drop_mcast;
        u64 rx_drop_l3_bcast;
        u64 rx_drop_l3_mcast;
        u64 rx_fcs_errors;
        u64 rx_l2_errors;

        u64 tx_bytes;
        u64 tx_frames;
        u64 tx_ucast_frames;
        u64 tx_bcast_frames;
        u64 tx_mcast_frames;
        u64 tx_drops;
};

struct nicvf_drv_stats {
        /* CQE Rx errs */
        u64 rx_bgx_truncated_pkts;
        u64 rx_jabber_errs;
        u64 rx_fcs_errs;
        u64 rx_bgx_errs;
        u64 rx_prel2_errs;
        u64 rx_l2_hdr_malformed;
        u64 rx_oversize;
        u64 rx_undersize;
        u64 rx_l2_len_mismatch;
        u64 rx_l2_pclp;
        u64 rx_ip_ver_errs;
        u64 rx_ip_csum_errs;
        u64 rx_ip_hdr_malformed;
        u64 rx_ip_payload_malformed;
        u64 rx_ip_ttl_errs;
        u64 rx_l3_pclp;
        u64 rx_l4_malformed;
        u64 rx_l4_csum_errs;
        u64 rx_udp_len_errs;
        u64 rx_l4_port_errs;
        u64 rx_tcp_flag_errs;
        u64 rx_tcp_offset_errs;
        u64 rx_l4_pclp;
        u64 rx_truncated_pkts;

        /* CQE Tx errs */
        u64 tx_desc_fault;
        u64 tx_hdr_cons_err;
        u64 tx_subdesc_err;
        u64 tx_max_size_exceeded;
        u64 tx_imm_size_oflow;
        u64 tx_data_seq_err;
        u64 tx_mem_seq_err;
        u64 tx_lock_viol;
        u64 tx_data_fault;
        u64 tx_tstmp_conflict;
        u64 tx_tstmp_timeout;
        u64 tx_mem_fault;
        u64 tx_csum_overlap;
        u64 tx_csum_overflow;

        /* driver debug stats */
        u64 tx_tso;
        u64 tx_timeout;
        u64 txq_stop;
        u64 txq_wake;

        u64 rcv_buffer_alloc_failures;
        u64 page_alloc;

        struct u64_stats_sync   syncp;
};
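
/* Illustrative usage sketch, not part of the original header: drv_stats is
 * allocated per-cpu (see struct nicvf below), and syncp exists so that the
 * 64-bit counters can be read consistently on 32-bit hosts.  An update
 * would typically look like:
 *
 *	struct nicvf_drv_stats *stats = this_cpu_ptr(nic->drv_stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->tx_tso++;
 *	u64_stats_update_end(&stats->syncp);
 */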

struct cavium_ptp;

struct xcast_addr_list {
        int              count;
        u64              mc[];
};

struct nicvf_work {
        struct work_struct     work;
        u8                     mode;
        struct xcast_addr_list *mc;
};

struct nicvf {
        struct nicvf            *pnicvf;
        struct net_device       *netdev;
        struct pci_dev          *pdev;
        void __iomem            *reg_base;
        struct bpf_prog         *xdp_prog;
#define MAX_QUEUES_PER_QSET                     8
        struct queue_set        *qs;
        void                    *iommu_domain;
        u8                      vf_id;
        u8                      sqs_id;
        bool                    sqs_mode;
        bool                    hw_tso;
        bool                    t88;

        /* Receive buffer alloc */
        u32                     rb_page_offset;
        u16                     rb_pageref;
        bool                    rb_alloc_fail;
        bool                    rb_work_scheduled;
        struct page             *rb_page;
        struct delayed_work     rbdr_work;
        struct tasklet_struct   rbdr_task;

        /* Secondary Qset */
        u8                      sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE              5
#define MAX_SQS_PER_VF                          11
        struct nicvf            *snicvf[MAX_SQS_PER_VF];

        /* Queue count */
        u8                      rx_queues;
        u8                      tx_queues;
        u8                      xdp_tx_queues;
        u8                      max_queues;

        u8                      node;
        u8                      cpi_alg;
        bool                    link_up;
        u8                      mac_type;
        u8                      duplex;
        u32                     speed;
        bool                    tns_mode;
        bool                    loopback_supported;
        struct nicvf_rss_info   rss_info;
        struct nicvf_pfc        pfc;
        struct tasklet_struct   qs_err_task;
        struct work_struct      reset_task;
        struct nicvf_work       rx_mode_work;
        /* spinlock to protect workqueue arguments from concurrent access */
        spinlock_t              rx_mode_wq_lock;
        /* workqueue for handling kernel ndo_set_rx_mode() calls */
        struct workqueue_struct *nicvf_rx_mode_wq;
        /* mutex to protect VF's mailbox contents from concurrent access */
        struct mutex            rx_mode_mtx;
        struct delayed_work     link_change_work;
        /* PTP timestamp */
        struct cavium_ptp       *ptp_clock;
        /* Inbound timestamping is on */
        bool                    hw_rx_tstamp;
        /* When a packet that requires timestamping is sent, the hardware
         * inserts two entries into the completion queue.  The first is the
         * regular CQE_TYPE_SEND entry that signals that the packet was sent.
         * The second is CQE_TYPE_SEND_PTP, which contains the actual
         * timestamp for that packet.
         * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
         * entry and is used and zeroed in the handler for the
         * CQE_TYPE_SEND_PTP entry.
         * So `ptp_skb` holds the pointer to the packet between the calls to
         * the CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
         */
        struct sk_buff          *ptp_skb;
        /* `tx_ptp_skbs` is set when the hardware is sending a packet that
         * requires timestamping.  Cavium hardware cannot process more than
         * one such packet at once, so this is set each time the driver
         * submits a packet that requires timestamping to the send queue and
         * cleared each time it receives the completion queue entry saying
         * that such a packet was sent.
         * So `tx_ptp_skbs` prevents the driver from submitting more than one
         * packet that requires timestamping to the hardware for transmission.
         */
        atomic_t                tx_ptp_skbs;

        /* Interrupt coalescing settings */
        u32                     cq_coalesce_usecs;
        u32                     msg_enable;

        /* Stats */
        struct nicvf_hw_stats   hw_stats;
        struct nicvf_drv_stats  __percpu *drv_stats;
        struct bgx_stats        bgx_stats;

        /* Napi */
        struct nicvf_cq_poll    *napi[8];

        /* MSI-X  */
        u8                      num_vec;
        char                    irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
        bool                    irq_allocated[NIC_VF_MSIX_VECTORS];
        cpumask_var_t           affinity_mask[NIC_VF_MSIX_VECTORS];

        /* VF <-> PF mailbox communication */
        bool                    pf_acked;
        bool                    pf_nacked;
        bool                    set_mac_pending;
} ____cacheline_aligned_in_smp;
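
/* Illustrative sketch, not part of the original driver, of the two-CQE TX
 * timestamping flow described next to `ptp_skb`/`tx_ptp_skbs` above.  Exact
 * handler names and CQE field layout live in the .c files; the real kernel
 * helpers skb_hwtstamps(), skb_tstamp_tx() and dev_kfree_skb_any() are
 * assumed.
 *
 *	CQE_TYPE_SEND handler:
 *		if the skb asked for a HW timestamp
 *			nic->ptp_skb = skb;		(keep it, do not free yet)
 *		else
 *			free the skb as usual
 *
 *	CQE_TYPE_SEND_PTP handler:
 *		skb_hwtstamps(nic->ptp_skb)->hwtstamp = <timestamp from CQE>;
 *		skb_tstamp_tx(nic->ptp_skb, skb_hwtstamps(nic->ptp_skb));
 *		dev_kfree_skb_any(nic->ptp_skb);
 *		nic->ptp_skb = NULL;
 *		atomic_dec(&nic->tx_ptp_skbs);	(allow the next PTP packet)
 */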

/* PF <--> VF Mailbox communication
 * Two 64bit registers are shared between PF and VF,
 * a separate set for each VF.
 * Writing to the last mailbox register signals end of message.
 */

/* PF <--> VF mailbox communication */
#define NIC_PF_VF_MAILBOX_SIZE          2
#define NIC_MBOX_MSG_TIMEOUT            2000 /* ms */

/* Mailbox message types */
#define NIC_MBOX_MSG_READY              0x01    /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK                0x02    /* ACK the message received */
#define NIC_MBOX_MSG_NACK               0x03    /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG             0x04    /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG             0x05    /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG             0x06    /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG        0x07    /* Configure receive queue */
#define NIC_MBOX_MSG_SET_MAC            0x08    /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS        0x09    /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG            0x0A    /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE           0x0B    /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG            0x0C    /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT       0x0D    /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG          0x0E    /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC         0x0F    /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS          0x10    /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE    0x11    /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS          0x12    /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR          0x13    /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR         0x14    /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR         0x15    /* Send sqs nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK           0x16    /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17    /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC                0x18    /* Pause frame control */
#define NIC_MBOX_MSG_PTP_CFG            0x19    /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE           0xF0    /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN           0xF1    /* VF is being shutdown */
#define NIC_MBOX_MSG_RESET_XCAST        0xF2    /* Reset DCAM filtering mode */
#define NIC_MBOX_MSG_ADD_MCAST          0xF3    /* Add MAC to DCAM filters */
#define NIC_MBOX_MSG_SET_XCAST          0xF4    /* Set MCAST/BCAST RX mode */

struct nic_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    node_id;
        u8    tns_mode:1;
        u8    sqs_mode:1;
        u8    loopback_supported:1;
        u8    mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
        u8    msg;
        u8    num;
        u8    sqs_count;
        u64   cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
        u8    msg;
        u8    qs_num;
        u8    rq_num;
        u64   cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
        u8    msg;
        u8    qs_num;
        u8    sq_num;
        bool  sqs_mode;
        u64   cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
        u8    msg;
        u8    vf_id;
        u8    mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
        u8    msg;
        u8    vf_id;
        u16   max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    rq_cnt;
        u8    cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
        u8    msg;
        u8    vf_id;
        u16   ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    hash_bits;
        u8    tbl_len;
        u8    tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG     8
        u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
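
/* Illustrative note, not part of the original header: the full indirection
 * table (up to NIC_MAX_RSS_IDR_TBL_SIZE = 256 entries) does not fit in one
 * 128-bit mailbox message, so it is sent RSS_IND_TBL_LEN_PER_MBX_MSG (8)
 * entries at a time, with tbl_offset advancing per chunk and follow-up
 * chunks using NIC_MBOX_MSG_RSS_CFG_CONT.
 */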

struct bgx_stats_msg {
        u8    msg;
        u8    vf_id;
        u8    rx;
        u8    idx;
        u64   stats;
};

/* Physical interface link status */
struct bgx_link_status {
        u8    msg;
        u8    mac_type;
        u8    link_up;
        u8    duplex;
        u32   speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
        u8    msg;
        u8    vf_id;
        u8    qs_count;
};

struct nicvf_ptr {
        u8    msg;
        u8    vf_id;
        bool  sqs_mode;
        u8    sqs_id;
        u64   nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
        u8    msg;
        u8    vf_id;
        bool  enable;
};

/* Reset statistics counters */
struct reset_stat_cfg {
        u8    msg;
        /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
        u16   rx_stat_mask;
        /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
        u8    tx_stat_mask;
        /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
         * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
         * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
         * ..
         * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
         * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
         */
        u16   rq_stat_mask;
        /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
         * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
         * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
         * ..
         * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
         * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
         */
        u16   sq_stat_mask;
};
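
/* Illustrative example, not part of the original header: to ask the PF to
 * clear both RQ_STAT0 and RQ_STAT1 of receive queue 'q' for this VF, the
 * bitmap layout documented above gives
 *
 *	mbx.reset_stat.rq_stat_mask |= 0x3 << (q * 2);
 *
 * and similarly for sq_stat_mask with send queues.
 */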

struct pfc {
        u8    msg;
        u8    get; /* Get or set PFC settings */
        u8    autoneg;
        u8    fc_rx;
        u8    fc_tx;
};

struct set_ptp {
        u8    msg;
        bool  enable;
};

struct xcast {
        u8    msg;
        u8    mode;
        u64   mac:48;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
        struct { u8 msg; }      msg;
        struct nic_cfg_msg      nic_cfg;
        struct qs_cfg_msg       qs;
        struct rq_cfg_msg       rq;
        struct sq_cfg_msg       sq;
        struct set_mac_msg      mac;
        struct set_frs_msg      frs;
        struct cpi_cfg_msg      cpi_cfg;
        struct rss_sz_msg       rss_size;
        struct rss_cfg_msg      rss_cfg;
        struct bgx_stats_msg    bgx_stats;
        struct bgx_link_status  link_status;
        struct sqs_alloc        sqs_alloc;
        struct nicvf_ptr        nicvf;
        struct set_loopback     lbk;
        struct reset_stat_cfg   reset_stat;
        struct pfc              pfc;
        struct set_ptp          ptp;
        struct xcast            xcast;
};

#define NIC_NODE_ID_MASK        0x03
#define NIC_NODE_ID_SHIFT       44

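/* On multi-node (multi-socket) ThunderX systems the node a device belongs
 * to is encoded in its BAR0 physical address; nic_get_node_id() below
 * recovers it from bits 45:44 of that address.
 */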
static inline int nic_get_node_id(struct pci_dev *pdev)
{
        u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
        return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}

static inline bool pass1_silicon(struct pci_dev *pdev)
{
        return (pdev->revision < 8) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

static inline bool pass2_silicon(struct pci_dev *pdev)
{
        return (pdev->revision >= 8) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
                              int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);

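/* Illustrative example, not part of the original API surface: how a VF
 * driver typically composes a mailbox request using the types above, here
 * asking the PF to program a new maximum frame size.  The function name is
 * hypothetical; nicvf_send_msg_to_pf() waits for the PF to ACK/NACK the
 * request (or for NIC_MBOX_MSG_TIMEOUT to expire) and returns the result.
 */
static inline int nicvf_example_set_max_frs(struct nicvf *nic, u16 max_frs)
{
        union nic_mbx mbx = {};

        /* Fill in the set_frs_msg view of the 128-bit mailbox message */
        mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
        mbx.frs.max_frs = max_frs;
        mbx.frs.vf_id = nic->vf_id;

        return nicvf_send_msg_to_pf(nic, &mbx);
}
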
#endif /* NIC_H */
