root/drivers/net/ethernet/cavium/thunder/nicvf_queues.h

DEFINITIONS

This source file includes the following definitions.
  1. nicvf_iova_to_phys

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/xdp.h>
#include "q_struct.h"

#define MAX_QUEUE_SET                   128
#define MAX_RCV_QUEUES_PER_QS           8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
#define MAX_SND_QUEUES_PER_QS           8
#define MAX_CMP_QUEUES_PER_QS           8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ                0
#define NICVF_INTR_ID_SQ                8
#define NICVF_INTR_ID_RBDR              16
#define NICVF_INTR_ID_MISC              18
#define NICVF_INTR_ID_QS_ERR            19

#define for_each_cq_irq(irq)    \
        for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)    \
        for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)  \
        for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

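These macros iterate over the interrupt vector ID ranges the VF reserves for each source (CQ, SQ, RBDR). A minimal usage sketch, assuming a caller-supplied bookkeeping array; vec_used[] and the helper are illustrative, not part of the driver:

/* Illustrative only: mark which vector IDs the completion queues occupy.
 * irq runs 0..7, i.e. NICVF_INTR_ID_CQ .. NICVF_INTR_ID_SQ - 1.
 */
static inline void example_mark_cq_vectors(bool *vec_used)
{
        int irq;

        for_each_cq_irq(irq)
                vec_used[irq] = true;
}
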
#define RBDR_SIZE0              0ULL /* 8K entries */
#define RBDR_SIZE1              1ULL /* 16K entries */
#define RBDR_SIZE2              2ULL /* 32K entries */
#define RBDR_SIZE3              3ULL /* 64K entries */
#define RBDR_SIZE4              4ULL /* 128K entries */
#define RBDR_SIZE5              5ULL /* 256K entries */
#define RBDR_SIZE6              6ULL /* 512K entries */

#define SND_QUEUE_SIZE0         0ULL /* 1K entries */
#define SND_QUEUE_SIZE1         1ULL /* 2K entries */
#define SND_QUEUE_SIZE2         2ULL /* 4K entries */
#define SND_QUEUE_SIZE3         3ULL /* 8K entries */
#define SND_QUEUE_SIZE4         4ULL /* 16K entries */
#define SND_QUEUE_SIZE5         5ULL /* 32K entries */
#define SND_QUEUE_SIZE6         6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0         0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1         1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2         2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3         3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4         4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5         5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6         6ULL /* 64K entries */

/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT        1

#define SND_QSIZE               SND_QUEUE_SIZE0
#define SND_QUEUE_LEN           (1ULL << (SND_QSIZE + 10))
#define MIN_SND_QUEUE_LEN       (1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN       (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH        2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT        2
/* Timestamp not enabled, otherwise this would be 2 */
#define MAX_CQE_PER_PKT_XMIT            1

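The *_SIZE0..6 values are hardware size codes: a send or completion queue with code n holds 2^(n + 10) entries. A compile-time check of that arithmetic for the defaults above; the helper and its BUILD_BUG_ON lines (from <linux/build_bug.h>) are illustrative only, not part of the driver:

/* Illustrative only: verify the size-code arithmetic for the defaults.
 * SND_QSIZE = 0  ->  SND_QUEUE_LEN     = 1 << (0 + 10) = 1024 entries
 *                    MAX_SND_QUEUE_LEN = 1 << (6 + 10) = 65536 entries
 */
static inline void example_check_sq_lens(void)
{
        BUILD_BUG_ON(SND_QUEUE_LEN != 1024);
        BUILD_BUG_ON(MAX_SND_QUEUE_LEN != 65536);
}
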
/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE               CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN           (1ULL << (CMP_QSIZE + 10))
#define MIN_CMP_QUEUE_LEN       (1ULL << (CMP_QUEUE_SIZE0 + 10))
#define MAX_CMP_QUEUE_LEN       (1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH    (NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH  80 /* ~2usec */

/* Number of CQEs that may anyway get used by HW due to pipelining
 * effects irrespective of PASS/DROP/LEVELS being configured
 */
#define CMP_QUEUE_PIPELINE_RSVD 544

#define RBDR_SIZE               RBDR_SIZE0
#define RCV_BUF_COUNT           (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT       (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH             (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN          1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN     (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MAX_CQES_FOR_TX         ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                                 MAX_CQE_PER_PKT_XMIT)

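For reference, the derived receive-buffer and TX-completion numbers with the defaults above work out as follows (a worked check, not text from the source):

/*
 * RBDR_SIZE = RBDR_SIZE0  ->  RCV_BUF_COUNT   = 1 << (0 + 13)  = 8192 buffers
 *                             RBDR_THRESH     = 8192 / 2       = 4096
 * SND_QUEUE_LEN = 1024    ->  MAX_CQES_FOR_TX = (1024 / 2) * 1 = 512 CQEs
 */
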
/* RED and Backpressure levels of CQ for pkt reception
 * For CQ, level is a measure of emptiness i.e 0x0 means full
 * eg: For CQ of size 4K, and for pass/drop levels of 160/144
 * HW accepts pkt if unused CQE >= 2560
 * RED accepts pkt if unused CQE < 2560 & >= 2304
 * DROPs pkts if unused CQE < 2304
 */
#define RQ_PASS_CQ_LVL         192ULL
#define RQ_DROP_CQ_LVL         184ULL

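The example in the comment above suggests that one level step corresponds to queue_size / 256 unused CQEs (160 * 16 = 2560 and 144 * 16 = 2304 for a 4K CQ). Under that inference, which is not stated in this header and should be checked against the hardware documentation, the configured 192/184 levels on the default 1K CQ would translate roughly as follows:

/*
 * Assuming one level step == CMP_QUEUE_LEN / 256 unused CQEs (inferred from
 * the 4K example above, not confirmed here), for the default 1K CQ:
 *   RQ_PASS_CQ_LVL = 192  ->  pass while unused CQE >= 192 * 4 = 768
 *   RQ_DROP_CQ_LVL = 184  ->  drop once  unused CQE <  184 * 4 = 736
 */
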
/* RED and Backpressure levels of RBDR for pkt reception
 * For RBDR, level is a measure of fullness i.e 0x0 means empty
 * eg: For RBDR of size 8K, and for pass/drop levels of 4/0
 * HW accepts pkt if unused RBs >= 256
 * RED accepts pkt if unused RBs < 256 & >= 0
 * DROPs pkts if unused RBs < 0
 */
#define RQ_PASS_RBDR_LVL        8ULL
#define RQ_DROP_RBDR_LVL        0ULL

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE     16
#define CMP_QUEUE_DESC_SIZE     512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN             7
#define NICVF_RCV_BUF_ALIGN_BYTES       (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES       512  /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES       128  /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)   ALIGN(ADDR, ALIGN_BYTES)

/* Queue enable/disable */
#define NICVF_SQ_EN             BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET          BIT_ULL(41)
#define NICVF_SQ_RESET          BIT_ULL(17)
#define NICVF_RBDR_RESET        BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
        CQ_ERRLVL_MAC,
        CQ_ERRLVL_L2,
        CQ_ERRLVL_L3,
        CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
        CQ_RX_ERROP_RE_NONE = 0x0,
        CQ_RX_ERROP_RE_PARTIAL = 0x1,
        CQ_RX_ERROP_RE_JABBER = 0x2,
        CQ_RX_ERROP_RE_FCS = 0x7,
        CQ_RX_ERROP_RE_TERMINATE = 0x9,
        CQ_RX_ERROP_RE_RX_CTL = 0xb,
        CQ_RX_ERROP_PREL2_ERR = 0x1f,
        CQ_RX_ERROP_L2_FRAGMENT = 0x20,
        CQ_RX_ERROP_L2_OVERRUN = 0x21,
        CQ_RX_ERROP_L2_PFCS = 0x22,
        CQ_RX_ERROP_L2_PUNY = 0x23,
        CQ_RX_ERROP_L2_MAL = 0x24,
        CQ_RX_ERROP_L2_OVERSIZE = 0x25,
        CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
        CQ_RX_ERROP_L2_LENMISM = 0x27,
        CQ_RX_ERROP_L2_PCLP = 0x28,
        CQ_RX_ERROP_IP_NOT = 0x41,
        CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
        CQ_RX_ERROP_IP_MAL = 0x43,
        CQ_RX_ERROP_IP_MALD = 0x44,
        CQ_RX_ERROP_IP_HOP = 0x45,
        CQ_RX_ERROP_L3_ICRC = 0x46,
        CQ_RX_ERROP_L3_PCLP = 0x47,
        CQ_RX_ERROP_L4_MAL = 0x61,
        CQ_RX_ERROP_L4_CHK = 0x62,
        CQ_RX_ERROP_UDP_LEN = 0x63,
        CQ_RX_ERROP_L4_PORT = 0x64,
        CQ_RX_ERROP_TCP_FLAG = 0x65,
        CQ_RX_ERROP_TCP_OFFSET = 0x66,
        CQ_RX_ERROP_L4_PCLP = 0x67,
        CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
        CQ_TX_ERROP_GOOD = 0x0,
        CQ_TX_ERROP_DESC_FAULT = 0x10,
        CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
        CQ_TX_ERROP_SUBDC_ERR = 0x12,
        CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
        CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
        CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
        CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
        CQ_TX_ERROP_LOCK_VIOL = 0x83,
        CQ_TX_ERROP_DATA_FAULT = 0x84,
        CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
        CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
        CQ_TX_ERROP_MEM_FAULT = 0x87,
        CQ_TX_ERROP_CK_OVERLAP = 0x88,
        CQ_TX_ERROP_CK_OFLOW = 0x89,
        CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

enum RQ_SQ_STATS {
        RQ_SQ_STATS_OCTS,
        RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
        u64     bytes;
        u64     pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
        dma_addr_t      dma;
        u64             size;
        u32             q_len;
        dma_addr_t      phys_base;
        void            *base;
        void            *unalign_base;
};

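struct q_desc_mem tracks both the raw (unaligned) coherent allocation and the aligned base that NICVF_ALIGNED_ADDR produces for the hardware. A sketch of how such an aligned base can be carved out of an over-sized allocation, loosely modelled on the queue-memory allocator in nicvf_queues.c; the function name and error handling are illustrative, and it needs <linux/dma-mapping.h>:

/* Sketch: carve an aligned descriptor base out of an over-sized allocation */
static int example_alloc_desc_mem(struct device *dev, struct q_desc_mem *dmem,
                                  int q_len, int desc_size, int align_bytes)
{
        dmem->q_len = q_len;
        dmem->size = (desc_size * q_len) + align_bytes;

        /* Over-allocate so the base can be rounded up to align_bytes */
        dmem->unalign_base = dma_alloc_coherent(dev, dmem->size, &dmem->dma,
                                                GFP_KERNEL);
        if (!dmem->unalign_base)
                return -ENOMEM;

        /* Align the bus address, then shift the CPU pointer by the same offset */
        dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
        dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
        return 0;
}
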
struct pgcache {
        struct page     *page;
        int             ref_count;
        u64             dma_addr;
};

struct rbdr {
        bool            enable;
        u32             dma_size;
        u32             frag_len;
        u32             thresh;         /* Threshold level for interrupt */
        void            *desc;
        u32             head;
        u32             tail;
        struct q_desc_mem   dmem;
        bool            is_xdp;

        /* For page recycling */
        int             pgidx;
        int             pgcnt;
        int             pgalloc;
        struct pgcache  *pgcache;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
        bool            enable;
        struct  rbdr    *rbdr_start;
        struct  rbdr    *rbdr_cont;
        bool            en_tcp_reassembly;
        u8              cq_qs;  /* CQ's QS to which this RQ is assigned */
        u8              cq_idx; /* CQ index (0 to 7) in the QS */
        u8              cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
        u8              cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
        u8              start_rbdr_qs;     /* First buffer ptrs - QS num */
        u8              start_qs_rbdr_idx; /* RBDR idx in the above QS */
        u8              caching;
        struct          rx_tx_queue_stats stats;
        struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
        bool            enable;
        u16             thresh;
        spinlock_t      lock;  /* lock to serialize processing CQEs */
        void            *desc;
        struct q_desc_mem   dmem;
        int             irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
        bool            enable;
        u8              cq_qs;  /* CQ's QS to which this SQ is pointing */
        u8              cq_idx; /* CQ index (0 to 7) in the above QS */
        u16             thresh;
        atomic_t        free_cnt;
        u32             head;
        u32             tail;
        u64             *skbuff;
        void            *desc;
        u64             *xdp_page;
        u16             xdp_desc_cnt;
        u16             xdp_free_cnt;
        bool            is_xdp;

        /* For TSO segment's header */
        char            *tso_hdrs;
        dma_addr_t      tso_hdrs_phys;

        cpumask_t       affinity_mask;
        struct q_desc_mem   dmem;
        struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
        bool            enable;
        bool            be_en;
        u8              vnic_id;
        u8              rq_cnt;
        u8              cq_cnt;
        u64             cq_len;
        u8              sq_cnt;
        u64             sq_len;
        u8              rbdr_cnt;
        u64             rbdr_len;
        struct  rcv_queue       rq[MAX_RCV_QUEUES_PER_QS];
        struct  cmp_queue       cq[MAX_CMP_QUEUES_PER_QS];
        struct  snd_queue       sq[MAX_SND_QUEUES_PER_QS];
        struct  rbdr            rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
                (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
                (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
                (&(((union cq_desc_t *)((RING)->desc))[idx]))

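The GET_*_DESC macros index straight into a ring's descriptor memory. A minimal sketch of pulling a completion entry and checking its type; cqe_type and CQE_TYPE_RX come from q_struct.h, while the helper itself is illustrative:

/* Illustrative: fetch the CQE at idx and test whether it describes an RX pkt */
static inline bool example_cqe_is_rx(struct cmp_queue *cq, u32 idx)
{
        struct cqe_rx_t *cqe = (struct cqe_rx_t *)GET_CQ_DESC(cq, idx);

        return cqe->cqe_type == CQE_TYPE_RX;
}
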
/* CQ status bits */
#define CQ_WR_FULL      BIT(26)
#define CQ_WR_DISABLE   BIT(25)
#define CQ_WR_FAULT     BIT(24)
#define CQ_CQE_COUNT    (0xFFFF << 0)

#define CQ_ERR_MASK     (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

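A hedged sketch of how these status bits might be consumed: read the per-CQ status register, bail out on error conditions, otherwise report how many CQEs are pending. NIC_QSET_CQ_0_7_STATUS is assumed from nic_reg.h, and nicvf_queue_reg_read() is declared further down in this header:

/* Sketch: read a CQ's status, report errors and the pending CQE count */
static inline int example_cq_pending(struct nicvf *nic, int cq_idx)
{
        u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);

        if (status & CQ_ERR_MASK)       /* full/disable/fault conditions */
                return -EIO;

        return status & CQ_CQE_COUNT;   /* number of valid CQEs queued */
}
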
static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
        /* Translation is installed only when IOMMU is present */
        if (nic->iommu_domain)
                return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
        return dma_addr;
}

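A minimal usage sketch: translate a buffer's bus address reported by the hardware back to a CPU pointer, roughly how the receive path uses this helper; phys_to_virt() needs <asm/io.h>, and the wrapper itself is illustrative:

/* Sketch: recover the kernel virtual address of a hardware-reported buffer */
static inline void *example_rb_to_va(struct nicvf *nic, dma_addr_t buf_addr)
{
        u64 phys = nicvf_iova_to_phys(nic, buf_addr);

        return phys ? phys_to_virt(phys) : NULL;
}
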
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
                              int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
                                 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                            int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
                              struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
                        struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
                            u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
                                  struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

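A sketch of the transmit hand-off built on nicvf_sq_append_skb(), assuming (as the driver's xmit handler appears to) that a zero return means no descriptors were available; the function and its parameters are illustrative, not the driver's exact code:

/* Sketch: queue an skb on a send queue and back-pressure the stack if full */
static netdev_tx_t example_xmit(struct nicvf *nic, struct snd_queue *sq,
                                struct sk_buff *skb, u8 sq_num,
                                struct netdev_queue *txq)
{
        if (!nicvf_sq_append_skb(nic, sq, skb, sq_num)) {
                /* No room in the SQ: stop the queue, let the stack retry */
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        return NETDEV_TX_OK;
}
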
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
                           u64 qidx, u64 val);
u64  nicvf_queue_reg_read(struct nicvf *nic,
                          u64 offset, u64 qidx);

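A sketch of a typical per-queue register access: ringing a send queue's doorbell after posting subdescriptors. NIC_QSET_SQ_0_7_DOOR is assumed from nic_reg.h and the barrier placement mirrors what the driver appears to do; treat this as illustrative, not the driver's exact code:

/* Sketch: ring a send queue's doorbell for desc_cnt freshly written subdescs */
static inline void example_sq_doorbell(struct nicvf *nic, int sq_num,
                                       int desc_cnt)
{
        /* Make sure descriptor memory stores are visible before the doorbell */
        smp_wmb();
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, sq_num, desc_cnt);
}
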
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
