root/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c


DEFINITIONS

This source file includes the following definitions:
  1. cn23xx_dump_pf_initialized_regs
  2. cn23xx_pf_soft_reset
  3. cn23xx_enable_error_reporting
  4. cn23xx_coprocessor_clock
  5. cn23xx_pf_get_oq_ticks
  6. cn23xx_setup_global_mac_regs
  7. cn23xx_reset_io_queues
  8. cn23xx_pf_setup_global_input_regs
  9. cn23xx_pf_setup_global_output_regs
  10. cn23xx_setup_pf_device_regs
  11. cn23xx_setup_iq_regs
  12. cn23xx_setup_oq_regs
  13. cn23xx_pf_mbox_thread
  14. cn23xx_setup_pf_mbox
  15. cn23xx_free_pf_mbox
  16. cn23xx_enable_io_queues
  17. cn23xx_disable_io_queues
  18. cn23xx_pf_msix_interrupt_handler
  19. cn23xx_handle_pf_mbox_intr
  20. cn23xx_interrupt_handler
  21. cn23xx_bar1_idx_setup
  22. cn23xx_bar1_idx_write
  23. cn23xx_bar1_idx_read
  24. cn23xx_update_read_index
  25. cn23xx_enable_pf_interrupt
  26. cn23xx_disable_pf_interrupt
  27. cn23xx_get_pcie_qlmport
  28. cn23xx_get_pf_num
  29. cn23xx_setup_reg_address
  30. cn23xx_sriov_config
  31. setup_cn23xx_octeon_pf_device
  32. validate_cn23xx_pf_config_info
  33. cn23xx_fw_loaded
  34. cn23xx_tell_vf_its_macaddr_changed
  35. cn23xx_get_vf_stats_callback
  36. cn23xx_get_vf_stats

   1 /**********************************************************************
   2  * Author: Cavium, Inc.
   3  *
   4  * Contact: support@cavium.com
   5  *          Please include "LiquidIO" in the subject.
   6  *
   7  * Copyright (c) 2003-2016 Cavium, Inc.
   8  *
   9  * This file is free software; you can redistribute it and/or modify
  10  * it under the terms of the GNU General Public License, Version 2, as
  11  * published by the Free Software Foundation.
  12  *
  13  * This file is distributed in the hope that it will be useful, but
  14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17  ***********************************************************************/
  18 #include <linux/pci.h>
  19 #include <linux/vmalloc.h>
  20 #include <linux/etherdevice.h>
  21 #include "liquidio_common.h"
  22 #include "octeon_droq.h"
  23 #include "octeon_iq.h"
  24 #include "response_manager.h"
  25 #include "octeon_device.h"
  26 #include "cn23xx_pf_device.h"
  27 #include "octeon_main.h"
  28 #include "octeon_mailbox.h"
  29 
  30 #define RESET_NOTDONE 0
  31 #define RESET_DONE 1
  32 
  33 /* Change the value of SLI Packet Input Jabber Register to allow
  34  * VXLAN TSO packets which can be 64424 bytes, exceeding the
  35  * MAX_GSO_SIZE we supplied to the kernel
  36  */
  37 #define CN23XX_INPUT_JABBER 64600
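      /* Worked check (illustrative): with the 64424-byte maximum VXLAN TSO
       * frame noted above, the 64600-byte threshold leaves
       * 64600 - 64424 = 176 bytes of headroom for encapsulation headers.
       */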
  38 
  39 void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
  40 {
  41         int i = 0;
  42         u32 regval = 0;
  43         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
  44 
  45         /*In cn23xx_soft_reset*/
  46         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
  47                 "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
  48                 CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
  49         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
  50                 "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
  51                 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
  52         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
  53                 "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
  54                 lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
  55 
  56         /*In cn23xx_set_dpi_regs*/
  57         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
  58                 "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
  59                 lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
  60 
  61         for (i = 0; i < 6; i++) {
  62                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
  63                         "CN23XX_DPI_DMA_ENG_ENB", i,
  64                         CN23XX_DPI_DMA_ENG_ENB(i),
  65                         lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
  66                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
  67                         "CN23XX_DPI_DMA_ENG_BUF", i,
  68                         CN23XX_DPI_DMA_ENG_BUF(i),
  69                         lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
  70         }
  71 
  72         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
  73                 CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
  74 
  75         /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
  76         pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
  77         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
  78                 "CN23XX_CONFIG_PCIE_DEVCTL",
  79                 CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
  80 
  81         dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
  82                 "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
  83                 CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
  84                 lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
  85 
  86         /*In cn23xx_specific_regs_setup */
  87         dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
  88                 "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
  89                 CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
  90                 CVM_CAST64(octeon_read_csr64(
  91                         oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
  92 
  93         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
  94                 "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
  95                 (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
  96 
  97         /*In cn23xx_setup_global_mac_regs*/
  98         for (i = 0; i < CN23XX_MAX_MACS; i++) {
  99                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 100                         "CN23XX_SLI_PKT_MAC_RINFO64", i,
 101                         CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
 102                         CVM_CAST64(octeon_read_csr64
 103                                 (oct, CN23XX_SLI_PKT_MAC_RINFO64
 104                                         (i, oct->pf_num))));
 105         }
 106 
 107         /*In cn23xx_setup_global_input_regs*/
 108         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
 109                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 110                         "CN23XX_SLI_IQ_PKT_CONTROL64", i,
 111                         CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
 112                         CVM_CAST64(octeon_read_csr64
 113                                 (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
 114         }
 115 
 116         /*In cn23xx_setup_global_output_regs*/
 117         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
 118                 "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
 119                 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
 120 
 121         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
 122                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 123                         "CN23XX_SLI_OQ_PKT_CONTROL", i,
 124                         CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
 125                         CVM_CAST64(octeon_read_csr(
 126                                 oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
 127                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 128                         "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
 129                         CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
 130                         CVM_CAST64(octeon_read_csr64(
 131                                 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
 132         }
 133 
 134         /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
 135         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
 136                 "cn23xx->intr_enb_reg64",
 137                 CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
 138                 CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
 139 
 140         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
 141                 "cn23xx->intr_sum_reg64",
 142                 CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
 143                 CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
 144 
 145         /*In cn23xx_setup_iq_regs*/
 146         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
 147                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 148                         "CN23XX_SLI_IQ_BASE_ADDR64", i,
 149                         CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
 150                         CVM_CAST64(octeon_read_csr64(
 151                                 oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
 152                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 153                         "CN23XX_SLI_IQ_SIZE", i,
 154                         CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
 155                         CVM_CAST64(octeon_read_csr
 156                                 (oct, CN23XX_SLI_IQ_SIZE(i))));
 157                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 158                         "CN23XX_SLI_IQ_DOORBELL", i,
 159                         CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
 160                         CVM_CAST64(octeon_read_csr64(
 161                                 oct, CN23XX_SLI_IQ_DOORBELL(i))));
 162                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 163                         "CN23XX_SLI_IQ_INSTR_COUNT64", i,
 164                         CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
 165                         CVM_CAST64(octeon_read_csr64(
 166                                 oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
 167         }
 168 
 169         /*In cn23xx_setup_oq_regs*/
 170         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
 171                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 172                         "CN23XX_SLI_OQ_BASE_ADDR64", i,
 173                         CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
 174                         CVM_CAST64(octeon_read_csr64(
 175                                 oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
 176                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 177                         "CN23XX_SLI_OQ_SIZE", i,
 178                         CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
 179                         CVM_CAST64(octeon_read_csr
 180                                 (oct, CN23XX_SLI_OQ_SIZE(i))));
 181                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 182                         "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
 183                         CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
 184                         CVM_CAST64(octeon_read_csr(
 185                                 oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
 186                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 187                         "CN23XX_SLI_OQ_PKTS_SENT", i,
 188                         CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
 189                         CVM_CAST64(octeon_read_csr64(
 190                                 oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
 191                 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
 192                         "CN23XX_SLI_OQ_PKTS_CREDIT", i,
 193                         CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
 194                         CVM_CAST64(octeon_read_csr64(
 195                                 oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
 196         }
 197 
 198         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
 199                 "CN23XX_SLI_PKT_TIME_INT",
 200                 CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
 201                 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
 202         dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
 203                 "CN23XX_SLI_PKT_CNT_INT",
 204                 CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
 205                 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
 206 }
 207 
 208 static int cn23xx_pf_soft_reset(struct octeon_device *oct)
 209 {
 210         octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
 211 
 212         dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
 213                 oct->octeon_id);
 214 
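              /* SCRATCH1 is cleared by a successful chip reset; the 0x1234
               * sentinel written here is read back after the reset below to
               * verify that the soft reset actually completed.
               */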
 215         octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
 216 
 217         /* Initiate chip-wide soft reset */
 218         lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
 219         lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
 220 
 221         /* Wait for 100ms as Octeon resets. */
 222         mdelay(100);
 223 
 224         if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
 225                 dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
 226                         oct->octeon_id);
 227                 return 1;
 228         }
 229 
 230         dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
 231                 oct->octeon_id);
 232 
  233         /* Restore the reset value. */
 234         octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
 235 
 236         return 0;
 237 }
 238 
 239 static void cn23xx_enable_error_reporting(struct octeon_device *oct)
 240 {
 241         u32 regval;
 242         u32 uncorrectable_err_mask, corrtable_err_status;
 243 
 244         pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
 245         if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
 246                 uncorrectable_err_mask = 0;
 247                 corrtable_err_status = 0;
 248                 pci_read_config_dword(oct->pci_dev,
 249                                       CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
 250                                       &uncorrectable_err_mask);
 251                 pci_read_config_dword(oct->pci_dev,
 252                                       CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
 253                                       &corrtable_err_status);
 254                 dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
 255                                  "\tdev_ctl_status_reg = 0x%08x\n"
 256                                  "\tuncorrectable_error_mask_reg = 0x%08x\n"
 257                                  "\tcorrectable_error_status_reg = 0x%08x\n",
 258                             regval, uncorrectable_err_mask,
 259                             corrtable_err_status);
 260         }
 261 
 262         regval |= 0xf; /* Enable Link error reporting */
 263 
 264         dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
 265                 oct->octeon_id);
 266         pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
 267 }
 268 
 269 static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
 270 {
  271         /* Bits 29:24 of RST_BOOT[PNR_MUL] hold the reference-clock
  272          * multiplier for SLI.
  273          */
 274 
 275         /* TBD: get the info in Hand-shake */
 276         return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
 277 }
 278 
 279 u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
 280 {
 281         /* This gives the SLI clock per microsec */
 282         u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
 283 
 284         oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
 285 
 286         /* This gives the clock cycles per millisecond */
 287         oqticks_per_us *= 1000;
 288 
 289         /* This gives the oq ticks (1024 core clock cycles) per millisecond */
 290         oqticks_per_us /= 1024;
 291 
  292         /* time_intr is in microseconds. The next 2 steps give the oq
  293          * ticks corresponding to time_intr.
  294          */
 295         oqticks_per_us *= time_intr_in_us;
 296         oqticks_per_us /= 1000;
 297 
 298         return oqticks_per_us;
 299 }
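
      /* Worked example for cn23xx_pf_get_oq_ticks() above (illustrative,
       * assuming RST_BOOT[PNR_MUL] = 20, i.e. a 1000 MHz SLI clock):
       * oqticks_per_us starts at 1000; * 1000 gives 1000000 cycles per ms;
       * / 1024 gives ~976 OQ ticks per ms; with time_intr_in_us = 100 the
       * result is 976 * 100 / 1000 = ~97 ticks.
       */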
 300 
 301 static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
 302 {
 303         u16 mac_no = oct->pcie_port;
 304         u16 pf_num = oct->pf_num;
 305         u64 reg_val;
 306         u64 temp;
 307 
  308         /* Program SRN and TRS for each MAC (0..3). */
 309 
 310         dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
 311                 __func__, mac_no);
  312         /* By default, map all 64 IOQs to a single MAC. */
 313 
 314         reg_val =
 315             octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
 316 
 317         if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
 318                 /* setting SRN <6:0>  */
 319                 reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
 320         } else {
 321                 /* setting SRN <6:0>  */
 322                 reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
 323         }
 324 
 325         /* setting TRS <23:16> */
 326         reg_val = reg_val |
 327                   (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
 328         /* setting RPVF <39:32> */
 329         temp = oct->sriov_info.rings_per_vf & 0xff;
 330         reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);
 331 
 332         /* setting NVFS <55:48> */
 333         temp = oct->sriov_info.max_vfs & 0xff;
 334         reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
 335 
 336         /* write these settings to MAC register */
 337         octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
 338                            reg_val);
 339 
 340         dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
 341                 mac_no, pf_num, (u64)octeon_read_csr64
 342                 (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
 343 }
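
      /* Example RINFO encoding (illustrative, values assumed): pf_num = 0
       * with total rings trs = 64, rings_per_vf = 1 and max_vfs = 56 gives
       * SRN = 0, TRS = 0x40 << 16, RPVF = 1 << 32 and NVFS = 56 << 48,
       * i.e. reg_val = 0x0038000100400000.
       */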
 344 
 345 static int cn23xx_reset_io_queues(struct octeon_device *oct)
 346 {
 347         int ret_val = 0;
 348         u64 d64;
 349         u32 q_no, srn, ern;
 350         u32 loop = 1000;
 351 
 352         srn = oct->sriov_info.pf_srn;
 353         ern = srn + oct->sriov_info.num_pf_rings;
 354 
  355         /* As per the HRM reg description, s/w can't write 0 to ENB. */
  356         /* To turn a queue off, the RST bit must be set instead. */
 357 
 358         /* Reset the Enable bit for all the 64 IQs.  */
 359         for (q_no = srn; q_no < ern; q_no++) {
 360                 /* set RST bit to 1. This bit applies to both IQ and OQ */
 361                 d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 362                 d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
 363                 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
 364         }
 365 
  366         /* Wait until the RST bit is clear or the QUIET bit is set. */
 367         for (q_no = srn; q_no < ern; q_no++) {
 368                 u64 reg_val = octeon_read_csr64(oct,
 369                                         CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 370                 while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
 371                        !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
 372                        loop--) {
 373                         WRITE_ONCE(reg_val, octeon_read_csr64(
 374                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
 375                 }
 376                 if (!loop) {
 377                         dev_err(&oct->pci_dev->dev,
 378                                 "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
 379                                 q_no);
 380                         return -1;
 381                 }
 382                 WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
 383                         ~CN23XX_PKT_INPUT_CTL_RST);
 384                 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
 385                                    READ_ONCE(reg_val));
 386 
 387                 WRITE_ONCE(reg_val, octeon_read_csr64(
 388                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
 389                 if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
 390                         dev_err(&oct->pci_dev->dev,
 391                                 "clearing the reset failed for qno: %u\n",
 392                                 q_no);
 393                         ret_val = -1;
 394                 }
 395         }
 396 
 397         return ret_val;
 398 }
 399 
 400 static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
 401 {
 402         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
 403         struct octeon_instr_queue *iq;
 404         u64 intr_threshold, reg_val;
 405         u32 q_no, ern, srn;
 406         u64 pf_num;
 407         u64 vf_num;
 408 
 409         pf_num = oct->pf_num;
 410 
 411         srn = oct->sriov_info.pf_srn;
 412         ern = srn + oct->sriov_info.num_pf_rings;
 413 
 414         if (cn23xx_reset_io_queues(oct))
 415                 return -1;
 416 
  417         /* Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
  418          * for all queues. Only the PF can set these bits.
  419          * Bits 29:30 indicate the MAC num.
  420          * Bits 32:47 indicate the PVF num.
  421          */
 422         for (q_no = 0; q_no < ern; q_no++) {
 423                 reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
 424 
 425                 /* for VF assigned queues. */
 426                 if (q_no < oct->sriov_info.pf_srn) {
 427                         vf_num = q_no / oct->sriov_info.rings_per_vf;
 428                         vf_num += 1; /* VF1, VF2,........ */
 429                 } else {
 430                         vf_num = 0;
 431                 }
 432 
 433                 reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
 434                 reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
 435 
 436                 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
 437                                    reg_val);
 438         }
 439 
  440         /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
  441          * PF queues.
  442          */
 443         for (q_no = srn; q_no < ern; q_no++) {
 444                 void __iomem *inst_cnt_reg;
 445 
 446                 iq = oct->instr_queue[q_no];
 447                 if (iq)
 448                         inst_cnt_reg = iq->inst_cnt_reg;
 449                 else
 450                         inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
 451                                        CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
 452 
 453                 reg_val =
 454                     octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 455 
 456                 reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
 457 
 458                 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
 459                                    reg_val);
 460 
 461                 /* Set WMARK level for triggering PI_INT */
 462                 /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
 463                 intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
 464                                  CN23XX_PKT_IN_DONE_WMARK_MASK;
 465 
 466                 writeq((readq(inst_cnt_reg) &
 467                         ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
 468                           CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
 469                        (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
 470                        inst_cnt_reg);
 471         }
 472         return 0;
 473 }
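
      /* Ring-to-VF numbering sketch for the first loop above (illustrative,
       * assuming rings_per_vf = 1 and pf_srn = 56): q_no 0..55 map to
       * vf_num 1..56 (VF-owned rings), while q_no 56..63 get vf_num 0 and
       * belong to the PF itself.
       */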
 474 
 475 static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
 476 {
 477         u32 reg_val;
 478         u32 q_no, ern, srn;
 479         u64 time_threshold;
 480 
 481         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
 482 
 483         srn = oct->sriov_info.pf_srn;
 484         ern = srn + oct->sriov_info.num_pf_rings;
 485 
 486         if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
 487                 octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
 488         } else {
 489                 /** Set Output queue watermark to 0 to disable backpressure */
 490                 octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
 491         }
 492 
 493         for (q_no = srn; q_no < ern; q_no++) {
 494                 reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
 495 
 496                 /* clear IPTR */
 497                 reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
 498 
 499                 /* set DPTR */
 500                 reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
 501 
 502                 /* reset BMODE */
 503                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
 504 
 505                 /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
 506                  * for Output Queue ScatterList
 507                  * reset ROR_P, NSR_P
 508                  */
 509                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
 510                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
 511 
 512 #ifdef __LITTLE_ENDIAN_BITFIELD
 513                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
 514 #else
 515                 reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
 516 #endif
 517                 /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
 518                  * for Output Queue Data
 519                  * reset ROR, NSR
 520                  */
 521                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
 522                 reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
 523                 /* set the ES bit */
 524                 reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
 525 
 526                 /* write all the selected settings */
 527                 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
 528 
  529                 /* These interrupts are enabled in the
  530                  * oct->fn_list.enable_interrupt() routine, which is
  531                  * called after IOQ init. Set up the interrupt packet
  532                  * and time thresholds for all the OQs here.
  533                  */
 534                 time_threshold = cn23xx_pf_get_oq_ticks(
 535                     oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
 536 
 537                 octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
 538                                    (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
 539                                     (time_threshold << 32)));
 540         }
 541 
  542         /* Set the watermark level for PKO backpressure. */
 543         writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
 544 
  545         /* Disable putting OQs into reset when a ring has no doorbells;
  546          * enabling that would cause head-of-line blocking.
  547          */
  548         /* Do this only for the pass 1.0 and pass 1.1 parts checked below. */
 549         if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
 550             (oct->rev_id == OCTEON_CN23XX_REV_1_1))
 551                 writeq(readq((u8 *)oct->mmio[0].hw_addr +
 552                                      CN23XX_SLI_GBL_CONTROL) | 0x2,
 553                        (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
 554 
 555         /** Enable channel-level backpressure */
 556         if (oct->pf_num)
 557                 writeq(0xffffffffffffffffULL,
 558                        (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
 559         else
 560                 writeq(0xffffffffffffffffULL,
 561                        (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
 562 }
 563 
 564 static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
 565 {
 566         cn23xx_enable_error_reporting(oct);
 567 
 568         /* program the MAC(0..3)_RINFO before setting up input/output regs */
 569         cn23xx_setup_global_mac_regs(oct);
 570 
 571         if (cn23xx_pf_setup_global_input_regs(oct))
 572                 return -1;
 573 
 574         cn23xx_pf_setup_global_output_regs(oct);
 575 
  576         /* The default error timeout value should be 0x200000 to avoid
  577          * a host hang when an invalid register is read.
  578          */
 579         octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
 580                            CN23XX_SLI_WINDOW_CTL_DEFAULT);
 581 
 582         /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
 583         octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
 584         return 0;
 585 }
 586 
 587 static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
 588 {
 589         struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
 590         u64 pkt_in_done;
 591 
 592         iq_no += oct->sriov_info.pf_srn;
 593 
 594         /* Write the start of the input queue's ring and its size  */
 595         octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
 596                            iq->base_addr_dma);
 597         octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
 598 
 599         /* Remember the doorbell & instruction count register addr
 600          * for this queue
 601          */
 602         iq->doorbell_reg =
 603             (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
 604         iq->inst_cnt_reg =
 605             (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
 606         dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
 607                 iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
 608 
 609         /* Store the current instruction counter (used in flush_iq
 610          * calculation)
 611          */
 612         pkt_in_done = readq(iq->inst_cnt_reg);
 613 
 614         if (oct->msix_on) {
 615                 /* Set CINT_ENB to enable IQ interrupt   */
 616                 writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
 617                        iq->inst_cnt_reg);
 618         } else {
 619                 /* Clear the count by writing back what we read, but don't
 620                  * enable interrupts
 621                  */
 622                 writeq(pkt_in_done, iq->inst_cnt_reg);
 623         }
 624 
 625         iq->reset_instr_cnt = 0;
 626 }
 627 
 628 static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
 629 {
 630         u32 reg_val;
 631         struct octeon_droq *droq = oct->droq[oq_no];
 632         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
 633         u64 time_threshold;
 634         u64 cnt_threshold;
 635 
 636         oq_no += oct->sriov_info.pf_srn;
 637 
 638         octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
 639                            droq->desc_ring_dma);
 640         octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
 641 
 642         octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
 643                          droq->buffer_size);
 644 
 645         /* Get the mapped address of the pkt_sent and pkts_credit regs */
 646         droq->pkts_sent_reg =
 647             (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
 648         droq->pkts_credit_reg =
 649             (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
 650 
 651         if (!oct->msix_on) {
 652                 /* Enable this output queue to generate Packet Timer Interrupt
 653                  */
 654                 reg_val =
 655                     octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
 656                 reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
 657                 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
 658                                  reg_val);
 659 
 660                 /* Enable this output queue to generate Packet Count Interrupt
 661                  */
 662                 reg_val =
 663                     octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
 664                 reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
 665                 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
 666                                  reg_val);
 667         } else {
 668                 time_threshold = cn23xx_pf_get_oq_ticks(
 669                     oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
 670                 cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
 671 
 672                 octeon_write_csr64(
 673                     oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
 674                     ((time_threshold << 32 | cnt_threshold)));
 675         }
 676 }
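
      /* INT_LEVELS packing sketch (illustrative, values assumed): with a
       * packet threshold of 64 and a time threshold of 97 OQ ticks, the
       * register value is (97ULL << 32) | 64, i.e. the time threshold in
       * the upper 32 bits and the packet-count threshold in the lower 32.
       */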
 677 
 678 static void cn23xx_pf_mbox_thread(struct work_struct *work)
 679 {
 680         struct cavium_wk *wk = (struct cavium_wk *)work;
 681         struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
 682         struct octeon_device *oct = mbox->oct_dev;
 683         u64 mbox_int_val, val64;
 684         u32 q_no, i;
 685 
 686         if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
  687                 /* Read, and clear by writing 1. */
 688                 mbox_int_val = readq(mbox->mbox_int_reg);
 689                 writeq(mbox_int_val, mbox->mbox_int_reg);
 690 
 691                 for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
 692                         q_no = i * oct->sriov_info.rings_per_vf;
 693 
 694                         val64 = readq(oct->mbox[q_no]->mbox_write_reg);
 695 
 696                         if (val64 && (val64 != OCTEON_PFVFACK)) {
 697                                 if (octeon_mbox_read(oct->mbox[q_no]))
 698                                         octeon_mbox_process_message(
 699                                             oct->mbox[q_no]);
 700                         }
 701                 }
 702 
 703                 schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
 704         } else {
 705                 octeon_mbox_process_message(mbox);
 706         }
 707 }
 708 
 709 static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
 710 {
 711         struct octeon_mbox *mbox = NULL;
 712         u16 mac_no = oct->pcie_port;
 713         u16 pf_num = oct->pf_num;
 714         u32 q_no, i;
 715 
 716         if (!oct->sriov_info.max_vfs)
 717                 return 0;
 718 
 719         for (i = 0; i < oct->sriov_info.max_vfs; i++) {
 720                 q_no = i * oct->sriov_info.rings_per_vf;
 721 
  722                 mbox = vzalloc(sizeof(*mbox));
  723                 if (!mbox)
  724                         goto free_mbox;
 727 
 728                 spin_lock_init(&mbox->lock);
 729 
 730                 mbox->oct_dev = oct;
 731 
 732                 mbox->q_no = q_no;
 733 
 734                 mbox->state = OCTEON_MBOX_STATE_IDLE;
 735 
 736                 /* PF mbox interrupt reg */
 737                 mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
 738                                      CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);
 739 
 740                 /* PF writes into SIG0 reg */
 741                 mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
 742                                        CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);
 743 
 744                 /* PF reads from SIG1 reg */
 745                 mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
 746                                       CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
 747 
  748                 /* Mailbox poll work creation. */
 749                 INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
 750                                   cn23xx_pf_mbox_thread);
 751                 mbox->mbox_poll_wk.ctxptr = (void *)mbox;
 752 
 753                 oct->mbox[q_no] = mbox;
 754 
 755                 writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
 756         }
 757 
 758         if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
 759                 schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
 760                                       msecs_to_jiffies(0));
 761 
 762         return 0;
 763 
 764 free_mbox:
 765         while (i) {
 766                 i--;
 767                 vfree(oct->mbox[i]);
 768         }
 769 
 770         return 1;
 771 }
 772 
 773 static int cn23xx_free_pf_mbox(struct octeon_device *oct)
 774 {
 775         u32 q_no, i;
 776 
 777         if (!oct->sriov_info.max_vfs)
 778                 return 0;
 779 
 780         for (i = 0; i < oct->sriov_info.max_vfs; i++) {
 781                 q_no = i * oct->sriov_info.rings_per_vf;
 782                 cancel_delayed_work_sync(
 783                     &oct->mbox[q_no]->mbox_poll_wk.work);
 784                 vfree(oct->mbox[q_no]);
 785         }
 786 
 787         return 0;
 788 }
 789 
 790 static int cn23xx_enable_io_queues(struct octeon_device *oct)
 791 {
 792         u64 reg_val;
 793         u32 srn, ern, q_no;
 794         u32 loop = 1000;
 795 
 796         srn = oct->sriov_info.pf_srn;
 797         ern = srn + oct->num_iqs;
 798 
 799         for (q_no = srn; q_no < ern; q_no++) {
 800                 /* set the corresponding IQ IS_64B bit */
 801                 if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
 802                         reg_val = octeon_read_csr64(
 803                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 804                         reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
 805                         octeon_write_csr64(
 806                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
 807                 }
 808 
 809                 /* set the corresponding IQ ENB bit */
 810                 if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
  811                         /* IOQs are in reset by default in PEM2 mode;
  812                          * clear the reset bit.
  813                          */
 814                         reg_val = octeon_read_csr64(
 815                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 816 
 817                         if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
 818                                 while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
 819                                        !(reg_val &
 820                                          CN23XX_PKT_INPUT_CTL_QUIET) &&
 821                                        --loop) {
 822                                         reg_val = octeon_read_csr64(
 823                                             oct,
 824                                             CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 825                                 }
 826                                 if (!loop) {
 827                                         dev_err(&oct->pci_dev->dev,
 828                                                 "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
 829                                                 q_no);
 830                                         return -1;
 831                                 }
 832                                 reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
 833                                 octeon_write_csr64(
 834                                     oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
 835                                     reg_val);
 836 
 837                                 reg_val = octeon_read_csr64(
 838                                     oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 839                                 if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
 840                                         dev_err(&oct->pci_dev->dev,
 841                                                 "clearing the reset failed for qno: %u\n",
 842                                                 q_no);
 843                                         return -1;
 844                                 }
 845                         }
 846                         reg_val = octeon_read_csr64(
 847                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
 848                         reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
 849                         octeon_write_csr64(
 850                             oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
 851                 }
 852         }
 853         for (q_no = srn; q_no < ern; q_no++) {
 854                 u32 reg_val;
 855                 /* set the corresponding OQ ENB bit */
 856                 if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
 857                         reg_val = octeon_read_csr(
 858                             oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
 859                         reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
 860                         octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
 861                                          reg_val);
 862                 }
 863         }
 864         return 0;
 865 }
 866 
 867 static void cn23xx_disable_io_queues(struct octeon_device *oct)
 868 {
 869         int q_no, loop;
 870         u64 d64;
 871         u32 d32;
 872         u32 srn, ern;
 873 
 874         srn = oct->sriov_info.pf_srn;
 875         ern = srn + oct->num_iqs;
 876 
 877         /*** Disable Input Queues. ***/
 878         for (q_no = srn; q_no < ern; q_no++) {
 879                 loop = HZ;
 880 
 881                 /* start the Reset for a particular ring */
 882                 WRITE_ONCE(d64, octeon_read_csr64(
 883                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
 884                 WRITE_ONCE(d64, READ_ONCE(d64) &
 885                                         (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
 886                 WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
 887                 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
 888                                    READ_ONCE(d64));
 889 
  890                 /* Wait until hardware indicates that this particular IQ
  891                  * has entered the reset state.
  892                  */
 893                 WRITE_ONCE(d64, octeon_read_csr64(
 894                                         oct, CN23XX_SLI_PKT_IOQ_RING_RST));
 895                 while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
 896                         WRITE_ONCE(d64, octeon_read_csr64(
 897                                         oct, CN23XX_SLI_PKT_IOQ_RING_RST));
 898                         schedule_timeout_uninterruptible(1);
 899                 }
 900 
 901                 /* Reset the doorbell register for this Input Queue. */
 902                 octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
 903                 while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
 904                        loop--) {
 905                         schedule_timeout_uninterruptible(1);
 906                 }
 907         }
 908 
 909         /*** Disable Output Queues. ***/
 910         for (q_no = srn; q_no < ern; q_no++) {
 911                 loop = HZ;
 912 
  913                 /* Wait until hardware indicates that this particular
  914                  * ring has entered the reset state; note that
  915                  * SLI_PKT_IOQ_RING_RST is common to both IQs and OQs.
  916                  */
 917                 WRITE_ONCE(d64, octeon_read_csr64(
 918                                         oct, CN23XX_SLI_PKT_IOQ_RING_RST));
 919                 while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
 920                         WRITE_ONCE(d64, octeon_read_csr64(
 921                                         oct, CN23XX_SLI_PKT_IOQ_RING_RST));
 922                         schedule_timeout_uninterruptible(1);
 923                 }
 924 
 925                 /* Reset the doorbell register for this Output Queue. */
 926                 octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
 927                                  0xFFFFFFFF);
 928                 while (octeon_read_csr64(oct,
 929                                          CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
 930                        loop--) {
 931                         schedule_timeout_uninterruptible(1);
 932                 }
 933 
 934                 /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
 935                 WRITE_ONCE(d32, octeon_read_csr(
 936                                         oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
 937                 octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
 938                                  READ_ONCE(d32));
 939         }
 940 }
 941 
 942 static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
 943 {
 944         struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
 945         struct octeon_device *oct = ioq_vector->oct_dev;
 946         u64 pkts_sent;
 947         u64 ret = 0;
 948         struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
 949 
 950         dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
 951 
 952         if (!droq) {
 953                 dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
 954                         oct->pf_num, ioq_vector->ioq_num);
 955                 return 0;
 956         }
 957 
 958         pkts_sent = readq(droq->pkts_sent_reg);
 959 
  960         /* If our device has interrupted, then proceed. Also check
  961          * for all-Fs, which is returned when the interrupt was
  962          * triggered on an error and the PCI read fails.
  963          */
 964         if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
 965                 return ret;
 966 
  967         /* Write the count reg in sli_pkt_cnts to clear these int. */
  968         if (pkts_sent & CN23XX_INTR_PO_INT)
  969                 ret |= MSIX_PO_INT;
 973 
 974         if (pkts_sent & CN23XX_INTR_PI_INT)
 975                 /* We will clear the count when we update the read_index. */
 976                 ret |= MSIX_PI_INT;
 977 
  978         /* The PF never needs to handle the MSI-X mbox intr here; it
  979          * arrives on the last MSI-X vector.
  980          */
 981         return ret;
 982 }
 983 
 984 static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
 985 {
 986         struct delayed_work *work;
 987         u64 mbox_int_val;
 988         u32 i, q_no;
 989 
 990         mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
 991 
 992         for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
 993                 q_no = i * oct->sriov_info.rings_per_vf;
 994 
 995                 if (mbox_int_val & BIT_ULL(q_no)) {
 996                         writeq(BIT_ULL(q_no),
 997                                oct->mbox[0]->mbox_int_reg);
 998                         if (octeon_mbox_read(oct->mbox[q_no])) {
 999                                 work = &oct->mbox[q_no]->mbox_poll_wk.work;
1000                                 schedule_delayed_work(work,
1001                                                       msecs_to_jiffies(0));
1002                         }
1003                 }
1004         }
1005 }
1006 
1007 static irqreturn_t cn23xx_interrupt_handler(void *dev)
1008 {
1009         struct octeon_device *oct = (struct octeon_device *)dev;
1010         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1011         u64 intr64;
1012 
1013         dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
1014         intr64 = readq(cn23xx->intr_sum_reg64);
1015 
1016         oct->int_status = 0;
1017 
1018         if (intr64 & CN23XX_INTR_ERR)
1019                 dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
1020                         oct->octeon_id, CVM_CAST64(intr64));
1021 
 1022         /* When VFs write into the MBOX_SIG2 reg, this intr is set in the PF. */
1023         if (intr64 & CN23XX_INTR_VF_MBOX)
1024                 cn23xx_handle_pf_mbox_intr(oct);
1025 
1026         if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
1027                 if (intr64 & CN23XX_INTR_PKT_DATA)
1028                         oct->int_status |= OCT_DEV_INTR_PKT_DATA;
1029         }
1030 
1031         if (intr64 & (CN23XX_INTR_DMA0_FORCE))
1032                 oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
1033         if (intr64 & (CN23XX_INTR_DMA1_FORCE))
1034                 oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
1035 
1036         /* Clear the current interrupts */
1037         writeq(intr64, cn23xx->intr_sum_reg64);
1038 
1039         return IRQ_HANDLED;
1040 }
1041 
1042 static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
1043                                   u32 idx, int valid)
1044 {
1045         u64 bar1;
1046         u64 reg_adr;
1047 
1048         if (!valid) {
1049                 reg_adr = lio_pci_readq(
1050                         oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1051                 WRITE_ONCE(bar1, reg_adr);
1052                 lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
1053                                CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1054                 reg_adr = lio_pci_readq(
1055                         oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1056                 WRITE_ONCE(bar1, reg_adr);
1057                 return;
1058         }
1059 
1060         /*  The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
1061          *  bits <41:22> of the Core Addr
1062          */
1063         lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
1064                        CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1065 
1066         WRITE_ONCE(bar1, lio_pci_readq(
1067                    oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
1068 }
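
      /* Address-translation sketch (illustrative): for core_addr =
       * 0x80000000, core_addr >> 22 = 0x200, so the value written is
       * (0x200 << 4) | PCI_BAR1_MASK = 0x2000 | PCI_BAR1_MASK, where
       * PCI_BAR1_MASK supplies the low control bits (bit 0 is the valid
       * bit that the !valid path above clears).
       */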
1069 
1070 static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
1071 {
1072         lio_pci_writeq(oct, mask,
1073                        CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1074 }
1075 
1076 static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
1077 {
1078         return (u32)lio_pci_readq(
1079             oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1080 }
1081 
1082 /* always call with lock held */
1083 static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
1084 {
1085         u32 new_idx;
1086         u32 last_done;
1087         u32 pkt_in_done = readl(iq->inst_cnt_reg);
1088 
1089         last_done = pkt_in_done - iq->pkt_in_done;
1090         iq->pkt_in_done = pkt_in_done;
1091 
1092         /* Modulo of the new index with the IQ size will give us
1093          * the new index.  The iq->reset_instr_cnt is always zero for
1094          * cn23xx, so no extra adjustments are needed.
1095          */
1096         new_idx = (iq->octeon_read_index +
1097                    (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
1098                   iq->max_count;
1099 
1100         return new_idx;
1101 }
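
      /* Wrap-around sketch (illustrative, assuming max_count = 512): with
       * octeon_read_index = 500 and last_done = 20, new_idx becomes
       * (500 + 20) % 512 = 8. The u32 subtraction above also handles
       * inst_cnt_reg rollover, e.g. 0x00000005 - 0xFFFFFFFB = 10.
       */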
1102 
1103 static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
1104 {
1105         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1106         u64 intr_val = 0;
1107 
 1108         /* Divide the single write into multiple writes based on the flag. */
1109         /* Enable Interrupt */
1110         if (intr_flag == OCTEON_ALL_INTR) {
1111                 writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
1112         } else if (intr_flag & OCTEON_OUTPUT_INTR) {
1113                 intr_val = readq(cn23xx->intr_enb_reg64);
1114                 intr_val |= CN23XX_INTR_PKT_DATA;
1115                 writeq(intr_val, cn23xx->intr_enb_reg64);
1116         } else if ((intr_flag & OCTEON_MBOX_INTR) &&
1117                    (oct->sriov_info.max_vfs > 0)) {
1118                 if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
1119                         intr_val = readq(cn23xx->intr_enb_reg64);
1120                         intr_val |= CN23XX_INTR_VF_MBOX;
1121                         writeq(intr_val, cn23xx->intr_enb_reg64);
1122                 }
1123         }
1124 }
1125 
1126 static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
1127 {
1128         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1129         u64 intr_val = 0;
1130 
1131         /* Disable Interrupts */
1132         if (intr_flag == OCTEON_ALL_INTR) {
1133                 writeq(0, cn23xx->intr_enb_reg64);
1134         } else if (intr_flag & OCTEON_OUTPUT_INTR) {
1135                 intr_val = readq(cn23xx->intr_enb_reg64);
1136                 intr_val &= ~CN23XX_INTR_PKT_DATA;
1137                 writeq(intr_val, cn23xx->intr_enb_reg64);
1138         } else if ((intr_flag & OCTEON_MBOX_INTR) &&
1139                    (oct->sriov_info.max_vfs > 0)) {
1140                 if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
1141                         intr_val = readq(cn23xx->intr_enb_reg64);
1142                         intr_val &= ~CN23XX_INTR_VF_MBOX;
1143                         writeq(intr_val, cn23xx->intr_enb_reg64);
1144                 }
1145         }
1146 }
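
      /* Usage sketch (illustrative): callers would typically disable
       * everything around reconfiguration and then re-enable, e.g.:
       *
       *      cn23xx_disable_pf_interrupt(oct, OCTEON_ALL_INTR);
       *      ... reprogram queues ...
       *      cn23xx_enable_pf_interrupt(oct, OCTEON_ALL_INTR);
       */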
1147 
1148 static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
1149 {
1150         oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
1151 
1152         dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
1153                 oct->pcie_port);
1154 }
1155 
1156 static int cn23xx_get_pf_num(struct octeon_device *oct)
1157 {
1158         u32 fdl_bit = 0;
1159         u64 pkt0_in_ctl, d64;
1160         int pfnum, mac, trs, ret;
1161 
1162         ret = 0;
1163 
 1164         /* Read the Function Dependency Link reg to get the function number. */
1165         if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
1166                                   &fdl_bit) == 0) {
1167                 oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
1168                                CN23XX_PCIE_SRIOV_FDL_MASK);
1169         } else {
 1170                 ret = -EINVAL;
1171 
1172                 /* Under some virtual environments, extended PCI regs are
1173                  * inaccessible, in which case the above read will have failed.
1174                  * In this case, read the PF number from the
1175                  * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
1176                  */
1177                 pkt0_in_ctl = octeon_read_csr64(oct,
1178                                                 CN23XX_SLI_IQ_PKT_CONTROL64(0));
1179                 pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
1180                         CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
1181                 mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
1182 
 1183                 /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1. */
1184                 d64 = octeon_read_csr64(oct,
1185                                         CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
1186                 trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
1187                 if (trs == 1) {
1188                         dev_err(&oct->pci_dev->dev,
1189                                 "OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
1190                                 pfnum);
1191                         oct->pf_num = pfnum;
1192                         ret = 0;
1193                 } else {
1194                         dev_err(&oct->pci_dev->dev,
1195                                 "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
1196                 }
1197         }
1198 
1199         return ret;
1200 }
1201 
1202 static void cn23xx_setup_reg_address(struct octeon_device *oct)
1203 {
1204         u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
1205         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1206 
1207         oct->reg_list.pci_win_wr_addr_hi =
1208             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
1209         oct->reg_list.pci_win_wr_addr_lo =
1210             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
1211         oct->reg_list.pci_win_wr_addr =
1212             (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
1213 
1214         oct->reg_list.pci_win_rd_addr_hi =
1215             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
1216         oct->reg_list.pci_win_rd_addr_lo =
1217             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
1218         oct->reg_list.pci_win_rd_addr =
1219             (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
1220 
1221         oct->reg_list.pci_win_wr_data_hi =
1222             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
1223         oct->reg_list.pci_win_wr_data_lo =
1224             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
1225         oct->reg_list.pci_win_wr_data =
1226             (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
1227 
1228         oct->reg_list.pci_win_rd_data_hi =
1229             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
1230         oct->reg_list.pci_win_rd_data_lo =
1231             (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
1232         oct->reg_list.pci_win_rd_data =
1233             (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
1234 
1235         cn23xx_get_pcie_qlmport(oct);
1236 
1237         cn23xx->intr_mask64 = CN23XX_INTR_MASK;
1238         if (!oct->msix_on)
1239                 cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
1240         if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
1241                 cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
1242 
1243         cn23xx->intr_sum_reg64 =
1244             bar0_pciaddr +
1245             CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
1246         cn23xx->intr_enb_reg64 =
1247             bar0_pciaddr +
1248             CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
1249 }
1250 
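/* Illustration only, not part of the driver: the window registers mapped
 * in cn23xx_setup_reg_address() implement indirect access to internal
 * chip addresses.  A windowed read selects the target through
 * pci_win_rd_addr and then pulls the value out of pci_win_rd_data; this
 * is a simplified sketch of that sequence (the driver's real helper,
 * lio_pci_readq(), also splits the address across the lo/hi registers
 * and adds an ordering read-back).
 */
#if 0
static u64 example_window_read(struct octeon_device *oct, u64 addr)
{
        writeq(addr, oct->reg_list.pci_win_rd_addr);    /* select address */
        return readq(oct->reg_list.pci_win_rd_data);    /* fetch the data */
}
#endif
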
1251 int cn23xx_sriov_config(struct octeon_device *oct)
1252 {
1253         struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1254         u32 max_rings, total_rings, max_vfs, rings_per_vf;
1255         u32 pf_srn, num_pf_rings;
1256         u32 max_possible_vfs;
1257 
1258         cn23xx->conf =
1259                 (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
1260         switch (oct->rev_id) {
1261         case OCTEON_CN23XX_REV_1_0:
1262                 max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
1263                 max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
1264                 break;
1265         case OCTEON_CN23XX_REV_1_1:
1266                 max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
1267                 max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
1268                 break;
1269         default:
1270                 max_rings = CN23XX_MAX_RINGS_PER_PF;
1271                 max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
1272                 break;
1273         }
1274 
1275         if (oct->sriov_info.num_pf_rings)
1276                 num_pf_rings = oct->sriov_info.num_pf_rings;
1277         else
1278                 num_pf_rings = num_present_cpus();
1279 
1280 #ifdef CONFIG_PCI_IOV
1281         max_vfs = min_t(u32,
1282                         (max_rings - num_pf_rings), max_possible_vfs);
1283         rings_per_vf = 1;
1284 #else
1285         max_vfs = 0;
1286         rings_per_vf = 0;
1287 #endif
1288 
1289         total_rings = num_pf_rings + max_vfs;
1290 
1291         /* the first ring of the pf */
1292         pf_srn = total_rings - num_pf_rings;
1293 
1294         oct->sriov_info.trs = total_rings;
1295         oct->sriov_info.max_vfs = max_vfs;
1296         oct->sriov_info.rings_per_vf = rings_per_vf;
1297         oct->sriov_info.pf_srn = pf_srn;
1298         oct->sriov_info.num_pf_rings = num_pf_rings;
1299         dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
1300                    oct->sriov_info.trs, oct->sriov_info.max_vfs,
1301                    oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
1302                    oct->sriov_info.num_pf_rings);
1303 
1304         oct->sriov_info.sriov_enabled = 0;
1305 
1306         return 0;
1307 }
1308 
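/* Worked example with hypothetical numbers, CONFIG_PCI_IOV enabled: with
 * max_rings == 64, num_pf_rings == 8 and max_possible_vfs == 48, the
 * partitioning in cn23xx_sriov_config() works out as below.  VF rings
 * occupy the low ring numbers; the PF's rings start right after them.
 */
#if 0
static void example_ring_partition(void)
{
        u32 max_rings = 64, num_pf_rings = 8, max_possible_vfs = 48;
        u32 max_vfs = min_t(u32, max_rings - num_pf_rings,
                            max_possible_vfs);          /* min(56, 48) == 48 */
        u32 total_rings = num_pf_rings + max_vfs;       /* 8 + 48 == 56 */
        u32 pf_srn = total_rings - num_pf_rings;        /* 56 - 8 == 48 */

        /* VFs own rings 0..47 (one ring each); the PF owns rings 48..55. */
}
#endif
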
1309 int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
1310 {
1311         u32 data32;
1312         u64 BAR0, BAR1;
1313 
1314         pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
1315         BAR0 = (u64)(data32 & ~0xf);
1316         pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
1317         BAR0 |= ((u64)data32 << 32);
1318         pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
1319         BAR1 = (u64)(data32 & ~0xf);
1320         pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
1321         BAR1 |= ((u64)data32 << 32);
1322 
1323         if (!BAR0 || !BAR1) {
1324                 if (!BAR0)
1325                         dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
1326                 if (!BAR1)
1327                         dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
1328                 return 1;
1329         }
1330 
1331         if (octeon_map_pci_barx(oct, 0, 0))
1332                 return 1;
1333 
1334         if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
1335                 dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
1336                         __func__);
1337                 octeon_unmap_pci_barx(oct, 0);
1338                 return 1;
1339         }
1340 
1341         if (cn23xx_get_pf_num(oct) != 0) {
                     /* undo the BAR mappings done above */
                     octeon_unmap_pci_barx(oct, 0);
                     octeon_unmap_pci_barx(oct, 1);
                     return 1;
             }
1343 
1344         if (cn23xx_sriov_config(oct)) {
1345                 octeon_unmap_pci_barx(oct, 0);
1346                 octeon_unmap_pci_barx(oct, 1);
1347                 return 1;
1348         }
1349 
1350         octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
1351 
1352         oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
1353         oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
1354         oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
1355         oct->fn_list.free_mbox = cn23xx_free_pf_mbox;
1356 
1357         oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
1358         oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
1359 
1360         oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
1361         oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
1362         oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
1363 
1364         oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
1365         oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
1366         oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
1367 
1368         oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
1369         oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
1370 
1371         oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
1372         oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
1373 
1374         cn23xx_setup_reg_address(oct);
1375 
1376         oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
1377 
1378         return 0;
1379 }
1380 
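/* Illustration only, a sketch rather than the driver's actual init path:
 * once fn_list is populated in setup_cn23xx_octeon_pf_device(), the
 * chip-independent core drives the device through these hooks instead of
 * calling CN23xx functions directly, roughly:
 */
#if 0
static int example_use_fn_list(struct octeon_device *oct)
{
        if (oct->fn_list.soft_reset(oct))         /* cn23xx_pf_soft_reset */
                return 1;
        if (oct->fn_list.setup_device_regs(oct))  /* cn23xx_setup_pf_device_regs */
                return 1;
        oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
        return 0;
}
#endif
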
1381 int validate_cn23xx_pf_config_info(struct octeon_device *oct,
1382                                    struct octeon_config *conf23xx)
1383 {
1384         if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
1385                 dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
1386                         __func__, CFG_GET_IQ_MAX_Q(conf23xx),
1387                         CN23XX_MAX_INPUT_QUEUES);
1388                 return 1;
1389         }
1390 
1391         if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
1392                 dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
1393                         __func__, CFG_GET_OQ_MAX_Q(conf23xx),
1394                         CN23XX_MAX_OUTPUT_QUEUES);
1395                 return 1;
1396         }
1397 
1398         if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
1399             CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
1400                 dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
1401                         __func__);
1402                 return 1;
1403         }
1404 
1405         if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
1406                 dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
1407                         __func__);
1408                 return 1;
1409         }
1410 
1411         if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
1412                 dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
1413                         __func__);
1414                 return 1;
1415         }
1416 
1417         return 0;
1418 }
1419 
1420 int cn23xx_fw_loaded(struct octeon_device *oct)
1421 {
1422         u64 val;
1423 
1424         /* If there's more than one active PF on this NIC, then that
1425          * implies that the NIC firmware is loaded and running.  This check
1426          * prevents a rare false negative that might occur if we only relied
1427          * on checking the SCR2_BIT_FW_LOADED flag.  The false negative would
1428          * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
1429          * though the firmware was already loaded but still booting and has yet
1430          * to set SCR2_BIT_FW_LOADED.
1431          */
1432         if (atomic_read(oct->adapter_refcount) > 1)
1433                 return 1;
1434 
1435         val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
1436         return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
1437 }
1438 
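/* Illustration only (sketch): probe-time code typically consults
 * cn23xx_fw_loaded() to decide whether a firmware image still has to be
 * downloaded and booted, e.g.:
 *
 *      if (!cn23xx_fw_loaded(oct))
 *              ...download the firmware and wait for the core to set
 *              SCR2_BIT_FW_LOADED...
 */
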
1439 void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
1440                                         u8 *mac)
1441 {
1442         if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
1443                 struct octeon_mbox_cmd mbox_cmd;
1444 
1445                 mbox_cmd.msg.u64 = 0;
1446                 mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
1447                 mbox_cmd.msg.s.resp_needed = 0;
1448                 mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
1449                 mbox_cmd.msg.s.len = 1;
1450                 mbox_cmd.recv_len = 0;
1451                 mbox_cmd.recv_status = 0;
1452                 mbox_cmd.fn = NULL;
1453                 mbox_cmd.fn_arg = NULL;
1454                 ether_addr_copy(mbox_cmd.msg.s.params, mac);
1455                 mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
1456                 octeon_mbox_write(oct, &mbox_cmd);
1457         }
1458 }
1459 
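/* Worked example with hypothetical vfidx values: a VF's mailbox rides on
 * that VF's first ring, hence q_no = vfidx * rings_per_vf below.  With
 * rings_per_vf == 1 (the value chosen in cn23xx_sriov_config()), VF 3
 * is reached via ring 3; if rings_per_vf were 8, it would be ring 24.
 */
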
1460 static void
1461 cn23xx_get_vf_stats_callback(struct octeon_device *oct,
1462                              struct octeon_mbox_cmd *cmd, void *arg)
1463 {
1464         struct oct_vf_stats_ctx *ctx = arg;
1465 
1466         memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
1467         atomic_set(&ctx->status, 1);
1468 }
1469 
1470 int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
1471                         struct oct_vf_stats *stats)
1472 {
1473         u32 timeout = HZ;       /* one second's worth of jiffies */
1474         struct octeon_mbox_cmd mbox_cmd;
1475         struct oct_vf_stats_ctx ctx;
1476         u32 count = 0, ret;
1477 
1478         if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)))
1479                 return -1;
1480 
1481         if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
1482                 return -1;
1483 
1484         mbox_cmd.msg.u64 = 0;
1485         mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
1486         mbox_cmd.msg.s.resp_needed = 1;
1487         mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
1488         mbox_cmd.msg.s.len = 1;
1489         mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
1490         mbox_cmd.recv_len = 0;
1491         mbox_cmd.recv_status = 0;
1492         mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
1493         ctx.stats = stats;
1494         atomic_set(&ctx.status, 0);
1495         mbox_cmd.fn_arg = (void *)&ctx;
1496         memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
1497         octeon_mbox_write(oct, &mbox_cmd);
1498 
1499         do {
1500                 schedule_timeout_uninterruptible(1);
1501         } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));
1502 
1503         ret = atomic_read(&ctx.status);
1504         if (ret == 0) {
1505                 octeon_mbox_cancel(oct, 0);
1506                 dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timed out\n",
1507                         vfidx);
1508                 return -1;
1509         }
1510 
1511         return 0;
1512 }
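
/* Illustration only, a sketch of a typical caller (e.g. an
 * ndo_get_vf_stats implementation); assumes struct oct_vf_stats exposes
 * an rx_packets counter, which is not defined in this file:
 */
#if 0
static void example_report_vf_stats(struct octeon_device *oct, int vfidx)
{
        struct oct_vf_stats stats;

        memset(&stats, 0, sizeof(stats));
        if (cn23xx_get_vf_stats(oct, vfidx, &stats) == 0)
                dev_info(&oct->pci_dev->dev, "VF-%d rx packets: %llu\n",
                         vfidx, stats.rx_packets);
}
#endif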

/* [<][>][^][v][top][bottom][index][help] */