root/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c

DEFINITIONS

This source file includes the following definitions:
  1. lio_cn6xxx_soft_reset
  2. lio_cn6xxx_enable_error_reporting
  3. lio_cn6xxx_setup_pcie_mps
  4. lio_cn6xxx_setup_pcie_mrrs
  5. lio_cn6xxx_coprocessor_clock
  6. lio_cn6xxx_get_oq_ticks
  7. lio_cn6xxx_setup_global_input_regs
  8. lio_cn66xx_setup_pkt_ctl_regs
  9. lio_cn6xxx_setup_global_output_regs
  10. lio_cn6xxx_setup_device_regs
  11. lio_cn6xxx_setup_iq_regs
  12. lio_cn66xx_setup_iq_regs
  13. lio_cn6xxx_setup_oq_regs
  14. lio_cn6xxx_enable_io_queues
  15. lio_cn6xxx_disable_io_queues
  16. lio_cn6xxx_bar1_idx_setup
  17. lio_cn6xxx_bar1_idx_write
  18. lio_cn6xxx_bar1_idx_read
  19. lio_cn6xxx_update_read_index
  20. lio_cn6xxx_enable_interrupt
  21. lio_cn6xxx_disable_interrupt
  22. lio_cn6xxx_get_pcie_qlmport
  23. lio_cn6xxx_process_pcie_error_intr
  24. lio_cn6xxx_process_droq_intr_regs
  25. lio_cn6xxx_process_interrupt_regs
  26. lio_cn6xxx_setup_reg_address
  27. lio_setup_cn66xx_octeon_device
  28. lio_validate_cn6xxx_config_info

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"

int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
        octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

        lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

        /* Wait for 100ms as Octeon resets. */
        mdelay(100);

        if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
                dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        return 0;
}

void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
        u32 val;

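        /* Per the PCIe spec, this dword read covers Device Control (low 16
         * bits) and Device Status (high 16 bits); the 0x000c0000 test below
         * checks the Fatal Error / Unsupported Request Detected status bits,
         * and the low 4 control bits enable the four error-reporting classes.
         */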
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
        if (val & 0x000c0000) {
                dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
                        val & 0x000c0000);
        }

        val |= 0xf;          /* Enable Link error reporting */

        dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
        pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}

void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
                               enum octeon_pcie_mps mps)
{
        u32 val;
        u64 r64;

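        /* Per the PCIe spec, MPS is the 3-bit field in bits 7:5 of Device
         * Control, encoding a payload size of 128 << field bytes
         * (0 = 128B ... 5 = 4096B).
         */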
        /* Read config register for MPS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mps == PCIE_MPS_DEFAULT) {
                mps = ((val & (0x7 << 5)) >> 5);
        } else {
                val &= ~(0x7 << 5);  /* Turn off any MPS bits */
                val |= (mps << 5);   /* Set MPS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= (mps << 4);
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
                                enum octeon_pcie_mrrs mrrs)
{
        u32 val;
        u64 r64;

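        /* Per the PCIe spec, MRRS is the 3-bit field in bits 14:12 of Device
         * Control, encoding a read request size of 128 << field bytes, so a
         * 512-byte MRRS is encoded as 2.
         */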
        /* Read config register for MRRS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mrrs == PCIE_MRRS_DEFAULT) {
                mrrs = ((val & (0x7 << 12)) >> 12);
        } else {
                val &= ~(0x7 << 12); /* Turn off any MRRS bits */
                val |= (mrrs << 12); /* Set MRRS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
        r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
        r64 |= mrrs;
        octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

        /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= mrrs;
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
        /* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
         * for SLI.
         */
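        /* The multiplier applies to the 50 MHz reference clock; for
         * illustration, a (hypothetical) multiplier field of 12 would give
         * 12 * 50 = 600 MHz.
         */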
        return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}

u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
                            u32 time_intr_in_us)
{
        /* This gives the SLI clock per microsec */
        u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

        /* Dividing core clocks per microsecond by oq ticks would be
         * fractional. To avoid that, we use the method below.
         */
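        /* Worked example with assumed values: a 600 MHz SLI clock gives
         * oqticks_per_us = 600; 600 * 1000 = 600000 clocks per ms;
         * 600000 / 1024 = 585 oq ticks per ms; and for time_intr_in_us = 100,
         * 585 * 100 / 1000 = 58 oq ticks.
         */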

        /* This gives the clock cycles per millisecond */
        oqticks_per_us *= 1000;

        /* This gives the oq ticks (1024 core clock cycles) per millisecond */
        oqticks_per_us /= 1024;

        /* time_intr is in microseconds. The next 2 steps give the oq ticks
         * corresponding to time_intr.
         */
        oqticks_per_us *= time_intr_in_us;
        oqticks_per_us /= 1000;

        return oqticks_per_us;
}

void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
        /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
                         CN6XXX_INPUT_CTL_MASK);

        /* Instruction Read Size - Max 4 instructions per PCIE Read */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
                           0xFFFFFFFFFFFFFFFFULL);

        /* Select PCIE Port for all Input rings. */
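        /* Each ring appears to get a 2-bit field in this register (hence the
         * 0x55... pattern): multiplying by the port number (0-3) replicates
         * that number into every field with one write.
         */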
        octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
                           (oct->pcie_port * 0x5555555555555555ULL));
}

static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
        u64 pktctl;

        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

        /* 66XX SPECIFIC */
        if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
                /* Disable RING_EN if only up to 4 rings are used. */
                pktctl &= ~(1 << 4);
        else
                pktctl |= (1 << 4);

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
                pktctl |= 0xF;
        else
                /* Disable per-port backpressure. */
                pktctl &= ~0xF;
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
        u32 time_threshold;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        /* Select PCI-E Port for all Output queues */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
                           (oct->pcie_port * 0x5555555555555555ULL));

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
        } else {
                /* Set Output queue watermark to 0 to disable backpressure */
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
        }

        /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

        /* Select ES, RO, NS setting from register for Output Queue Packet
         * Address
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
         * Queue ScatterList
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

        /* ENDIAN-SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
                           0x5555555555555555ULL);
#else
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
                           0x5555555555555555ULL);

        /* Set up interrupt packet and time threshold */
        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
                         (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
        time_threshold =
                lio_cn6xxx_get_oq_ticks(oct, (u32)
                                        CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}

static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
        lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
        lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
        lio_cn6xxx_enable_error_reporting(oct);

        lio_cn6xxx_setup_global_input_regs(oct);
        lio_cn66xx_setup_pkt_ctl_regs(oct);
        lio_cn6xxx_setup_global_output_regs(oct);

        /* The default error timeout value should be 0x200000 to avoid a host
         * hang when an invalid register is read.
         */
        octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
        return 0;
}

void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

        /* Write the start of the input queue's ring and its size */
        octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
                           iq->base_addr_dma);
        octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

        /* Remember the doorbell & instruction count register addr for this
         * queue
         */
        iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
        iq->inst_cnt_reg = oct->mmio[0].hw_addr
                           + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
        dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
                iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

        /* Store the current instruction counter
         * (used in flush_iq calculation)
         */
        iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        lio_cn6xxx_setup_iq_regs(oct, iq_no);

        /* Backpressure for this queue - WMARK set to all F's. This effectively
         * disables the backpressure mechanism.
         */
        octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
                           (0xFFFFFFFFULL << 32));
}

void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
        u32 intr;
        struct octeon_droq *droq = oct->droq[oq_no];

        octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
                           droq->desc_ring_dma);
        octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

        octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
                         droq->buffer_size);

        /* Get the mapped address of the pkt_sent and pkts_credit regs */
        droq->pkts_sent_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
        droq->pkts_credit_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

        /* Enable this output queue to generate Packet Timer Interrupt */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

        /* Enable this output queue to generate Packet Count Interrupt */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
        u32 mask;

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
        mask |= oct->io_qmask.iq64B;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask |= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask |= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

        return 0;
}

void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
        int i;
        u32 mask, loop = HZ;
        u32 d32;

        /* Reset the Enable bits for Input Queues. */
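        /* XOR with the queue mask clears exactly the bits this driver set,
         * leaving any other enable bits in the register untouched.
         */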
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask ^= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        /* Wait until hardware indicates that the queues are in reset. */
        mask = (u32)oct->io_qmask.iq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Input queue. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
        }

        /* Reset the Enable bits for Output Queues. */
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask ^= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

        /* Wait until hardware indicates that the queues are in reset. */
        loop = HZ;
        mask = (u32)oct->io_qmask.oq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Output queue. */
        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                if (!(oct->io_qmask.oq & BIT_ULL(i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
        }

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
                          u64 core_addr,
                          u32 idx,
                          int valid)
{
        u64 bar1;

        if (valid == 0) {
                bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
                lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
                               CN6XXX_BAR1_REG(idx, oct->pcie_port));
                bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
                return;
        }

        /* Bits 17:4 of PCI_BAR1_INDEXx store bits 35:22 of
         * the core address.
         */
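        /* Each BAR1 index thus maps a 4 MB (2^22 byte) window. Worked example
         * with an assumed address: core_addr 0x0800000 gives
         * (0x0800000 >> 22) = 2 in bits 17:4, ORed with PCI_BAR1_MASK.
         */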
        lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
                       CN6XXX_BAR1_REG(idx, oct->pcie_port));

        bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
                               u32 idx,
                               u32 mask)
{
        lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
        return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
        u32 new_idx = readl(iq->inst_cnt_reg);

        /* The instr cnt reg is a 32-bit counter that can roll over; its
         * value at init time was saved in reset_instr_cnt.
         */
        if (iq->reset_instr_cnt < new_idx)
                new_idx -= iq->reset_instr_cnt;
        else
                new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
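        /* Worked example of the wrap case (values assumed):
         * reset_instr_cnt = 0xFFFFFFF0 and new_idx = 0x10 give
         * 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20 instructions since init.
         */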

        /* Modulo of the new index with the IQ size will give us
         * the new index.
         */
        new_idx %= iq->max_count;

        return new_idx;
}

void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
                                 u8 unused __attribute__((unused)))
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
        u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

        /* Enable Interrupt */
        writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
                                  u8 unused __attribute__((unused)))
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        /* Disable Interrupts */
        writeq(0, cn6xxx->intr_enb_reg64);
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
        /* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
         * to determine the PCIE port number.
         */
        oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

        dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
        dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
                CVM_CAST64(intr64));
}

static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
        struct octeon_droq *droq;
        int oq_no;
        u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
        u32 droq_cnt_enb, droq_cnt_mask;

        droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        droq_mask = droq_cnt_mask & droq_cnt_enb;

        droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        droq_mask |= (droq_time_mask & droq_int_enb);

        droq_mask &= oct->io_qmask.oq;

        oct->droq_intr = 0;

        for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
                if (!(droq_mask & BIT_ULL(oq_no)))
                        continue;

                droq = oct->droq[oq_no];
                pkt_count = octeon_droq_check_hw_for_pkts(droq);
                if (pkt_count) {
                        oct->droq_intr |= BIT_ULL(oq_no);
                        if (droq->ops.poll_mode) {
                                u32 value;
                                u32 reg;

                                struct octeon_cn6xxx *cn6xxx =
                                        (struct octeon_cn6xxx *)oct->chip;

                                /* disable interrupts for this droq */
                                spin_lock
                                        (&cn6xxx->lock_for_droq_int_enb_reg);
                                reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);
                                reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);

                                spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
                        }
                }
        }

        droq_time_mask &= oct->io_qmask.oq;
        droq_cnt_mask &= oct->io_qmask.oq;

        /* Reset the PKT_CNT/TIME_INT registers. */
        if (droq_time_mask)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

        if (droq_cnt_mask)      /* reset PKT_CNT register:66xx */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

        return 0;
}

irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
        u64 intr64;

        intr64 = readq(cn6xxx->intr_sum_reg64);

        /* Proceed only if our device has interrupted. Also check for all F's,
         * which is what the read returns if the PCI access fails (e.g. when
         * the interrupt was triggered by an error).
         */
        if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
                return IRQ_NONE;

        oct->int_status = 0;

        if (intr64 & CN6XXX_INTR_ERR)
                lio_cn6xxx_process_pcie_error_intr(oct, intr64);

        if (intr64 & CN6XXX_INTR_PKT_DATA) {
                lio_cn6xxx_process_droq_intr_regs(oct);
                oct->int_status |= OCT_DEV_INTR_PKT_DATA;
        }

        if (intr64 & CN6XXX_INTR_DMA0_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

        if (intr64 & CN6XXX_INTR_DMA1_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

        /* Clear the current interrupts */
        writeq(intr64, cn6xxx->intr_sum_reg64);

        return IRQ_HANDLED;
}

void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
                                  void *chip,
                                  struct octeon_reg_list *reg_list)
{
        u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

        reg_list->pci_win_wr_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
        reg_list->pci_win_wr_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
        reg_list->pci_win_wr_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

        reg_list->pci_win_rd_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
        reg_list->pci_win_rd_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
        reg_list->pci_win_rd_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

        reg_list->pci_win_wr_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
        reg_list->pci_win_wr_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
        reg_list->pci_win_wr_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

        reg_list->pci_win_rd_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
        reg_list->pci_win_rd_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
        reg_list->pci_win_rd_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

        lio_cn6xxx_get_pcie_qlmport(oct);

        cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
        cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
        cn6xxx->intr_enb_reg64 =
                bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}

int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        if (octeon_map_pci_barx(oct, 0, 0))
                return 1;

        if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
                dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                return 1;
        }

        spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

        oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
        oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

        oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
        oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
        oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

        oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
        oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
        oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

        oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
        oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
        oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

        oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
        oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

        lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

        cn6xxx->conf = (struct octeon_config *)
                       oct_get_config_info(oct, LIO_210SV);
        if (!cn6xxx->conf) {
                dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
                return 1;
        }

        oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

        return 0;
}

int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
                                    struct octeon_config *conf6xxx)
{
        if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_INPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_OUTPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
            CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
                        __func__);
                return 1;
        }

        if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
                        __func__);
                return 1;
        }

        if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
                dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
                        __func__);
                return 1;
        }

        return 0;
}
