root/drivers/crypto/cavium/nitrox/nitrox_isr.c

DEFINITIONS

This source file includes the following definitions.
  1. nps_pkt_slc_isr
  2. clear_nps_core_err_intr
  3. clear_nps_pkt_err_intr
  4. clear_pom_err_intr
  5. clear_pem_err_intr
  6. clear_lbc_err_intr
  7. clear_efl_err_intr
  8. clear_bmi_err_intr
  9. nps_core_int_tasklet
  10. nps_core_int_isr
  11. nitrox_unregister_interrupts
  12. nitrox_register_interrupts
  13. nitrox_sriov_unregister_interrupts
  14. nitrox_sriov_register_interupts

// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_mbx.h"

/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192

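/*
 * Illustrative helper, not part of the driver: with the constants
 * above, packet ring "r" gets its solicit-port vector at MSI-X entry
 * r * NR_RING_VECTORS, and the ring's AQMQ and ZQMQ vectors occupy
 * the two entries that follow.
 */
static inline int pkt_ring_msix_entry(int ring)
{
        return PKT_RING_MSIX_BASE + ring * NR_RING_VECTORS;
}
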
/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument, the nitrox_q_vector of the ring that fired
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
        struct nitrox_q_vector *qvec = data;
        union nps_pkt_slc_cnts slc_cnts;
        struct nitrox_cmdq *cmdq = qvec->cmdq;

        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        /* New packet on SLC output port */
        if (slc_cnts.s.slc_int)
                tasklet_hi_schedule(&qvec->resp_tasklet);

        return IRQ_HANDLED;
}
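
/*
 * A minimal sketch of the CSR union pattern used above (the field
 * layout here is hypothetical; the real definitions live in
 * nitrox_csr.h): a raw 64-bit view and a bitfield view overlay the
 * same register, so a single readq() serves both.
 */
union example_csr {
        u64 value;                      /* raw register contents */
        struct {
                u64 slc_int : 1;        /* hypothetical bit 0 */
                u64 resv    : 63;
        } s;
};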

static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        /* Write 1 to clear */
        value = nitrox_read_csr(ndev, NPS_CORE_INT);
        nitrox_write_csr(ndev, NPS_CORE_INT, value);

        dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}
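
/*
 * As the "Write 1 to clear" note above indicates, these interrupt
 * CSRs are W1C: writing back the value just read acknowledges exactly
 * the error bits that were latched. The clear_*_err_intr() helpers
 * that follow all use the same read-then-write-back idiom.
 */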

static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
        union nps_pkt_int pkt_int;
        unsigned long value, offset;
        int i;

        pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
        dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
                            pkt_int.value);

        if (pkt_int.s.slc_err) {
                offset = NPS_PKT_SLC_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* re-enable the solicit ports that reported errors */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_solicit_port(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
        }

        if (pkt_int.s.in_err) {
                offset = NPS_PKT_IN_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
                offset = NPS_PKT_IN_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* re-enable the input rings that reported errors */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_input_ring(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_IN_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
        }
}
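
/*
 * The recovery step above in isolation (a sketch; "ring_err" is a
 * hypothetical name for the W1C value read back from a RERR CSR):
 * each set bit identifies a ring that faulted, so only those rings
 * are re-enabled.
 */
static inline void example_reenable_rings(struct nitrox_device *ndev,
                                          unsigned long ring_err)
{
        int i;

        for_each_set_bit(i, &ring_err, BITS_PER_LONG)
                enable_pkt_input_ring(ndev, i);
}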

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, POM_INT);
        nitrox_write_csr(ndev, POM_INT, value);
        dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, PEM0_INT);
        nitrox_write_csr(ndev, PEM0_INT, value);
        dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
        union lbc_int lbc_int;
        u64 value, offset;
        int i;

        lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
        dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

        if (lbc_int.s.dma_rd_err) {
                for (i = 0; i < NR_CLUSTERS; i++) {
                        offset = EFL_CORE_VF_ERR_INT0X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                        offset = EFL_CORE_VF_ERR_INT1X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }

        if (lbc_int.s.cam_soft_err) {
                dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
                invalidate_lbc(ndev);
        }

        if (lbc_int.s.pref_dat_len_mismatch_err) {
                offset = LBC_PLM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_PLM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }

        if (lbc_int.s.rd_dat_len_mismatch_err) {
                offset = LBC_ELM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_ELM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }
        nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
        int i;

        for (i = 0; i < NR_CLUSTERS; i++) {
                union efl_core_int core_int;
                u64 value, offset;

                offset = EFL_CORE_INTX(i);
                core_int.value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, core_int.value);
                dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
                                    i, core_int.value);
                if (core_int.s.se_err) {
                        offset = EFL_CORE_SE_ERR_INTX(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, BMI_INT);
        nitrox_write_csr(ndev, BMI_INT, value);
        dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

static void nps_core_int_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
        struct nitrox_device *ndev = qvec->ndev;

        /* placeholder: in PF mode, do queue recovery here */
        if (ndev->mode == __NDEV_MODE_PF) {
        } else {
                /*
                 * placeholder: if VF(s) are enabled, communicate the
                 * error information to the VF(s)
                 */
        }
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 * @irq: irq number
 * @data: argument, the non-ring nitrox_q_vector
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
        struct nitrox_q_vector *qvec = data;
        struct nitrox_device *ndev = qvec->ndev;
        union nps_core_int_active core_int;

        core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

        if (core_int.s.nps_core)
                clear_nps_core_err_intr(ndev);

        if (core_int.s.nps_pkt)
                clear_nps_pkt_err_intr(ndev);

        if (core_int.s.pom)
                clear_pom_err_intr(ndev);

        if (core_int.s.pem)
                clear_pem_err_intr(ndev);

        if (core_int.s.lbc)
                clear_lbc_err_intr(ndev);

        if (core_int.s.efl)
                clear_efl_err_intr(ndev);

        if (core_int.s.bmi)
                clear_bmi_err_intr(ndev);

        /* Mailbox interrupt */
        if (core_int.s.mbox)
                nitrox_pf2vf_mbox_handler(ndev);

        /* set the resend bit so the ISR is called back if more work is pending */
        core_int.s.resend = 1;
        nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

        return IRQ_HANDLED;
}

void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        int i;

        for (i = 0; i < ndev->num_vecs; i++) {
                struct nitrox_q_vector *qvec;
                int vec;

                qvec = ndev->qvec + i;
                if (!qvec->valid)
                        continue;

                /* get the vector number */
                vec = pci_irq_vector(pdev, i);
                irq_set_affinity_hint(vec, NULL);
                free_irq(vec, qvec);

                tasklet_disable(&qvec->resp_tasklet);
                tasklet_kill(&qvec->resp_tasklet);
                qvec->valid = false;
        }
        kfree(ndev->qvec);
        ndev->qvec = NULL;
        pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        struct nitrox_q_vector *qvec;
        int nr_vecs, vec, cpu;
        int ret, i;

        /*
         * PF MSI-X vectors
         *
         * Entry 0: NPS PKT ring 0
         * Entry 1: AQMQ ring 0
         * Entry 2: ZQMQ ring 0
         * Entry 3: NPS PKT ring 1
         * Entry 4: AQMQ ring 1
         * Entry 5: ZQMQ ring 1
         * ....
         * Entry 192: NPS_CORE_INT_ACTIVE
         */
        nr_vecs = pci_msix_vec_count(pdev);

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
                return ret;
        }
        ndev->num_vecs = nr_vecs;

        ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
        if (!ndev->qvec) {
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        /*
         * request irqs for packet rings/ports; only the NPS packet
         * vector of each ring triple (entry 0, 3, 6, ...) is requested
         */
        for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
                qvec = &ndev->qvec[i];

                qvec->ring = i / NR_RING_VECTORS;
                if (qvec->ring >= ndev->nr_queues)
                        break;

                qvec->cmdq = &ndev->pkt_inq[qvec->ring];
                snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
                /* get the vector number */
                vec = pci_irq_vector(pdev, i);
                ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
                if (ret) {
                        dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
                                qvec->ring);
                        goto irq_fail;
                }
                cpu = qvec->ring % num_online_cpus();
                irq_set_affinity_hint(vec, get_cpu_mask(cpu));

                tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
                             (unsigned long)qvec);
                qvec->valid = true;
        }

        /* request irq for the non-ring vector */
        i = NON_RING_MSIX_BASE;
        qvec = &ndev->qvec[i];
        qvec->ndev = ndev;

        snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
        /* get the vector number */
        vec = pci_irq_vector(pdev, i);
        ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
        if (ret) {
                dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
                goto irq_fail;
        }
        cpu = num_online_cpus();
        irq_set_affinity_hint(vec, get_cpu_mask(cpu));

        tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
                     (unsigned long)qvec);
        qvec->valid = true;

        return 0;

irq_fail:
        nitrox_unregister_interrupts(ndev);
        return ret;
}

void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        int i;

        for (i = 0; i < ndev->num_vecs; i++) {
                struct nitrox_q_vector *qvec;
                int vec;

                qvec = ndev->qvec + i;
                if (!qvec->valid)
                        continue;

                vec = ndev->iov.msix.vector;
                irq_set_affinity_hint(vec, NULL);
                free_irq(vec, qvec);

                tasklet_disable(&qvec->resp_tasklet);
                tasklet_kill(&qvec->resp_tasklet);
                qvec->valid = false;
        }
        kfree(ndev->qvec);
        ndev->qvec = NULL;
        pci_disable_msix(pdev);
}

int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        struct nitrox_q_vector *qvec;
        int vec, cpu;
        int ret;

        /*
         * Only the non-ring vector, i.e. entry 192, is available to
         * the PF in SR-IOV mode. pci_enable_msix_exact() is used here
         * rather than pci_alloc_irq_vectors() because a specific
         * entry index has to be requested.
         */
        ndev->iov.msix.entry = NON_RING_MSIX_BASE;
        ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
        if (ret) {
                dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
                        NON_RING_MSIX_BASE);
                return ret;
        }

        qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
        if (!qvec) {
                pci_disable_msix(pdev);
                return -ENOMEM;
        }
        qvec->ndev = ndev;

        ndev->qvec = qvec;
        ndev->num_vecs = NR_NON_RING_VECTORS;
        snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
                 NON_RING_MSIX_BASE);

        vec = ndev->iov.msix.vector;
        ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
        if (ret) {
                dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
                        NON_RING_MSIX_BASE);
                goto iov_irq_fail;
        }
        cpu = num_online_cpus();
        irq_set_affinity_hint(vec, get_cpu_mask(cpu));

        tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
                     (unsigned long)qvec);
        qvec->valid = true;

        return 0;

iov_irq_fail:
        nitrox_sriov_unregister_interrupts(ndev);
        return ret;
}
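
/*
 * A minimal usage sketch (hypothetical caller; the real call sites
 * live elsewhere in the driver): enabling SR-IOV would swap the full
 * PF vector set for the single non-ring vector, e.g.
 *
 *      nitrox_unregister_interrupts(ndev);
 *      ret = nitrox_sriov_register_interupts(ndev);
 *      if (ret)
 *              return ret;
 */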
