root/drivers/crypto/cavium/cpt/cptpf_main.c


DEFINITIONS

This source file includes the following definitions.
  1. cpt_disable_cores
  2. cpt_enable_cores
  3. cpt_configure_group
  4. cpt_disable_mbox_interrupts
  5. cpt_disable_ecc_interrupts
  6. cpt_disable_exec_interrupts
  7. cpt_disable_all_interrupts
  8. cpt_enable_mbox_interrupts
  9. cpt_load_microcode
  10. do_cpt_init
  11. cpt_ucode_load_fw
  12. cpt_ucode_load
  13. cpt_mbx0_intr_handler
  14. cpt_reset
  15. cpt_find_max_enabled_cores
  16. cpt_check_bist_status
  17. cpt_check_exe_bist_status
  18. cpt_disable_all_cores
  19. cpt_unload_microcode
  20. cpt_device_init
  21. cpt_register_interrupts
  22. cpt_unregister_interrupts
  23. cpt_sriov_init
  24. cpt_probe
  25. cpt_remove
  26. cpt_shutdown

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2016 Cavium, Inc.
   4  */
   5 
   6 #include <linux/device.h>
   7 #include <linux/firmware.h>
   8 #include <linux/interrupt.h>
   9 #include <linux/module.h>
  10 #include <linux/moduleparam.h>
  11 #include <linux/pci.h>
  12 #include <linux/printk.h>
  13 #include <linux/version.h>
  14 
  15 #include "cptpf.h"
  16 
  17 #define DRV_NAME        "thunder-cpt"
  18 #define DRV_VERSION     "1.0"
  19 
  20 static u32 num_vfs = 4; /* Default: 4 VFs enabled */
  21 module_param(num_vfs, uint, 0444);
  22 MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");
  23 
  24 /*
  25  * Disable cores specified by coremask
  26  */
  27 static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
  28                               u8 type, u8 grp)
  29 {
  30         u64 pf_exe_ctl;
  31         u32 timeout = 100;
  32         u64 grpmask = 0;
  33         struct device *dev = &cpt->pdev->dev;
  34 
  35         if (type == AE_TYPES)
  36                 coremask = (coremask << cpt->max_se_cores);
  37 
  38         /* Disengage the cores from groups */
  39         grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
  40         cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
  41                         (grpmask & ~coremask));
  42         udelay(CSR_DELAY);
  43         grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
  44         while (grpmask & coremask) {
  45                 dev_err(dev, "Cores still busy %llx\n", coremask);
  46                 grpmask = cpt_read_csr64(cpt->reg_base,
  47                                          CPTX_PF_EXEC_BUSY(0));
  48                 if (!timeout--)
  49                         break;
  50 
  51                 udelay(CSR_DELAY);
  52         }
  53 
  54         /* Disable the cores */
  55         pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
  56         cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
  57                         (pf_exe_ctl & ~coremask));
  58         udelay(CSR_DELAY);
  59 }
  60 
  61 /*
  62  * Enable cores specified by coremask
  63  */
  64 static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
  65                              u8 type)
  66 {
  67         u64 pf_exe_ctl;
  68 
  69         if (type == AE_TYPES)
  70                 coremask = (coremask << cpt->max_se_cores);
  71 
  72         pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
  73         cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
  74                         (pf_exe_ctl | coremask));
  75         udelay(CSR_DELAY);
  76 }
  77 
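     /*
      * Add the cores specified by coremask to engine group grp
      */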
  78 static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
  79                                 u64 coremask, u8 type)
  80 {
  81         u64 pf_gx_en = 0;
  82 
  83         if (type == AE_TYPES)
  84                 coremask = (coremask << cpt->max_se_cores);
  85 
  86         pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
  87         cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
  88                         (pf_gx_en | coremask));
  89         udelay(CSR_DELAY);
  90 }
  91 
  92 static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
  93 {
  94         /* Disable mbox(0) interrupts for all VFs */
  95         cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
  96 }
  97 
  98 static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
  99 {
 100         /* Disable ECC(0) interrupts for all VFs */
 101         cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
 102 }
 103 
 104 static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
 105 {
 106         /* Disable exec interrupts for all VFs */
 107         cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
 108 }
 109 
 110 static void cpt_disable_all_interrupts(struct cpt_device *cpt)
 111 {
 112         cpt_disable_mbox_interrupts(cpt);
 113         cpt_disable_ecc_interrupts(cpt);
 114         cpt_disable_exec_interrupts(cpt);
 115 }
 116 
 117 static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
 118 {
 119         /* Enable mbox(0) interrupts for all VFs */
 120         cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
 121 }
 122 
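     /*
      * Program the UCODE_BASE register of every core selected in the
      * microcode's core_mask with the DMA address of the image
      */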
 123 static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
 124 {
 125         int ret = 0, core = 0, shift = 0;
 126         u32 total_cores = 0;
 127         struct device *dev = &cpt->pdev->dev;
 128 
 129         if (!mcode || !mcode->code) {
 130                 dev_err(dev, "Either mcode or mcode->code is NULL\n");
 131                 return -EINVAL;
 132         }
 133 
 134         if (mcode->code_size == 0) {
 135                 dev_err(dev, "microcode size is 0\n");
 136                 return -EINVAL;
 137         }
 138 
 139         /* Assumes 0-9 are SE cores for UCODE_BASE registers and
 140          * AE core bases follow
 141          */
 142         if (mcode->is_ae) {
 143                 core = CPT_MAX_SE_CORES; /* start counting from 10 */
 144                 total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
 145         } else {
 146                 core = 0; /* start counting from 0 */
 147                 total_cores = CPT_MAX_SE_CORES; /* up to 9 */
 148         }
 149 
 150         /* Point to microcode for each core of the group */
 151         for (; core < total_cores ; core++, shift++) {
 152                 if (mcode->core_mask & (1 << shift)) {
 153                         cpt_write_csr64(cpt->reg_base,
 154                                         CPTX_PF_ENGX_UCODE_BASE(0, core),
 155                                         (u64)mcode->phys_base);
 156                 }
 157         }
 158         return ret;
 159 }
 160 
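     /*
      * Assign the microcode to the next free engine group, load it on the
      * requested cores and enable those cores for that group
      */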
 161 static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
 162 {
 163         int ret = 0;
 164         struct device *dev = &cpt->pdev->dev;
 165 
 166         /* Make device not ready */
 167         cpt->flags &= ~CPT_FLAG_DEVICE_READY;
 168         /* Disable All PF interrupts */
 169         cpt_disable_all_interrupts(cpt);
 170         /* Calculate mcode group and coremasks */
 171         if (mcode->is_ae) {
 172                 if (mcode->num_cores > cpt->max_ae_cores) {
 173                         dev_err(dev, "Requested more cores than available AE cores\n");
 174                         ret = -EINVAL;
 175                         goto cpt_init_fail;
 176                 }
 177 
 178                 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
 179                         dev_err(dev, "Can't load, all eight microcode groups in use\n");
 180                         ret = -ENFILE;
                             goto cpt_init_fail;
 181                 }
 182 
 183                 mcode->group = cpt->next_group;
 184                 /* Convert requested cores to mask */
 185                 mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
 186                 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
 187                                   mcode->group);
 188                 /* Load microcode for AE engines */
 189                 ret = cpt_load_microcode(cpt, mcode);
 190                 if (ret) {
 191                         dev_err(dev, "Microcode load Failed for %s\n",
 192                                 mcode->version);
 193                         goto cpt_init_fail;
 194                 }
 195                 cpt->next_group++;
 196                 /* Configure group mask for the mcode */
 197                 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
 198                                     AE_TYPES);
 199                 /* Enable AE cores for the group mask */
 200                 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
 201         } else {
 202                 if (mcode->num_cores > cpt->max_se_cores) {
 203                         dev_err(dev, "Requested more cores than available SE cores\n");
 204                         ret = -EINVAL;
 205                         goto cpt_init_fail;
 206                 }
 207                 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
 208                         dev_err(dev, "Can't load, all eight microcode groups in use\n");
 209                         ret = -ENFILE;
                             goto cpt_init_fail;
 210                 }
 211 
 212                 mcode->group = cpt->next_group;
 213                 /* Convert requested cores to mask */
 214                 mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
 215                 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
 216                                   mcode->group);
 217                 /* Load microcode for SE engines */
 218                 ret = cpt_load_microcode(cpt, mcode);
 219                 if (ret) {
 220                         dev_err(dev, "Microcode load Failed for %s\n",
 221                                 mcode->version);
 222                         goto cpt_init_fail;
 223                 }
 224                 cpt->next_group++;
 225                 /* Configure group mask for the mcode */
 226                 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
 227                                     SE_TYPES);
 228                 /* Enable SE cores for the group mask */
 229                 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
 230         }
 231 
 232         /* Enable PF mailbox interrupts */
 233         cpt_enable_mbox_interrupts(cpt);
 234         cpt->flags |= CPT_FLAG_DEVICE_READY;
 235 
 236         return ret;
 237 
 238 cpt_init_fail:
 239         /* Re-enable PF mailbox interrupts before returning the error */
 240         cpt_enable_mbox_interrupts(cpt);
 241 
 242         return ret;
 243 }
 244 
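     /*
      * Header prepended to the microcode image in the firmware file;
      * code_length is stored big-endian (hence the ntohl() below)
      */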
 245 struct ucode_header {
 246         u8 version[CPT_UCODE_VERSION_SZ];
 247         u32 code_length;
 248         u32 data_length;
 249         u64 sram_address;
 250 };
 251 
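     /*
      * Load one microcode image: fetch it with request_firmware(), copy it
      * into DMA-coherent memory in the byte order the engines expect and
      * initialize the device with it via do_cpt_init()
      */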
 252 static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
 253 {
 254         const struct firmware *fw_entry;
 255         struct device *dev = &cpt->pdev->dev;
 256         struct ucode_header *ucode;
 257         struct microcode *mcode;
 258         int j, ret = 0;
 259 
 260         ret = request_firmware(&fw_entry, fw, dev);
 261         if (ret)
 262                 return ret;
 263 
 264         ucode = (struct ucode_header *)fw_entry->data;
 265         mcode = &cpt->mcode[cpt->next_mc_idx];
 266         memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
 267         mcode->code_size = ntohl(ucode->code_length) * 2;
 268         if (!mcode->code_size) {
 269                 ret = -EINVAL;
 270                 goto fw_release;
 271         }
 272 
 273         mcode->is_ae = is_ae;
 274         mcode->core_mask = 0ULL;
 275         mcode->num_cores = is_ae ? 6 : 10;
 276 
 277         /* Allocate DMA-coherent space for the microcode */
 278         mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
 279                                          &mcode->phys_base, GFP_KERNEL);
 280         if (!mcode->code) {
 281                 dev_err(dev, "Unable to allocate space for microcode");
 282                 ret = -ENOMEM;
 283                 goto fw_release;
 284         }
 285 
 286         memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
 287                mcode->code_size);
 288 
 289         /* Byte swap 64-bit */
 290         for (j = 0; j < (mcode->code_size / 8); j++)
 291                 ((u64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
 292         /*  MC needs 16-bit swap */
 293         for (j = 0; j < (mcode->code_size / 2); j++)
 294                 ((u16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);
 295 
 296         dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
 297         dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
 298         dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
 299         dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
 300         dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);
 301 
 302         ret = do_cpt_init(cpt, mcode);
 303         if (ret) {
 304                 dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
 305                 goto fw_release;
 306         }
 307 
 308         dev_info(dev, "Microcode Loaded %s\n", mcode->version);
 309         mcode->is_mc_valid = 1;
 310         cpt->next_mc_idx++;
 311 
 312 fw_release:
 313         release_firmware(fw_entry);
 314 
 315         return ret;
 316 }
 317 
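     /* Load the AE microcode image followed by the SE image */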
 318 static int cpt_ucode_load(struct cpt_device *cpt)
 319 {
 320         int ret = 0;
 321         struct device *dev = &cpt->pdev->dev;
 322 
 323         ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
 324         if (ret) {
 325                 dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
 326                 return ret;
 327         }
 328         ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
 329         if (ret) {
 330                 dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
 331                 return ret;
 332         }
 333 
 334         return ret;
 335 }
 336 
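     /* PF mailbox 0 interrupt: delegate to the common mailbox handler */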
 337 static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
 338 {
 339         struct cpt_device *cpt = (struct cpt_device *)cpt_irq;
 340 
 341         cpt_mbox_intr_handler(cpt, 0);
 342 
 343         return IRQ_HANDLED;
 344 }
 345 
 346 static void cpt_reset(struct cpt_device *cpt)
 347 {
 348         cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
 349 }
 350 
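     /*
      * Read the number of available SE and AE cores from the
      * PF constants register
      */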
 351 static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
 352 {
 353         union cptx_pf_constants pf_cnsts = {0};
 354 
 355         pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
 356         cpt->max_se_cores = pf_cnsts.s.se;
 357         cpt->max_ae_cores = pf_cnsts.s.ae;
 358 }
 359 
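     /* Return the RAM BIST status; non-zero indicates a self-test failure */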
 360 static u32 cpt_check_bist_status(struct cpt_device *cpt)
 361 {
 362         union cptx_pf_bist_status bist_sts = {0};
 363 
 364         bist_sts.u = cpt_read_csr64(cpt->reg_base,
 365                                     CPTX_PF_BIST_STATUS(0));
 366 
 367         return bist_sts.u;
 368 }
 369 
 370 static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
 371 {
 372         union cptx_pf_exe_bist_status bist_sts = {0};
 373 
 374         bist_sts.u = cpt_read_csr64(cpt->reg_base,
 375                                     CPTX_PF_EXE_BIST_STATUS(0));
 376 
 377         return bist_sts.u;
 378 }
 379 
 380 static void cpt_disable_all_cores(struct cpt_device *cpt)
 381 {
 382         u32 grp, timeout = 100;
 383         struct device *dev = &cpt->pdev->dev;
 384 
 385         /* Disengage the cores from groups */
 386         for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
 387                 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
 388                 udelay(CSR_DELAY);
 389         }
 390 
 391         grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
 392         while (grp) {
 393                 dev_err(dev, "Cores still busy");
 394                 grp = cpt_read_csr64(cpt->reg_base,
 395                                      CPTX_PF_EXEC_BUSY(0));
 396                 if (!timeout--)
 397                         break;
 398 
 399                 udelay(CSR_DELAY);
 400         }
 401         /* Disable the cores */
 402         cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
 403 }
 404 
 405 /*
 406  * Ensure all cores are disengaged from all groups by
 407  * calling cpt_disable_all_cores() before calling this
 408  * function.
 409  */
 410 static void cpt_unload_microcode(struct cpt_device *cpt)
 411 {
 412         u32 grp = 0, core;
 413 
 414         /* Free microcode bases and reset group masks */
 415         for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
 416                 struct microcode *mcode = &cpt->mcode[grp];
 417 
 418                 if (cpt->mcode[grp].code)
 419                         dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
 420                                           mcode->code, mcode->phys_base);
 421                 mcode->code = NULL;
 422         }
 423         /* Clear UCODE_BASE registers for all engines */
 424         for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
 425                 cpt_write_csr64(cpt->reg_base,
 426                                 CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
 427 }
 428 
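     /*
      * One-time PF initialization: reset the block, verify the BIST
      * results and discover how many SE/AE cores are present
      */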
 429 static int cpt_device_init(struct cpt_device *cpt)
 430 {
 431         u64 bist;
 432         struct device *dev = &cpt->pdev->dev;
 433 
 434         /* Reset the PF when probed first */
 435         cpt_reset(cpt);
 436         msleep(100);
 437 
 438         /* Check BIST status */
 439         bist = (u64)cpt_check_bist_status(cpt);
 440         if (bist) {
 441                 dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
 442                 return -ENODEV;
 443         }
 444 
 445         bist = cpt_check_exe_bist_status(cpt);
 446         if (bist) {
 447                 dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
 448                 return -ENODEV;
 449         }
 450 
 451         /* Get CLK frequency */
 452         /* Get max enabled cores */
 453         cpt_find_max_enabled_cores(cpt);
 454         /* Disable all cores */
 455         cpt_disable_all_cores(cpt);
 456         /* Reset device parameters */
 457         cpt->next_mc_idx   = 0;
 458         cpt->next_group = 0;
 459         /* PF is ready */
 460         cpt->flags |= CPT_FLAG_DEVICE_READY;
 461 
 462         return 0;
 463 }
 464 
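     /* Allocate MSI-X vectors and register the PF mailbox interrupt handler */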
 465 static int cpt_register_interrupts(struct cpt_device *cpt)
 466 {
 467         int ret;
 468         struct device *dev = &cpt->pdev->dev;
 469 
 470         /* Enable MSI-X */
 471         ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
 472                         CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
 473         if (ret < 0) {
 474                 dev_err(&cpt->pdev->dev, "Request for %d MSI-X vectors failed\n",
 475                         CPT_PF_MSIX_VECTORS);
 476                 return ret;
 477         }
 478 
 479         /* Register mailbox interrupt handlers */
 480         ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
 481                           cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
 482         if (ret)
 483                 goto fail;
 484 
 485         /* Enable mailbox interrupt */
 486         cpt_enable_mbox_interrupts(cpt);
 487         return 0;
 488 
 489 fail:
 490         dev_err(dev, "Request irq failed\n");
 491         pci_free_irq_vectors(cpt->pdev);
 492         return ret;
 493 }
 494 
 495 static void cpt_unregister_interrupts(struct cpt_device *cpt)
 496 {
 497         free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
 498         pci_free_irq_vectors(cpt->pdev);
 499 }
 500 
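     /*
      * Enable up to num_vfs virtual functions, limited by the TotalVFs
      * value advertised in the SR-IOV capability
      */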
 501 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
 502 {
 503         int pos = 0;
 504         int err;
 505         u16 total_vf_cnt;
 506         struct pci_dev *pdev = cpt->pdev;
 507 
 508         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
 509         if (!pos) {
 510                 dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
 511                 return -ENODEV;
 512         }
 513 
 514         cpt->num_vf_en = num_vfs; /* User requested VFs */
 515         pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
 516         if (total_vf_cnt < cpt->num_vf_en)
 517                 cpt->num_vf_en = total_vf_cnt;
 518 
 519         if (!total_vf_cnt)
 520                 return 0;
 521 
 522         /* Enable the available VFs */
 523         err = pci_enable_sriov(pdev, cpt->num_vf_en);
 524         if (err) {
 525                 dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
 526                         cpt->num_vf_en);
 527                 cpt->num_vf_en = 0;
 528                 return err;
 529         }
 530 
 531         /* TODO: Optionally enable static VQ priorities feature */
 532 
 533         dev_info(&pdev->dev, "SRIOV enabled, number of VFs available: %d\n",
 534                  cpt->num_vf_en);
 535 
 536         cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
 537 
 538         return 0;
 539 }
 540 
 541 static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 542 {
 543         struct device *dev = &pdev->dev;
 544         struct cpt_device *cpt;
 545         int err;
 546 
 547         if (num_vfs > 16 || num_vfs < 4) {
 548                 dev_warn(dev, "Invalid VF count %d, resetting to default (4)\n",
 549                          num_vfs);
 550                 num_vfs = 4;
 551         }
 552 
 553         cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
 554         if (!cpt)
 555                 return -ENOMEM;
 556 
 557         pci_set_drvdata(pdev, cpt);
 558         cpt->pdev = pdev;
 559         err = pci_enable_device(pdev);
 560         if (err) {
 561                 dev_err(dev, "Failed to enable PCI device\n");
 562                 pci_set_drvdata(pdev, NULL);
 563                 return err;
 564         }
 565 
 566         err = pci_request_regions(pdev, DRV_NAME);
 567         if (err) {
 568                 dev_err(dev, "PCI request regions failed 0x%x\n", err);
 569                 goto cpt_err_disable_device;
 570         }
 571 
 572         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
 573         if (err) {
 574                 dev_err(dev, "Unable to get usable DMA configuration\n");
 575                 goto cpt_err_release_regions;
 576         }
 577 
 578         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
 579         if (err) {
 580                 dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
 581                 goto cpt_err_release_regions;
 582         }
 583 
 584         /* MAP PF's configuration registers */
 585         cpt->reg_base = pcim_iomap(pdev, 0, 0);
 586         if (!cpt->reg_base) {
 587                 dev_err(dev, "Cannot map config register space, aborting\n");
 588                 err = -ENOMEM;
 589                 goto cpt_err_release_regions;
 590         }
 591 
 592         /* CPT device HW initialization */
 593         err = cpt_device_init(cpt);
             if (err)
                     goto cpt_err_release_regions;
 594 
 595         /* Register interrupts */
 596         err = cpt_register_interrupts(cpt);
 597         if (err)
 598                 goto cpt_err_release_regions;
 599 
 600         err = cpt_ucode_load(cpt);
 601         if (err)
 602                 goto cpt_err_unregister_interrupts;
 603 
 604         /* Configure SRIOV */
 605         err = cpt_sriov_init(cpt, num_vfs);
 606         if (err)
 607                 goto cpt_err_unregister_interrupts;
 608 
 609         return 0;
 610 
 611 cpt_err_unregister_interrupts:
 612         cpt_unregister_interrupts(cpt);
 613 cpt_err_release_regions:
 614         pci_release_regions(pdev);
 615 cpt_err_disable_device:
 616         pci_disable_device(pdev);
 617         pci_set_drvdata(pdev, NULL);
 618         return err;
 619 }
 620 
 621 static void cpt_remove(struct pci_dev *pdev)
 622 {
 623         struct cpt_device *cpt = pci_get_drvdata(pdev);
 624 
 625         /* Disengage SE and AE cores from all groups*/
 626         cpt_disable_all_cores(cpt);
 627         /* Unload microcodes */
 628         cpt_unload_microcode(cpt);
 629         cpt_unregister_interrupts(cpt);
 630         pci_disable_sriov(pdev);
 631         pci_release_regions(pdev);
 632         pci_disable_device(pdev);
 633         pci_set_drvdata(pdev, NULL);
 634 }
 635 
 636 static void cpt_shutdown(struct pci_dev *pdev)
 637 {
 638         struct cpt_device *cpt = pci_get_drvdata(pdev);
 639 
 640         if (!cpt)
 641                 return;
 642 
 643         dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
 644                  (u32)pdev->vendor, (u32)pdev->device);
 645 
 646         cpt_unregister_interrupts(cpt);
 647         pci_release_regions(pdev);
 648         pci_disable_device(pdev);
 649         pci_set_drvdata(pdev, NULL);
 650 }
 651 
 652 /* Supported devices */
 653 static const struct pci_device_id cpt_id_table[] = {
 654         { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
 655         { 0, }  /* end of table */
 656 };
 657 
 658 static struct pci_driver cpt_pci_driver = {
 659         .name = DRV_NAME,
 660         .id_table = cpt_id_table,
 661         .probe = cpt_probe,
 662         .remove = cpt_remove,
 663         .shutdown = cpt_shutdown,
 664 };
 665 
 666 module_pci_driver(cpt_pci_driver);
 667 
 668 MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
 669 MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
 670 MODULE_LICENSE("GPL v2");
 671 MODULE_VERSION(DRV_VERSION);
 672 MODULE_DEVICE_TABLE(pci, cpt_id_table);
