root/drivers/crypto/cavium/cpt/cptvf_main.c


DEFINITIONS

This source file includes the following definitions:
  1. vq_work_handler
  2. init_worker_threads
  3. cleanup_worker_threads
  4. free_pending_queues
  5. alloc_pending_queues
  6. init_pending_queues
  7. cleanup_pending_queues
  8. free_command_queues
  9. alloc_command_queues
  10. init_command_queues
  11. cleanup_command_queues
  12. cptvf_sw_cleanup
  13. cptvf_sw_init
  14. cptvf_free_irq_affinity
  15. cptvf_write_vq_ctl
  16. cptvf_write_vq_doorbell
  17. cptvf_write_vq_inprog
  18. cptvf_write_vq_done_numwait
  19. cptvf_write_vq_done_timewait
  20. cptvf_enable_swerr_interrupts
  21. cptvf_enable_mbox_interrupts
  22. cptvf_enable_done_interrupts
  23. cptvf_clear_dovf_intr
  24. cptvf_clear_irde_intr
  25. cptvf_clear_nwrp_intr
  26. cptvf_clear_mbox_intr
  27. cptvf_clear_swerr_intr
  28. cptvf_read_vf_misc_intr_status
  29. cptvf_misc_intr_handler
  30. get_cptvf_vq_wqe
  31. cptvf_read_vq_done_count
  32. cptvf_write_vq_done_ack
  33. cptvf_done_intr_handler
  34. cptvf_set_irq_affinity
  35. cptvf_write_vq_saddr
  36. cptvf_device_init
  37. cptvf_probe
  38. cptvf_remove
  39. cptvf_shutdown

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME        "thunder-cptvf"
#define DRV_VERSION     "1.0"

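/*
 * Per-queue bottom-half context: one tasklet per virtual queue (VQ),
 * carrying the owning VF and the queue number it services.
 */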
struct cptvf_wqe {
        struct tasklet_struct twork;
        void *cptvf;
        u32 qno;
};

struct cptvf_wqe_info {
        struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

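/*
 * Tasklet body for DONE-interrupt bottom-half processing. It always
 * services vq_wqe[0]: the VF currently runs a single queue (see the
 * TODO in cptvf_device_init()), so queue 0 is the only one scheduled.
 */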
static void vq_work_handler(unsigned long data)
{
        struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
        struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

        vq_post_process(cwqe->cptvf, cwqe->qno);
}

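/* Allocate the per-VF tasklet info and set up one tasklet per queue. */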
static int init_worker_threads(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct cptvf_wqe_info *cwqe_info;
        int i;

        cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
        if (!cwqe_info)
                return -ENOMEM;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Creating VQ worker threads (%u)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++) {
                tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
                             (unsigned long)cwqe_info);
                cwqe_info->vq_wqe[i].qno = i;
                cwqe_info->vq_wqe[i].cptvf = cptvf;
        }

        cptvf->wqe_info = cwqe_info;

        return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
        struct cptvf_wqe_info *cwqe_info;
        struct pci_dev *pdev = cptvf->pdev;
        int i;

        cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
        if (!cwqe_info)
                return;

        if (cptvf->nr_queues) {
                dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
                         cptvf->nr_queues);
        }

        for (i = 0; i < cptvf->nr_queues; i++)
                tasklet_kill(&cwqe_info->vq_wqe[i].twork);

        kzfree(cwqe_info);
        cptvf->wqe_info = NULL;
}

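/*
 * Pending queues track requests that have been submitted to hardware
 * and are awaiting completion; one ring of 'qlen' entries per VQ.
 */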
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
        int i;
        struct pending_queue *queue;

        for_each_pending_queue(pqinfo, queue, i) {
                if (!queue->head)
                        continue;

                /* free single queue */
                kzfree(queue->head);

                queue->front = 0;
                queue->rear = 0;
        }

        pqinfo->qlen = 0;
        pqinfo->nr_queues = 0;
}

static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
                                u32 nr_queues)
{
        u32 i;
        size_t size;
        int ret;
        struct pending_queue *queue = NULL;

        pqinfo->nr_queues = nr_queues;
        pqinfo->qlen = qlen;

        size = (qlen * sizeof(struct pending_entry));

        for_each_pending_queue(pqinfo, queue, i) {
                queue->head = kzalloc(size, GFP_KERNEL);
                if (!queue->head) {
                        ret = -ENOMEM;
                        goto pending_qfail;
                }

                queue->front = 0;
                queue->rear = 0;
                atomic64_set(&queue->pending_count, 0);

                /* init queue spin lock */
                spin_lock_init(&queue->lock);
        }

        return 0;

pending_qfail:
        free_pending_queues(pqinfo);

        return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        if (!nr_queues)
                return 0;

        ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
                        nr_queues);
                return ret;
        }

        return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
                 cptvf->nr_queues);
        free_pending_queues(&cptvf->pqinfo);
}

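/*
 * Command queues hold the instructions the VF feeds to the CPT engine.
 * Each queue is built from DMA-coherent chunks of up to 'qchunksize'
 * commands; the trailing CPT_NEXT_CHUNK_PTR_SIZE bytes of every chunk
 * hold the bus address of the next chunk, and the final chunk points
 * back to the first, so hardware walks the queue as one circular buffer.
 */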
static void free_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo)
{
        int i;
        struct command_queue *queue = NULL;
        struct command_chunk *chunk = NULL;
        struct pci_dev *pdev = cptvf->pdev;
        struct hlist_node *node;

        /* clean up for each queue */
        for (i = 0; i < cptvf->nr_queues; i++) {
                queue = &cqinfo->queue[i];
                if (hlist_empty(&cqinfo->queue[i].chead))
                        continue;

                hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
                                          nextchunk) {
                        dma_free_coherent(&pdev->dev,
                                          chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
                                          chunk->head,
                                          chunk->dma_addr);
                        chunk->head = NULL;
                        chunk->dma_addr = 0;
                        hlist_del(&chunk->nextchunk);
                        kzfree(chunk);
                }

                queue->nchunks = 0;
                queue->idx = 0;
        }

        /* common cleanup */
        cqinfo->cmd_size = 0;
}

static int alloc_command_queues(struct cpt_vf *cptvf,
                                struct command_qinfo *cqinfo, size_t cmd_size,
                                u32 qlen)
{
        int i;
        size_t q_size;
        struct command_queue *queue = NULL;
        struct pci_dev *pdev = cptvf->pdev;

        /* common init */
        cqinfo->cmd_size = cmd_size;
        /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
        cptvf->qsize = min(qlen, cqinfo->qchunksize) *
                        CPT_NEXT_CHUNK_PTR_SIZE + 1;
        /* Qsize in bytes to create space for alignment */
        q_size = qlen * cqinfo->cmd_size;

        /* per queue initialization */
        for (i = 0; i < cptvf->nr_queues; i++) {
                size_t c_size = 0;
                size_t rem_q_size = q_size;
                struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
                u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

                queue = &cqinfo->queue[i];
                INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
                do {
                        curr = kzalloc(sizeof(*curr), GFP_KERNEL);
                        if (!curr)
                                goto cmd_qfail;

                        c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
                                        rem_q_size;
                        curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
                                                              c_size + CPT_NEXT_CHUNK_PTR_SIZE,
                                                              &curr->dma_addr,
                                                              GFP_KERNEL);
                        if (!curr->head) {
                                dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
                                        i, queue->nchunks);
                                kfree(curr);
                                goto cmd_qfail;
                        }

                        curr->size = c_size;
                        if (queue->nchunks == 0) {
                                hlist_add_head(&curr->nextchunk,
                                               &cqinfo->queue[i].chead);
                                first = curr;
                        } else {
                                hlist_add_behind(&curr->nextchunk,
                                                 &last->nextchunk);
                        }

                        queue->nchunks++;
                        rem_q_size -= c_size;
                        if (last)
                                *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

                        last = curr;
                } while (rem_q_size);

                /* Make the queue circular: tie the last chunk back to the head */
                curr = first;
                *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
                queue->qhead = curr;
                spin_lock_init(&queue->lock);
        }
        return 0;

cmd_qfail:
        free_command_queues(cptvf, cqinfo);
        return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        /* setup AE command queues */
        ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
                                   qlen);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
                        cptvf->nr_queues);
                return ret;
        }

        return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!cptvf->nr_queues)
                return;

        dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
                 cptvf->nr_queues);
        free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);
        cleanup_command_queues(cptvf);
}

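/*
 * Allocate all software-side resources: command queues, pending queues
 * and the bottom-half worker tasklets, in that order.
 */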
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret = 0;
        u32 max_dev_queues = CPT_NUM_QS_PER_VF;

        /* Clamp to the number of queues the device supports */
        nr_queues = min_t(u32, nr_queues, max_dev_queues);
        cptvf->nr_queues = nr_queues;

        ret = init_command_queues(cptvf, qlen);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
                        nr_queues);
                return ret;
        }

        ret = init_pending_queues(cptvf, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
                        nr_queues);
                goto setup_pqfail;
        }

        /* Create worker threads for BH processing */
        ret = init_worker_threads(cptvf);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup worker threads\n");
                goto init_work_fail;
        }

        return 0;

init_work_fail:
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);

setup_pqfail:
        cleanup_command_queues(cptvf);

        return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
        free_cpumask_var(cptvf->affinity_mask[vec]);
}

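/*
 * The CSR accessors below all follow the same read-modify-write pattern
 * on the VF's VQ registers. The (0, 0) indices select the VF's own
 * (single) queue; see the TODO in cptvf_device_init().
 */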
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
        union cptx_vqx_ctl vqx_ctl;

        vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
        vqx_ctl.s.ena = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_doorbell vqx_dbell;

        vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DOORBELL(0, 0));
        vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
                        vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
        union cptx_vqx_inprog vqx_inprg;

        vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
        vqx_inprg.s.inflight = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.num_wait = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
        union cptx_vqx_done_wait vqx_dwait;

        vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DONE_WAIT(0, 0));
        vqx_dwait.s.time_wait = time;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
                        vqx_dwait.u);
}

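/*
 * The interrupt enables are W1S (write-1-to-set) registers: writing a 1
 * to a bit enables that interrupt source without disturbing the others.
 */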
static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Set SWERR interrupt for the requested VF */
        vqx_misc_ena.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        /* Set mbox(0) interrupt for the requested VF */
        vqx_misc_ena.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_done_ena_w1s vqx_done_ena;

        vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ENA_W1S(0, 0));
        /* Set DONE interrupt for the requested VF */
        vqx_done_ena.s.done = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
                        vqx_done_ena.u);
}

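/*
 * The MISC interrupt register is W1C (write-1-to-clear): writing a 1 to
 * a cause bit acknowledges that source without touching the others.
 */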
static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.dovf = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.irde = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.nwrp = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.mbox = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_int vqx_misc_int;

        vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_INT(0, 0));
        /* W1C for the VF */
        vqx_misc_int.s.swerr = 1;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
                        vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
        return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

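/*
 * MISC interrupt handler: reads the cause register and services exactly
 * one cause per invocation, mailbox being the common (likely) case. On a
 * doorbell overflow the doorbell count is also reset to zero.
 */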
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        u64 intr;

        intr = cptvf_read_vf_misc_intr_status(cptvf);
        /* Check for MISC interrupt types */
        if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
                dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
                cptvf_handle_mbox_intr(cptvf);
                cptvf_clear_mbox_intr(cptvf);
        } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
                cptvf_clear_dovf_intr(cptvf);
                /* Clear doorbell count */
                cptvf_write_vq_doorbell(cptvf, 0);
                dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
                cptvf_clear_irde_intr(cptvf);
                dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
                cptvf_clear_nwrp_intr(cptvf);
                dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
                cptvf_clear_swerr_intr(cptvf);
                dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else {
                dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
                        cptvf->vfid);
        }

        return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
                                                 int qno)
{
        struct cptvf_wqe_info *nwqe_info;

        if (unlikely(qno >= cptvf->nr_queues))
                return NULL;
        nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

        return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
        union cptx_vqx_done vqx_done;

        vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
        return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
                                           u32 ackcnt)
{
        union cptx_vqx_done_ack vqx_dack_cnt;

        vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_DONE_ACK(0, 0));
        vqx_dack_cnt.s.done_ack = ackcnt;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
                        vqx_dack_cnt.u);
}

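/*
 * DONE interrupt handler: reads the completion count, acknowledges it so
 * the interrupt deasserts, then defers the actual response processing to
 * the queue-0 tasklet (vq_work_handler).
 */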
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        /* Read the number of completions */
        u32 intr = cptvf_read_vq_done_count(cptvf);

        if (intr) {
                struct cptvf_wqe *wqe;

                /* Acknowledge the number of
                 * scheduled completions for processing
                 */
                cptvf_write_vq_done_ack(cptvf, intr);
                wqe = get_cptvf_vq_wqe(cptvf, 0);
                if (unlikely(!wqe)) {
                        dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
                                cptvf->vfid);
                        return IRQ_NONE;
                }
                tasklet_hi_schedule(&wqe->twork);
        }

        return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        struct pci_dev *pdev = cptvf->pdev;
        int cpu;

        if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
                                GFP_KERNEL)) {
                dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
                        cptvf->vfid);
                return;
        }

        cpu = cptvf->vfid % num_online_cpus();
        cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
                        cptvf->affinity_mask[vec]);
        irq_set_affinity_hint(pci_irq_vector(pdev, vec),
                        cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
        union cptx_vqx_saddr vqx_saddr;

        vqx_saddr.u = val;
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

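/*
 * Hardware bring-up for the VQ: quiesce (disable the queue, zero the
 * doorbell and in-flight count), program the queue base address, set
 * completion coalescing (time/count thresholds), then enable the queue.
 */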
static void cptvf_device_init(struct cpt_vf *cptvf)
{
        u64 base_addr = 0;

        /* Disable the VQ */
        cptvf_write_vq_ctl(cptvf, 0);
        /* Reset the doorbell */
        cptvf_write_vq_doorbell(cptvf, 0);
        /* Clear inflight */
        cptvf_write_vq_inprog(cptvf, 0);
        /* Write VQ SADDR */
        /* TODO: for now only one queue, so hard coded */
        base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
        cptvf_write_vq_saddr(cptvf, base_addr);
        /* Configure timerhold / coalescence */
        cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
        cptvf_write_vq_done_numwait(cptvf, 1);
        /* Enable the VQ */
        cptvf_write_vq_ctl(cptvf, 1);
        /* Flag the VF ready */
        cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

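/*
 * Probe: enable PCI resources and 48-bit DMA, map the VF CSRs, wire up
 * the MISC interrupt, handshake with the PF (READY, QLEN, group,
 * priority, UP), initialize software and hardware queue state, wire up
 * the DONE interrupt, and finally register the crypto algorithms.
 */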
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_vf *cptvf;
        int err;

        cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
        if (!cptvf)
                return -ENOMEM;

        pci_set_drvdata(pdev, cptvf);
        cptvf->pdev = pdev;
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto cptvf_err_disable_device;
        }
        /* Mark as VF driver */
        cptvf->flags |= CPT_FLAG_VF_DRIVER;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto cptvf_err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
                goto cptvf_err_release_regions;
        }

        /* Map the VF's configuration registers */
        cptvf->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cptvf->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto cptvf_err_release_regions;
        }

        cptvf->node = dev_to_node(&pdev->dev);
        err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
                        CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for #%d msix vectors failed\n",
                        CPT_VF_MSIX_VECTORS);
                goto cptvf_err_release_regions;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
                          cptvf_misc_intr_handler, 0, "CPT VF misc intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request misc irq failed\n");
                goto cptvf_free_vectors;
        }

        /* Enable mailbox and software-error interrupts */
        cptvf_enable_mbox_interrupts(cptvf);
        cptvf_enable_swerr_interrupts(cptvf);

        /* Check ready with PF */
        /* Gets chip ID / device ID from PF if ready */
        err = cptvf_check_pf_ready(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to READY msg\n");
                goto cptvf_free_misc_irq;
        }

        /* CPT VF software resources initialization */
        cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
        err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
        if (err) {
                dev_err(dev, "cptvf_sw_init() failed\n");
                goto cptvf_free_misc_irq;
        }
        /* Convey VQ LEN to PF */
        err = cptvf_send_vq_size_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to QLEN msg\n");
                goto cptvf_free_misc_irq;
        }

        /* CPT VF device initialization */
        cptvf_device_init(cptvf);
        /* Send msg to PF to assign current Q to required group */
        cptvf->vfgrp = 1;
        err = cptvf_send_vf_to_grp_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_GRP msg\n");
                goto cptvf_free_misc_irq;
        }

        cptvf->priority = 1;
        err = cptvf_send_vf_priority_msg(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to VF_PRIO msg\n");
                goto cptvf_free_misc_irq;
        }

        err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
                          cptvf_done_intr_handler, 0, "CPT VF done intr",
                          cptvf);
        if (err) {
                dev_err(dev, "Request done irq failed\n");
                goto cptvf_free_misc_irq;
        }

        /* Enable DONE interrupt */
        cptvf_enable_done_interrupts(cptvf);

        /* Set irq affinity masks */
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

        err = cptvf_send_vf_up(cptvf);
        if (err) {
                dev_err(dev, "PF not responding to UP msg\n");
                goto cptvf_free_irq_affinity;
        }
        err = cvm_crypto_init(cptvf);
        if (err) {
                dev_err(dev, "Algorithm register failed\n");
                goto cptvf_free_irq_affinity;
        }
        return 0;

cptvf_free_irq_affinity:
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
        cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_misc_irq:
        free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
        pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
        pci_release_regions(pdev);
cptvf_err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return err;
}

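/*
 * Teardown mirrors probe: tell the PF the VF is going down, then release
 * IRQs, vectors, software state and PCI resources, and unregister the
 * crypto algorithms.
 */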
static void cptvf_remove(struct pci_dev *pdev)
{
        struct cpt_vf *cptvf = pci_get_drvdata(pdev);

        if (!cptvf) {
                dev_err(&pdev->dev, "Invalid CPT-VF device\n");
                return;
        }

        /* Convey DOWN to PF */
        if (cptvf_send_vf_down(cptvf)) {
                dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
        } else {
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
                pci_free_irq_vectors(cptvf->pdev);
                cptvf_sw_cleanup(cptvf);
                pci_set_drvdata(pdev, NULL);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                cvm_crypto_exit();
        }
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
        cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
        {PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
        { 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
        .name = DRV_NAME,
        .id_table = cptvf_id_table,
        .probe = cptvf_probe,
        .remove = cptvf_remove,
        .shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);
