root/drivers/crypto/cavium/nitrox/nitrox_lib.c


DEFINITIONS

This source file includes the following definitions.
  1. nitrox_cmdq_init
  2. nitrox_cmdq_reset
  3. nitrox_cmdq_cleanup
  4. nitrox_free_aqm_queues
  5. nitrox_alloc_aqm_queues
  6. nitrox_free_pktin_queues
  7. nitrox_alloc_pktin_queues
  8. create_crypto_dma_pool
  9. destroy_crypto_dma_pool
  10. crypto_alloc_context
  11. crypto_free_context
  12. nitrox_common_sw_init
  13. nitrox_common_sw_cleanup

// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE 256

/* packet input ring alignment */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM queue input alignment */
#define AQM_Q_ALIGN_BYTES 32

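/**
 * nitrox_cmdq_init - initialize a command queue
 * @cmdq: command queue to initialize
 * @align_bytes: hardware alignment required for the queue base
 *
 * Allocates a DMA-coherent ring padded by @align_bytes, derives the
 * aligned base and DMA addresses from the unaligned allocation, and sets
 * up the locks, response/backlog lists and backlog flush work.
 *
 * Return: 0 on success, -ENOMEM if the ring allocation fails.
 */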
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
        struct nitrox_device *ndev = cmdq->ndev;

        cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
        cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
                                                &cmdq->unalign_dma,
                                                GFP_KERNEL);
        if (!cmdq->unalign_base)
                return -ENOMEM;

        cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
        cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
        cmdq->write_idx = 0;

        spin_lock_init(&cmdq->cmd_qlock);
        spin_lock_init(&cmdq->resp_qlock);
        spin_lock_init(&cmdq->backlog_qlock);

        INIT_LIST_HEAD(&cmdq->response_head);
        INIT_LIST_HEAD(&cmdq->backlog_head);
        INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

        atomic_set(&cmdq->pending_count, 0);
        atomic_set(&cmdq->backlog_count, 0);
        return 0;
}

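/**
 * nitrox_cmdq_reset - reset command queue state
 * @cmdq: command queue to reset
 *
 * Clears the software write index and the pending/backlog counters;
 * the ring memory itself is left untouched.
 */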
static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
        cmdq->write_idx = 0;
        atomic_set(&cmdq->pending_count, 0);
        atomic_set(&cmdq->backlog_count, 0);
}

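/**
 * nitrox_cmdq_cleanup - release command queue resources
 * @cmdq: command queue to clean up
 *
 * Cancels any pending backlog flush work, frees the DMA-coherent ring
 * and clears the cached CSR addresses and queue parameters.
 */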
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev;

        if (!cmdq)
                return;

        if (!cmdq->unalign_base)
                return;

        ndev = cmdq->ndev;
        cancel_work_sync(&cmdq->backlog_qflush);

        dma_free_coherent(DEV(ndev), cmdq->qsize,
                          cmdq->unalign_base, cmdq->unalign_dma);
        nitrox_cmdq_reset(cmdq);

        cmdq->dbell_csr_addr = NULL;
        cmdq->compl_cnt_csr_addr = NULL;
        cmdq->unalign_base = NULL;
        cmdq->base = NULL;
        cmdq->unalign_dma = 0;
        cmdq->dma = 0;
        cmdq->qsize = 0;
        cmdq->instr_size = 0;
}

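/* free all AQM command queues and their per-queue structures */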
static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
{
        int i;

        for (i = 0; i < ndev->nr_queues; i++) {
                nitrox_cmdq_cleanup(ndev->aqmq[i]);
                kzfree(ndev->aqmq[i]);
                ndev->aqmq[i] = NULL;
        }
}

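/**
 * nitrox_alloc_aqm_queues - allocate and initialize the AQM command queues
 * @ndev: NITROX device
 *
 * Allocates one command queue per configured queue, points it at its
 * doorbell and commands-completed CSRs and initializes the ring.
 *
 * Return: 0 on success, or a negative error code on failure.
 */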
static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
{
        int i, err;

        for (i = 0; i < ndev->nr_queues; i++) {
                struct nitrox_cmdq *cmdq;
                u64 offset;

                cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
                if (!cmdq) {
                        err = -ENOMEM;
                        goto aqmq_fail;
                }

                cmdq->ndev = ndev;
                cmdq->qno = i;
                cmdq->instr_size = sizeof(struct aqmq_command_s);

                /* AQM Queue Doorbell Counter Register Address */
                offset = AQMQ_DRBLX(i);
                cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
                /* AQM Queue Commands Completed Count Register Address */
                offset = AQMQ_CMD_CNTX(i);
                cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

                err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
                if (err) {
                        kzfree(cmdq);
                        goto aqmq_fail;
                }
                ndev->aqmq[i] = cmdq;
        }

        return 0;

aqmq_fail:
        nitrox_free_aqm_queues(ndev);
        return err;
}

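/* clean up every packet input ring and free the per-queue array */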
static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
        int i;

        for (i = 0; i < ndev->nr_queues; i++) {
                struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

                nitrox_cmdq_cleanup(cmdq);
        }
        kfree(ndev->pkt_inq);
        ndev->pkt_inq = NULL;
}

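/**
 * nitrox_alloc_pktin_queues - allocate and initialize the packet input rings
 * @ndev: NITROX device
 *
 * Allocates the per-queue command queue array, points each queue at its
 * doorbell and solicit-port completion count CSRs and initializes the ring.
 *
 * Return: 0 on success, or a negative error code on failure.
 */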
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
        int i, err;

        ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
                                     sizeof(struct nitrox_cmdq),
                                     GFP_KERNEL, ndev->node);
        if (!ndev->pkt_inq)
                return -ENOMEM;

        for (i = 0; i < ndev->nr_queues; i++) {
                struct nitrox_cmdq *cmdq;
                u64 offset;

                cmdq = &ndev->pkt_inq[i];
                cmdq->ndev = ndev;
                cmdq->qno = i;
                cmdq->instr_size = sizeof(struct nps_pkt_instr);

                /* packet input ring doorbell address */
                offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
                cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
                /* packet solicit port completion count address */
                offset = NPS_PKT_SLC_CNTSX(i);
                cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

                err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
                if (err)
                        goto pktq_fail;
        }
        return 0;

pktq_fail:
        nitrox_free_pktin_queues(ndev);
        return err;
}

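/**
 * create_crypto_dma_pool - create the per-device crypto context DMA pool
 * @ndev: NITROX device
 *
 * Return: 0 on success, or -ENOMEM if the pool cannot be created.
 */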
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
        size_t size;

        /* Crypto context pool, 16 byte aligned */
        size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
        ndev->ctx_pool = dma_pool_create("nitrox-context",
                                         DEV(ndev), size, 16, 0);
        if (!ndev->ctx_pool)
                return -ENOMEM;

        return 0;
}

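/* destroy the crypto context DMA pool, if it was created */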
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
        if (!ndev->ctx_pool)
                return;

        dma_pool_destroy(ndev->ctx_pool);
        ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - allocate a crypto context from the pool
 * @ndev: NITROX device
 *
 * Return: crypto context header on success, NULL on allocation failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
        struct ctx_hdr *ctx;
        struct crypto_ctx_hdr *chdr;
        void *vaddr;
        dma_addr_t dma;

        chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
        if (!chdr)
                return NULL;

        vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
        if (!vaddr) {
                kfree(chdr);
                return NULL;
        }

        /* fill metadata */
        ctx = vaddr;
        ctx->pool = ndev->ctx_pool;
        ctx->dma = dma;
        ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

        chdr->pool = ndev->ctx_pool;
        chdr->dma = dma;
        chdr->vaddr = vaddr;

        return chdr;
}

/**
 * crypto_free_context - free a crypto context back to the pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
        struct crypto_ctx_hdr *ctxp;

        if (!ctx)
                return;

        ctxp = ctx;
        dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
        kfree(ctxp);
}

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the packet input and AQM
 * command queues.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
        int err;

        /* per device crypto context pool */
        err = create_crypto_dma_pool(ndev);
        if (err)
                return err;

        err = nitrox_alloc_pktin_queues(ndev);
        if (err) {
                destroy_crypto_dma_pool(ndev);
                return err;
        }

        err = nitrox_alloc_aqm_queues(ndev);
        if (err) {
                nitrox_free_pktin_queues(ndev);
                destroy_crypto_dma_pool(ndev);
        }

        return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
        nitrox_free_aqm_queues(ndev);
        nitrox_free_pktin_queues(ndev);
        destroy_crypto_dma_pool(ndev);
}
