drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c

DEFINITIONS

This source file includes the following definitions:
  1. npa_aq_enqueue_wait
  2. rvu_npa_aq_enq_inst
  3. npa_lf_hwctx_disable
  4. rvu_mbox_handler_npa_aq_enq
  5. rvu_mbox_handler_npa_hwctx_disable
  6. npa_ctx_free
  7. rvu_mbox_handler_npa_lf_alloc
  8. rvu_mbox_handler_npa_lf_free
  9. npa_aq_init
  10. rvu_npa_init
  11. rvu_npa_freemem
  12. rvu_npa_lf_teardown

// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

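/* Write a single NPA admin queue instruction at the current AQ head,
 * ring the doorbell and busy-wait (up to ~1ms) for the result's
 * completion code. Returns 0 on success, -EBUSY on timeout or on a
 * non-GOOD completion code.
 */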
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                               struct npa_aq_inst_s *inst)
{
        struct admin_queue *aq = block->aq;
        struct npa_aq_res_s *result;
        int timeout = 1000;
        u64 reg, head;

        result = (struct npa_aq_res_s *)aq->res->base;

        /* Get the current head pointer, at which this instruction is appended */
        reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
        head = (reg >> 4) & AQ_PTR_MASK;

        memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
               (void *)inst, aq->inst->entry_sz);
        memset(result, 0, sizeof(*result));
        /* sync into memory */
        wmb();

        /* Ring the doorbell and wait for the result */
        rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
        while (result->compcode == NPA_AQ_COMP_NOTDONE) {
                cpu_relax();
                udelay(1);
                timeout--;
                if (!timeout)
                        return -EBUSY;
        }

        if (result->compcode != NPA_AQ_COMP_GOOD)
                /* TODO: Replace this with some error code */
                return -EBUSY;

        return 0;
}

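/* Validate an AQ enqueue request from a PF/VF, build the corresponding
 * NPA AQ instruction (INIT/WRITE/READ/NOP/LOCK/UNLOCK on an aura or
 * pool context), submit it and track aura/pool enable state in the
 * per-PF/VF bitmaps. For READ operations the context returned by
 * hardware is copied into the mailbox response.
 */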
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
                               struct npa_aq_enq_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        int blkaddr, npalf, rc = 0;
        struct npa_aq_inst_s inst;
        struct rvu_block *block;
        struct admin_queue *aq;
        struct rvu_pfvf *pfvf;
        void *ctx, *mask;
        bool ena;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
                return NPA_AF_ERR_AQ_ENQUEUE;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        aq = block->aq;
        if (!aq) {
                dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
                return NPA_AF_ERR_AQ_ENQUEUE;
        }

        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        memset(&inst, 0, sizeof(struct npa_aq_inst_s));
        inst.cindex = req->aura_id;
        inst.lf = npalf;
        inst.ctype = req->ctype;
        inst.op = req->op;
        /* Enqueuing multiple instructions is not supported yet,
         * so always use the first entry in the result memory.
         */
        inst.res_addr = (u64)aq->res->iova;

        /* Clean result + context memory */
        memset(aq->res->base, 0, aq->res->entry_sz);
        /* Context needs to be written at RES_ADDR + 128 */
        ctx = aq->res->base + 128;
        /* Mask needs to be written at RES_ADDR + 256 */
        mask = aq->res->base + 256;

        switch (req->op) {
        case NPA_AQ_INSTOP_WRITE:
                /* Copy context and write mask */
                if (req->ctype == NPA_AQ_CTYPE_AURA) {
                        memcpy(mask, &req->aura_mask,
                               sizeof(struct npa_aura_s));
                        memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
                } else {
                        memcpy(mask, &req->pool_mask,
                               sizeof(struct npa_pool_s));
                        memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
                }
                break;
        case NPA_AQ_INSTOP_INIT:
                if (req->ctype == NPA_AQ_CTYPE_AURA) {
                        if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
                                rc = NPA_AF_ERR_AQ_FULL;
                                break;
                        }
                        /* Set pool's context address */
                        req->aura.pool_addr = pfvf->pool_ctx->iova +
                        (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
                        memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
                } else { /* POOL's context */
                        memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
                }
                break;
        case NPA_AQ_INSTOP_NOP:
        case NPA_AQ_INSTOP_READ:
        case NPA_AQ_INSTOP_LOCK:
        case NPA_AQ_INSTOP_UNLOCK:
                break;
        default:
                rc = NPA_AF_ERR_AQ_FULL;
                break;
        }

        if (rc)
                return rc;

        spin_lock(&aq->lock);

        /* Submit the instruction to the AQ */
        rc = npa_aq_enqueue_wait(rvu, block, &inst);
        if (rc) {
                spin_unlock(&aq->lock);
                return rc;
        }

        /* Set aura bitmap if aura hw context is enabled */
        if (req->ctype == NPA_AQ_CTYPE_AURA) {
                if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
                        __set_bit(req->aura_id, pfvf->aura_bmap);
                if (req->op == NPA_AQ_INSTOP_WRITE) {
                        ena = (req->aura.ena & req->aura_mask.ena) |
                                (test_bit(req->aura_id, pfvf->aura_bmap) &
                                ~req->aura_mask.ena);
                        if (ena)
                                __set_bit(req->aura_id, pfvf->aura_bmap);
                        else
                                __clear_bit(req->aura_id, pfvf->aura_bmap);
                }
        }

        /* Set pool bitmap if pool hw context is enabled */
        if (req->ctype == NPA_AQ_CTYPE_POOL) {
                if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
                        __set_bit(req->aura_id, pfvf->pool_bmap);
                if (req->op == NPA_AQ_INSTOP_WRITE) {
                        ena = (req->pool.ena & req->pool_mask.ena) |
                                (test_bit(req->aura_id, pfvf->pool_bmap) &
                                ~req->pool_mask.ena);
                        if (ena)
                                __set_bit(req->aura_id, pfvf->pool_bmap);
                        else
                                __clear_bit(req->aura_id, pfvf->pool_bmap);
                }
        }
        spin_unlock(&aq->lock);

        if (rsp) {
                /* Copy the read context into the mailbox response */
                if (req->op == NPA_AQ_INSTOP_READ) {
                        if (req->ctype == NPA_AQ_CTYPE_AURA)
                                memcpy(&rsp->aura, ctx,
                                       sizeof(struct npa_aura_s));
                        else
                                memcpy(&rsp->pool, ctx,
                                       sizeof(struct npa_pool_s));
                }
        }

        return 0;
}

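/* Walk the aura or pool bitmap of a PF/VF and issue a masked AQ WRITE
 * that clears the 'ena' bit of every enabled context, effectively
 * disabling all hardware contexts of the requested type.
 */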
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
        struct npa_aq_enq_req aq_req;
        unsigned long *bmap;
        int id, cnt = 0;
        int err = 0, rc;

        if (!pfvf->pool_ctx || !pfvf->aura_ctx)
                return NPA_AF_ERR_AQ_ENQUEUE;

        memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
        aq_req.hdr.pcifunc = req->hdr.pcifunc;

        if (req->ctype == NPA_AQ_CTYPE_POOL) {
                aq_req.pool.ena = 0;
                aq_req.pool_mask.ena = 1;
                cnt = pfvf->pool_ctx->qsize;
                bmap = pfvf->pool_bmap;
        } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
                aq_req.aura.ena = 0;
                aq_req.aura_mask.ena = 1;
                cnt = pfvf->aura_ctx->qsize;
                bmap = pfvf->aura_bmap;
        }

        aq_req.ctype = req->ctype;
        aq_req.op = NPA_AQ_INSTOP_WRITE;

        for (id = 0; id < cnt; id++) {
                if (!test_bit(id, bmap))
                        continue;
                aq_req.aura_id = id;
                rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
                if (rc) {
                        err = rc;
                        dev_err(rvu->dev, "Failed to disable %s:%d context\n",
                                (req->ctype == NPA_AQ_CTYPE_AURA) ?
                                "Aura" : "Pool", id);
                }
        }

        return err;
}

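/* Mailbox handlers: thin wrappers that forward AQ enqueue and hardware
 * context disable requests from a PF/VF to the helpers above.
 */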
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                                struct npa_aq_enq_req *req,
                                struct npa_aq_enq_rsp *rsp)
{
        return rvu_npa_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
                                       struct msg_rsp *rsp)
{
        return npa_lf_hwctx_disable(rvu, req);
}

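/* Free the aura, pool and queue-interrupt context memory of an NPA LF
 * along with the aura/pool tracking bitmaps.
 */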
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
        kfree(pfvf->aura_bmap);
        pfvf->aura_bmap = NULL;

        qmem_free(rvu->dev, pfvf->aura_ctx);
        pfvf->aura_ctx = NULL;

        kfree(pfvf->pool_bmap);
        pfvf->pool_bmap = NULL;

        qmem_free(rvu->dev, pfvf->pool_ctx);
        pfvf->pool_ctx = NULL;

        qmem_free(rvu->dev, pfvf->npa_qints_ctx);
        pfvf->npa_qints_ctx = NULL;
}

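/* Allocate and initialize an NPA LF for the requesting PF/VF: reset the
 * LF, allocate aura/pool/qint hardware context memory sized from
 * NPA_AF_CONST1, program the context base addresses and aura size, and
 * report stack page geometry back in the response.
 */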
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
                                  struct npa_lf_alloc_req *req,
                                  struct npa_lf_alloc_rsp *rsp)
{
        int npalf, qints, hwctx_size, err, rc = 0;
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        u64 cfg, ctx_cfg;
        int blkaddr;

        if (req->aura_sz > NPA_AURA_SZ_MAX ||
            req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
                return NPA_AF_ERR_PARAM;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        /* Reset this NPA LF */
        err = rvu_lf_reset(rvu, block, npalf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
                return NPA_AF_ERR_LF_RESET;
        }

        ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

        /* Alloc memory for aura HW contexts */
        hwctx_size = 1UL << (ctx_cfg & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
                         NPA_AURA_COUNT(req->aura_sz), hwctx_size);
        if (err)
                goto free_mem;

        pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                                  GFP_KERNEL);
        if (!pfvf->aura_bmap)
                goto free_mem;

        /* Alloc memory for pool HW contexts */
        hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
        if (err)
                goto free_mem;

        pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                                  GFP_KERNEL);
        if (!pfvf->pool_bmap)
                goto free_mem;

        /* Get the number of queue interrupts supported */
        cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
        qints = (cfg >> 28) & 0xFFF;

        /* Alloc memory for Qints HW contexts */
        hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
        if (err)
                goto free_mem;

        cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
        /* Clear way partition mask and set aura offset to '0' */
        cfg &= ~(BIT_ULL(34) - 1);
        /* Set aura size & enable caching of contexts */
        cfg |= (req->aura_sz << 16) | BIT_ULL(34);
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

        /* Configure aura HW context's base */
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
                    (u64)pfvf->aura_ctx->iova);

        /* Enable caching of qints hw context */
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
                    (u64)pfvf->npa_qints_ctx->iova);

        goto exit;

free_mem:
        npa_ctx_free(rvu, pfvf);
        rc = -ENOMEM;

exit:
        /* Set stack page info in the response */
        cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
        rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
        rsp->stack_pg_bytes = cfg & 0xFF;
        rsp->qints = (cfg >> 28) & 0xFFF;
        return rc;
}

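/* Free an NPA LF: reset the LF and release all context memory that was
 * allocated for it at LF alloc time.
 */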
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
                                 struct msg_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        int npalf, err;
        int blkaddr;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        /* Reset this NPA LF */
        err = rvu_lf_reset(rvu, block, npalf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
                return NPA_AF_ERR_LF_RESET;
        }

        npa_ctx_free(rvu, pfvf);

        return 0;
}

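/* One-time NPA admin queue setup: program AQ endianness, keep the NDC
 * cache in use, allocate instruction/result queue memory and tell the
 * hardware where the AQ lives.
 */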
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
        u64 cfg;
        int err;

        /* Set admin queue endianness */
        cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
        cfg |= BIT_ULL(1);
        rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
        cfg &= ~BIT_ULL(1);
        rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

        /* Do not bypass NDC cache */
        cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
        cfg &= ~0x03DULL;
        rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

        /* The result structure can be followed by an aura/pool context at
         * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
         * the operation type. Alloc sufficient result memory for all
         * operations.
         */
        err = rvu_aq_alloc(rvu, &block->aq,
                           Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
                           ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
        if (err)
                return err;

        rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
        rvu_write64(rvu, block->addr,
                    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
        return 0;
}

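/* Top-level NPA block initialization: locate the NPA block and bring up
 * its admin queue. Returns 0 if no NPA block is present.
 */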
int rvu_npa_init(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int blkaddr, err;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
        if (blkaddr < 0)
                return 0;

        /* Initialize admin queue */
        err = npa_aq_init(rvu, &hw->block[blkaddr]);
        if (err)
                return err;

        return 0;
}

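/* Release the NPA admin queue memory on driver teardown. */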
void rvu_npa_freemem(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        rvu_aq_free(rvu, block->aq);
}

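/* Tear down an NPA LF: disable every enabled pool and aura context and
 * free the LF's context memory.
 */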
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct hwctx_disable_req ctx_req;

        /* Disable all pools */
        ctx_req.hdr.pcifunc = pcifunc;
        ctx_req.ctype = NPA_AQ_CTYPE_POOL;
        npa_lf_hwctx_disable(rvu, &ctx_req);

        /* Disable all auras */
        ctx_req.ctype = NPA_AQ_CTYPE_AURA;
        npa_lf_hwctx_disable(rvu, &ctx_req);

        npa_ctx_free(rvu, pfvf);
}
