root/drivers/infiniband/hw/cxgb4/resource.c


DEFINITIONS

This source file includes the following definitions.
  1. c4iw_init_qid_table
  2. c4iw_init_resource
  3. c4iw_get_resource
  4. c4iw_put_resource
  5. c4iw_get_cqid
  6. c4iw_put_cqid
  7. c4iw_get_qpid
  8. c4iw_put_qpid
  9. c4iw_destroy_resource
  10. c4iw_pblpool_alloc
  11. destroy_pblpool
  12. c4iw_pblpool_free
  13. c4iw_pblpool_create
  14. c4iw_pblpool_destroy
  15. c4iw_rqtpool_alloc
  16. destroy_rqtpool
  17. c4iw_rqtpool_free
  18. c4iw_rqtpool_create
  19. c4iw_rqtpool_destroy
  20. c4iw_alloc_srq_idx
  21. c4iw_free_srq_idx
  22. c4iw_ocqp_pool_alloc
  23. c4iw_ocqp_pool_free
  24. c4iw_ocqp_pool_create
  25. c4iw_ocqp_pool_destroy

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
        u32 i;

        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
                                rdev->lldi.vr->qp.start,
                                rdev->lldi.vr->qp.size,
                                rdev->lldi.vr->qp.size, 0))
                return -ENOMEM;

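        /*
         * Seed the table with only the qids aligned to (qpmask + 1):
         * the qpmask ids following each aligned qid map to the same
         * db/gts page and are handed out from per-ucontext lists
         * instead (see c4iw_get_cqid()/c4iw_get_qpid()).
         */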
        for (i = rdev->lldi.vr->qp.start;
             i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
                if (!(i & rdev->qpmask))
                        c4iw_id_free(&rdev->resource.qid_table, i);
        return 0;
}

/* nr_* must be a power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
                       u32 nr_pdid, u32 nr_srqt)
{
        int err;

        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
                                  C4IW_ID_TABLE_F_RANDOM);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_table(rdev);
        if (err)
                goto qid_err;
        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
                                  nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        if (!nr_srqt)
                err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
                                          1, 1, 0);
        else
                err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
                                          nr_srqt, 0, 0);
        if (err)
                goto srq_err;
        return 0;
 srq_err:
        c4iw_id_table_free(&rdev->resource.pdid_table);
 pdid_err:
        c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
        c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
        return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
        u32 entry;

        entry = c4iw_id_alloc(id_table);
        if (entry == (u32)(-1))
                return 0;
        return entry;
}
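
/*
 * Example pairing (an illustrative sketch, not lifted from the real
 * callers): allocating and releasing a PD id.
 *
 *      u32 pdid = c4iw_get_resource(&rdev->resource.pdid_table);
 *
 *      if (!pdid)
 *              return -ENOMEM;
 *      ...
 *      c4iw_put_resource(&rdev->resource.pdid_table, pdid);
 */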

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
        pr_debug("entry 0x%x\n", entry);
        c4iw_id_free(id_table, entry);
}

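/*
 * Hand out a cqid: reuse a cached id from this ucontext if one is
 * available, otherwise carve a new (qpmask + 1)-aligned group out of
 * the global qid table and cache the sibling ids on both the cqid
 * and qpid lists, since they all share one db/gts page.
 */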
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
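                /*
                 * Cache the remaining ids in this aligned group on
                 * the per-ucontext cqid list for later reuse.
                 */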
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}

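/* QP-side counterpart of c4iw_get_cqid(); the caching scheme is the same. */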
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid) {
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.qid.fail++;
                        mutex_unlock(&rdev->stats.lock);
                        goto out;
                }
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
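                /*
                 * Cache the remaining ids in this aligned group on
                 * the per-ucontext qpid list for later reuse.
                 */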
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        c4iw_id_table_free(&rscp->tpt_table);
        c4iw_id_table_free(&rscp->qid_table);
        c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8                 /* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
                kref_get(&rdev->pbl_kref);
        } else {
                rdev->stats.pbl.fail++;
        }
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

static void destroy_pblpool(struct kref *kref)
{
        struct c4iw_rdev *rdev;

        rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
        gen_pool_destroy(rdev->pbl_pool);
        complete(&rdev->pbl_compl);
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
        kref_put(&rdev->pbl_kref, destroy_pblpool);
}
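
/*
 * Lifetime note: every successful c4iw_pblpool_alloc() takes a
 * reference on pbl_kref and the matching c4iw_pblpool_free() drops
 * it, so destroy_pblpool() runs only after all PBLs are back in the
 * pool.  Illustrative pairing (a sketch; sizes are in bytes, with 8
 * bytes per PBL entry):
 *
 *      u32 pbl_addr = c4iw_pblpool_alloc(rdev, npages << 3);
 *
 *      if (!pbl_addr)
 *              return -ENOMEM;
 *      ...
 *      c4iw_pblpool_free(rdev, pbl_addr, npages << 3);
 */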

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        unsigned pbl_start, pbl_chunk, pbl_top;

        rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
        if (!rdev->pbl_pool)
                return -ENOMEM;

        pbl_start = rdev->lldi.vr->pbl.start;
        pbl_chunk = rdev->lldi.vr->pbl.size;
        pbl_top = pbl_start + pbl_chunk;

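        /*
         * gen_pool_add() allocates a kernel-side descriptor for each
         * chunk; if adding the whole range fails under memory
         * pressure, retry with progressively halved chunks, giving up
         * once the chunk size is down to 1024 minimum-sized units and
         * still failing.
         */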
        while (pbl_start < pbl_top) {
                pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
                if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
                        pr_debug("failed to add PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
                                pr_warn("Failed to add all PBL chunks (%x/%x)\n",
                                        pbl_start, pbl_top - pbl_start);
                                return 0;
                        }
                        pbl_chunk >>= 1;
                } else {
                        pr_debug("added PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }

        return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        kref_put(&rdev->pbl_kref, destroy_pblpool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10        /* 1KB == min RQT size (16 entries) */

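/*
 * RQT sizes are passed in hardware entries; each entry is
 * T4_RQT_ENTRY_SIZE (64) bytes, hence the "size << 6" conversions
 * below.
 */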
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
        if (!addr)
                pr_warn_ratelimited("%s: Out of RQT memory\n",
                                    pci_name(rdev->lldi.pdev));
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
                kref_get(&rdev->rqt_kref);
        } else {
                rdev->stats.rqt.fail++;
        }
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

static void destroy_rqtpool(struct kref *kref)
{
        struct c4iw_rdev *rdev;

        rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
        gen_pool_destroy(rdev->rqt_pool);
        complete(&rdev->rqt_compl);
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size << 6);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
        kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        unsigned rqt_start, rqt_chunk, rqt_top;
        int skip = 0;

        rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
        if (!rdev->rqt_pool)
                return -ENOMEM;

        /*
         * If SRQs are supported, then never use the first RQE from
         * the RQT region. This is because HW uses RQT index 0 as NULL.
         */
        if (rdev->lldi.vr->srq.size)
                skip = T4_RQT_ENTRY_SIZE;

        rqt_start = rdev->lldi.vr->rq.start + skip;
        rqt_chunk = rdev->lldi.vr->rq.size - skip;
        rqt_top = rqt_start + rqt_chunk;

        while (rqt_start < rqt_top) {
                rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
                if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
                        pr_debug("failed to add RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
                                pr_warn("Failed to add all RQT chunks (%x/%x)\n",
                                        rqt_start, rqt_top - rqt_start);
                                return 0;
                        }
                        rqt_chunk >>= 1;
                } else {
                        pr_debug("added RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        rqt_start += rqt_chunk;
                }
        }
        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

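/*
 * SRQ indexes come from the small fixed-size srq_table set up in
 * c4iw_init_resource(); usage is tracked in rdev->stats.srqt.
 */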
int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
{
        int idx;

        idx = c4iw_id_alloc(&rdev->resource.srq_table);
        mutex_lock(&rdev->stats.lock);
        if (idx == -1) {
                rdev->stats.srqt.fail++;
                mutex_unlock(&rdev->stats.lock);
                return -ENOMEM;
        }
        rdev->stats.srqt.cur++;
        if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
                rdev->stats.srqt.max = rdev->stats.srqt.cur;
        mutex_unlock(&rdev->stats.lock);
        return idx;
}

void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
{
        c4iw_id_free(&rdev->resource.srq_table, idx);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.srqt.cur--;
        mutex_unlock(&rdev->stats.lock);
}

/*
 * On-Chip QP Memory.
 */
#define MIN_OCQP_SHIFT 12       /* 4KB == min ocqp size */

u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        if (addr) {
                mutex_lock(&rdev->stats.lock);
                rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
                if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
                        rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
                mutex_unlock(&rdev->stats.lock);
        }
        return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
        unsigned start, chunk, top;

        rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
        if (!rdev->ocqp_pool)
                return -ENOMEM;

        start = rdev->lldi.vr->ocq.start;
        chunk = rdev->lldi.vr->ocq.size;
        top = start + chunk;

        while (start < top) {
                chunk = min(top - start + 1, chunk);
                if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
                        pr_debug("failed to add OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        if (chunk <= 1024 << MIN_OCQP_SHIFT) {
                                pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
                                        start, top - start);
                                return 0;
                        }
                        chunk >>= 1;
                } else {
                        pr_debug("added OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        start += chunk;
                }
        }
        return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->ocqp_pool);
}
