drivers/infiniband/hw/hfi1/pio.c


DEFINITIONS

This source file includes the following definitions.
  1. __cm_reset
  2. pio_send_control
  3. wildcard_to_pool
  4. sc_type_name
  5. init_sc_pools_and_sizes
  6. init_send_contexts
  7. sc_hw_alloc
  8. sc_hw_free
  9. group_context
  10. group_size
  11. cr_group_addresses
  12. sc_halted
  13. sc_mtu_to_threshold
  14. sc_percent_to_threshold
  15. sc_set_cr_threshold
  16. set_pio_integrity
  17. get_buffers_allocated
  18. reset_buffers_allocated
  19. sc_alloc
  20. sc_free
  21. sc_disable
  22. packet_occupancy
  23. egress_halted
  24. is_sc_halted
  25. sc_wait_for_packet_egress
  26. sc_wait
  27. sc_restart
  28. pio_freeze
  29. pio_kernel_unfreeze
  30. pio_kernel_linkup
  31. pio_init_wait_progress
  32. pio_reset_all
  33. sc_enable
  34. sc_return_credits
  35. sc_flush
  36. sc_drop
  37. sc_stop
  38. sc_buffer_alloc
  39. sc_add_credit_return_intr
  40. sc_del_credit_return_intr
  41. hfi1_sc_wantpiobuf_intr
  42. sc_piobufavail
  43. fill_code
  44. sc_release_update
  45. sc_group_release_update
  46. pio_select_send_context_vl
  47. pio_select_send_context_sc
  48. pio_map_free
  49. pio_map_rcu_callback
  50. set_threshold
  51. pio_map_init
  52. free_pio_map
  53. init_pervl_scs
  54. init_credit_return
  55. free_credit_return
  56. seqfile_dump_sci

/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
        write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
        while (1) {
                udelay(1);
                sendctrl = read_csr(dd, SEND_CTRL);
                if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
                        break;
        }
}

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
        u64 reg, mask;
        unsigned long flags;
        int write = 1;  /* write sendctrl back */
        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
        int i;

        spin_lock_irqsave(&dd->sendctrl_lock, flags);

        reg = read_csr(dd, SEND_CTRL);
        switch (op) {
        case PSC_GLOBAL_ENABLE:
                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        /* Fall through */
        case PSC_DATA_VL_ENABLE:
                mask = 0;
                for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
                        if (!dd->vld[i].mtu)
                                mask |= BIT_ULL(i);
                /* Disallow sending on VLs not enabled */
                mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
                        SEND_CTRL_UNSUPPORTED_VL_SHIFT;
                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
                break;
        case PSC_GLOBAL_DISABLE:
                reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
                break;
        case PSC_GLOBAL_VLARB_ENABLE:
                reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
                break;
        case PSC_GLOBAL_VLARB_DISABLE:
                reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
                break;
        case PSC_CM_RESET:
                __cm_reset(dd, reg);
                write = 0; /* CSR already written (and flushed) */
                break;
        case PSC_DATA_VL_DISABLE:
                reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
                flush = 1;
                break;
        default:
                dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
                break;
        }

        if (write) {
                write_csr(dd, SEND_CTRL, reg);
                if (flush)
                        (void)read_csr(dd, SEND_CTRL); /* flush write */
        }

        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU  -2
#define SCC_PER_KRCVQ  -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS  32
#define SCS_VL15_CREDITS 102    /* 3 pkts of 2048B data + 128B header */

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
        [SC_KERNEL] = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
                        .count = SCC_PER_VL },  /* one per NUMA */
        [SC_ACK]    = { .size  = SCS_ACK_CREDITS,
                        .count = SCC_PER_KRCVQ },
        [SC_USER]   = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
                        .count = SCC_PER_CPU }, /* one per CPU */
        [SC_VL15]   = { .size  = SCS_VL15_CREDITS,
                        .count = 1 },
};

/* send context memory pool configuration */
struct mem_pool_config {
        int centipercent;       /* % of memory, in 100ths of 1% */
        int absolute_blocks;    /* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
        /* centi%, abs blocks */
        {  10000,     -1 },             /* pool 0 */
        {      0,     -1 },             /* pool 1 */
};

/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
        int centipercent;       /*
                                 * 100th of 1% of memory to use, -1 if blocks
                                 * already set
                                 */
        int count;              /* count of contexts in the pool */
        int blocks;             /* block size of the pool */
        int size;               /* context size, in blocks */
};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *      -1 => 0
 *      -2 => 1
 *      etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
        if (wc >= 0)
                return -1;      /* non-wildcard */
        return -wc - 1;
}

static const char *sc_type_names[SC_MAX] = {
        "kernel",
        "ack",
        "user",
        "vl15"
};

static const char *sc_type_name(int index)
{
        if (index < 0 || index >= SC_MAX)
                return "unknown";
        return sc_type_names[index];
}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
        struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
        int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
        int total_contexts = 0;
        int fixed_blocks;
        int pool_blocks;
        int used_blocks;
        int cp_total;           /* centipercent total */
        int ab_total;           /* absolute block total */
        int extra;
        int i;

        /*
         * When SDMA is enabled, kernel context pio packet size is capped by
         * "piothreshold". Reduce pio buffer allocation for kernel context by
         * setting it to a fixed size. The allocation allows 3-deep buffering
         * of the largest pio packets plus up to 128 bytes header, sufficient
         * to maintain verbs performance.
         *
         * When SDMA is disabled, keep the default pooling allocation.
         */
        if (HFI1_CAP_IS_KSET(SDMA)) {
                u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
                                         piothreshold : PIO_THRESHOLD_CEILING;
                sc_config_sizes[SC_KERNEL].size =
                        3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
        }

        /*
         * Step 0:
         *      - copy the centipercents/absolute sizes from the pool config
         *      - sanity check these values
         *      - add up centipercents, then later check for full value
         *      - add up absolute blocks, then later check for over-commit
         */
        cp_total = 0;
        ab_total = 0;
        for (i = 0; i < NUM_SC_POOLS; i++) {
                int cp = sc_mem_pool_config[i].centipercent;
                int ab = sc_mem_pool_config[i].absolute_blocks;

                /*
                 * A negative value is "unused" or "invalid".  Both *can*
                 * be valid, but centipercent wins, so check that first
                 */
                if (cp >= 0) {                  /* centipercent valid */
                        cp_total += cp;
                } else if (ab >= 0) {           /* absolute blocks valid */
                        ab_total += ab;
                } else {                        /* neither valid */
                        dd_dev_err(
                                dd,
                                "Send context memory pool %d: both the block count and centipercent are invalid\n",
                                i);
                        return -EINVAL;
                }

                mem_pool_info[i].centipercent = cp;
                mem_pool_info[i].blocks = ab;
        }

        /*
         * Step 1: sanity check the pool totals.
         *
         * Do not use both % and absolute blocks for different pools.
         */
        if (cp_total != 0 && ab_total != 0) {
                dd_dev_err(
                        dd,
                        "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
                return -EINVAL;
        }

        /* if any percentages are present, they must add up to 100% x 100 */
        if (cp_total != 0 && cp_total != 10000) {
                dd_dev_err(
                        dd,
                        "Send context memory pool centipercent is %d, expecting 10000\n",
                        cp_total);
                return -EINVAL;
        }

        /* the absolute pool total cannot be more than the mem total */
        if (ab_total > total_blocks) {
                dd_dev_err(
                        dd,
                        "Send context memory pool absolute block count %d is larger than the memory size %d\n",
                        ab_total, total_blocks);
                return -EINVAL;
        }

        /*
         * Step 2:
         *      - copy from the context size config
         *      - replace context type wildcard counts with real values
         *      - add up non-memory pool block sizes
         *      - add up memory pool user counts
         */
        fixed_blocks = 0;
        for (i = 0; i < SC_MAX; i++) {
                int count = sc_config_sizes[i].count;
                int size = sc_config_sizes[i].size;
                int pool;

                /*
                 * Sanity check count: Either a positive value or
                 * one of the expected wildcards is valid.  The positive
                 * value is checked later when we compare against total
                 * memory available.
                 */
                if (i == SC_ACK) {
                        count = dd->n_krcv_queues;
                } else if (i == SC_KERNEL) {
                        count = INIT_SC_PER_VL * num_vls;
                } else if (count == SCC_PER_CPU) {
                        count = dd->num_rcv_contexts - dd->n_krcv_queues;
                } else if (count < 0) {
                        dd_dev_err(
                                dd,
                                "%s send context invalid count wildcard %d\n",
                                sc_type_name(i), count);
                        return -EINVAL;
                }
                if (total_contexts + count > chip_send_contexts(dd))
                        count = chip_send_contexts(dd) - total_contexts;

                total_contexts += count;

                /*
                 * Sanity check pool: The conversion will return a pool
                 * number or -1 if a fixed (non-negative) value.  The fixed
                 * value is checked later when we compare against
                 * total memory available.
                 */
                pool = wildcard_to_pool(size);
                if (pool == -1) {                       /* non-wildcard */
                        fixed_blocks += size * count;
                } else if (pool < NUM_SC_POOLS) {       /* valid wildcard */
                        mem_pool_info[pool].count += count;
                } else {                                /* invalid wildcard */
                        dd_dev_err(
                                dd,
                                "%s send context invalid pool wildcard %d\n",
                                sc_type_name(i), size);
                        return -EINVAL;
                }

                dd->sc_sizes[i].count = count;
                dd->sc_sizes[i].size = size;
        }
        if (fixed_blocks > total_blocks) {
                dd_dev_err(
                        dd,
                        "Send context fixed block count, %u, larger than total block count %u\n",
                        fixed_blocks, total_blocks);
                return -EINVAL;
        }

        /* step 3: calculate the blocks in the pools, and pool context sizes */
        pool_blocks = total_blocks - fixed_blocks;
        if (ab_total > pool_blocks) {
                dd_dev_err(
                        dd,
                        "Send context fixed pool sizes, %u, larger than pool block count %u\n",
                        ab_total, pool_blocks);
                return -EINVAL;
        }
        /* subtract off the fixed pool blocks */
        pool_blocks -= ab_total;

        for (i = 0; i < NUM_SC_POOLS; i++) {
                struct mem_pool_info *pi = &mem_pool_info[i];

                /* % beats absolute blocks */
                if (pi->centipercent >= 0)
                        pi->blocks = (pool_blocks * pi->centipercent) / 10000;

                if (pi->blocks == 0 && pi->count != 0) {
                        dd_dev_err(
                                dd,
                                "Send context memory pool %d has %u contexts, but no blocks\n",
                                i, pi->count);
                        return -EINVAL;
                }
                if (pi->count == 0) {
                        /* warn about wasted blocks */
                        if (pi->blocks != 0)
                                dd_dev_err(
                                        dd,
                                        "Send context memory pool %d has %u blocks, but zero contexts\n",
                                        i, pi->blocks);
                        pi->size = 0;
                } else {
                        pi->size = pi->blocks / pi->count;
                }
        }

        /* step 4: fill in the context type sizes from the pool sizes */
        used_blocks = 0;
        for (i = 0; i < SC_MAX; i++) {
                if (dd->sc_sizes[i].size < 0) {
                        unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

                        WARN_ON_ONCE(pool >= NUM_SC_POOLS);
                        dd->sc_sizes[i].size = mem_pool_info[pool].size;
                }
                /* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
                if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
                        dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

                /* calculate our total usage */
                used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
        }
        extra = total_blocks - used_blocks;
        if (extra != 0)
                dd_dev_info(dd, "unused send context blocks: %d\n", extra);

        return total_contexts;
}
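
/*
 * Illustrative walk-through of the sizing math above, using made-up
 * numbers rather than any particular chip configuration: with
 * total_blocks = 1023 and fixed contexts consuming 100 blocks,
 * pool_blocks = 923.  Pool 0 at 10000 centipercent (100%) with 20
 * pooled contexts gets (923 * 10000) / 10000 = 923 blocks, for a
 * per-context size of 923 / 20 = 46 blocks.  The remainder
 * (923 - 20 * 46 = 3 blocks) is reported by the "unused send context
 * blocks" message above.
 */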

int init_send_contexts(struct hfi1_devdata *dd)
{
        u16 base;
        int ret, i, j, context;

        ret = init_credit_return(dd);
        if (ret)
                return ret;

        dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
                                        GFP_KERNEL);
        dd->send_contexts = kcalloc(dd->num_send_contexts,
                                    sizeof(struct send_context_info),
                                    GFP_KERNEL);
        if (!dd->send_contexts || !dd->hw_to_sw) {
                kfree(dd->hw_to_sw);
                kfree(dd->send_contexts);
                free_credit_return(dd);
                return -ENOMEM;
        }

        /* hardware context map starts with invalid send context indices */
        for (i = 0; i < TXE_NUM_CONTEXTS; i++)
                dd->hw_to_sw[i] = INVALID_SCI;

        /*
         * All send contexts have their credit sizes.  Allocate credits
         * for each context one after another from the global space.
         */
        context = 0;
        base = 1;
        for (i = 0; i < SC_MAX; i++) {
                struct sc_config_sizes *scs = &dd->sc_sizes[i];

                for (j = 0; j < scs->count; j++) {
                        struct send_context_info *sci =
                                                &dd->send_contexts[context];
                        sci->type = i;
                        sci->base = base;
                        sci->credits = scs->size;

                        context++;
                        base += scs->size;
                }
        }

        return 0;
}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
                       u32 *hw_context)
{
        struct send_context_info *sci;
        u32 index;
        u32 context;

        for (index = 0, sci = &dd->send_contexts[0];
                        index < dd->num_send_contexts; index++, sci++) {
                if (sci->type == type && sci->allocated == 0) {
                        sci->allocated = 1;
                        /* use a 1:1 mapping, but make them non-equal */
                        context = chip_send_contexts(dd) - index - 1;
                        dd->hw_to_sw[context] = index;
                        *sw_index = index;
                        *hw_context = context;
                        return 0; /* success */
                }
        }
        dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
        return -ENOSPC;
}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
        struct send_context_info *sci;

        sci = &dd->send_contexts[sw_index];
        if (!sci->allocated) {
                dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
                           __func__, sw_index, hw_context);
        }
        sci->allocated = 0;
        dd->hw_to_sw[hw_context] = INVALID_SCI;
}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
        return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
        return 1 << group;
}
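
/*
 * Example of the group helpers, with hypothetical values: group = 2
 * means a group spans group_size(2) = 4 contexts, and contexts 4..7
 * share one base, e.g. group_context(6, 2) = 4.
 */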

/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
        u32 gc = group_context(sc->hw_context, sc->group);
        u32 index = sc->hw_context & 0x7;

        sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
        *dma = (unsigned long)
               &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}
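
/*
 * A sketch of the layout described above, with hypothetical numbers:
 * for hw_context = 10 in a group of 4 starting at context 8, gc = 8
 * and index = 10 & 0x7 = 2, so this context polls va[8].cr[2] while
 * its credit return CSR is programmed with the DMA address of entry
 * 8 - the same address for every context in the group.
 */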

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{
        struct send_context *sc;

        sc = container_of(work, struct send_context, halt_work);
        sc_restart(sc);
}

/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a credit return when one MTU plus an optional header's worth of
 * credits remain unreturned.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
        u32 release_credits;
        u32 threshold;

        /* add in the header size, then divide by the PIO block size */
        mtu += hdrqentsize << 2;
        release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

        /* check against this context's credits */
        if (sc->credits <= release_credits)
                threshold = 1;
        else
                threshold = sc->credits - release_credits;

        return threshold;
}
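
/*
 * Worked example with illustrative numbers (PIO_BLOCK_SIZE is 64
 * bytes): mtu = 4096 and hdrqentsize = 32 DWORDs (128 bytes) give
 * release_credits = DIV_ROUND_UP(4224, 64) = 66.  A context with 104
 * credits then gets a threshold of 104 - 66 = 38, while a context
 * with only 64 credits falls back to a threshold of 1.
 */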

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
        return (sc->credits * percent) / 100;
}
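
/* For example: a context with 64 credits at percent = 50 yields 32. */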

/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
        unsigned long flags;
        u32 old_threshold;
        int force_return = 0;

        spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

        old_threshold = (sc->credit_ctrl >>
                                SC(CREDIT_CTRL_THRESHOLD_SHIFT))
                         & SC(CREDIT_CTRL_THRESHOLD_MASK);

        if (new_threshold != old_threshold) {
                sc->credit_ctrl =
                        (sc->credit_ctrl
                                & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
                        | ((new_threshold
                                & SC(CREDIT_CTRL_THRESHOLD_MASK))
                           << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
                write_kctxt_csr(sc->dd, sc->hw_context,
                                SC(CREDIT_CTRL), sc->credit_ctrl);

                /* force a credit return on change to avoid a possible stall */
                force_return = 1;
        }

        spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

        if (force_return)
                sc_return_credits(sc);
}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
        struct hfi1_devdata *dd = sc->dd;
        u32 hw_context = sc->hw_context;
        int type = sc->type;

        write_kctxt_csr(dd, hw_context,
                        SC(CHECK_ENABLE),
                        hfi1_pkt_default_send_ctxt_mask(dd, type));
}

static u32 get_buffers_allocated(struct send_context *sc)
{
        int cpu;
        u32 ret = 0;

        for_each_possible_cpu(cpu)
                ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
        return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
        int cpu;

        for_each_possible_cpu(cpu)
                (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
                              uint hdrqentsize, int numa)
{
        struct send_context_info *sci;
        struct send_context *sc = NULL;
        dma_addr_t dma;
        unsigned long flags;
        u64 reg;
        u32 thresh;
        u32 sw_index;
        u32 hw_context;
        int ret;
        u8 opval, opmask;

        /* do not allocate while frozen */
        if (dd->flags & HFI1_FROZEN)
                return NULL;

        sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
        if (!sc)
                return NULL;

        sc->buffers_allocated = alloc_percpu(u32);
        if (!sc->buffers_allocated) {
                kfree(sc);
                dd_dev_err(dd,
                           "Cannot allocate buffers_allocated per cpu counters\n"
                          );
                return NULL;
        }

        spin_lock_irqsave(&dd->sc_lock, flags);
        ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
        if (ret) {
                spin_unlock_irqrestore(&dd->sc_lock, flags);
                free_percpu(sc->buffers_allocated);
                kfree(sc);
                return NULL;
        }

        sci = &dd->send_contexts[sw_index];
        sci->sc = sc;

        sc->dd = dd;
        sc->node = numa;
        sc->type = type;
        spin_lock_init(&sc->alloc_lock);
        spin_lock_init(&sc->release_lock);
        spin_lock_init(&sc->credit_ctrl_lock);
        seqlock_init(&sc->waitlock);
        INIT_LIST_HEAD(&sc->piowait);
        INIT_WORK(&sc->halt_work, sc_halted);
        init_waitqueue_head(&sc->halt_wait);

        /* grouping is always single context for now */
        sc->group = 0;

        sc->sw_index = sw_index;
        sc->hw_context = hw_context;
        cr_group_addresses(sc, &dma);
        sc->credits = sci->credits;
        sc->size = sc->credits * PIO_BLOCK_SIZE;

/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
        sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
                                        << PIO_ADDR_CONTEXT_SHIFT);
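
        /*
         * For instance (hypothetical context number): hw_context = 3
         * yields an offset of (3 & 0xff) << 16 = 0x30000 from piobase,
         * i.e. each context's send buffer window is 64KB apart.
         */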

        /* set base and credits */
        reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
                                        << SC(CTRL_CTXT_DEPTH_SHIFT))
                | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
                                        << SC(CTRL_CTXT_BASE_SHIFT));
        write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

        set_pio_integrity(sc);

        /* unmask all errors */
        write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

        /* set the default partition key */
        write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
                        (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
                         DEFAULT_PKEY) <<
                        SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

        /* per context type checks */
        if (type == SC_USER) {
                opval = USER_OPCODE_CHECK_VAL;
                opmask = USER_OPCODE_CHECK_MASK;
        } else {
                opval = OPCODE_CHECK_VAL_DISABLED;
                opmask = OPCODE_CHECK_MASK_DISABLED;
        }

        /* set the send context check opcode mask and value */
        write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
                        ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
                        ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

        /* set up credit return */
        reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
        write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

        /*
         * Calculate the initial credit return threshold.
         *
         * For Ack contexts, set a threshold for half the credits.
         * For User contexts use the given percentage.  This has been
         * sanitized on driver start-up.
         * For Kernel contexts, use the default MTU plus a header
         * or half the credits, whichever is smaller. This should
         * work for both the 3-deep buffering allocation and the
         * pooling allocation.
         */
        if (type == SC_ACK) {
                thresh = sc_percent_to_threshold(sc, 50);
        } else if (type == SC_USER) {
                thresh = sc_percent_to_threshold(sc,
                                                 user_credit_return_threshold);
        } else { /* kernel */
                thresh = min(sc_percent_to_threshold(sc, 50),
                             sc_mtu_to_threshold(sc, hfi1_max_mtu,
                                                 hdrqentsize));
        }
        reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
        /* add in early return */
        if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
                reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
        else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
                reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

        /* set up write-through credit_ctrl */
        sc->credit_ctrl = reg;
        write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

        /* User send contexts should not allow sending on VL15 */
        if (type == SC_USER) {
                reg = 1ULL << 15;
                write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
        }

        spin_unlock_irqrestore(&dd->sc_lock, flags);

        /*
         * Allocate shadow ring to track outstanding PIO buffers _after_
         * unlocking.  We don't know the size until the lock is held and
         * we can't allocate while the lock is held.  No one is using
         * the context yet, so allocate it now.
         *
         * User contexts do not get a shadow ring.
         */
        if (type != SC_USER) {
                /*
                 * Size the shadow ring 1 larger than the number of credits
                 * so head == tail can mean empty.
                 */
                sc->sr_size = sci->credits + 1;
                sc->sr = kcalloc_node(sc->sr_size,
                                      sizeof(union pio_shadow_ring),
                                      GFP_KERNEL, numa);
                if (!sc->sr) {
                        sc_free(sc);
                        return NULL;
                }
        }

        hfi1_cdbg(PIO,
                  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
                  sw_index,
                  hw_context,
                  sc_type_name(type),
                  sc->group,
                  sc->credits,
                  sc->credit_ctrl,
                  thresh);

        return sc;
}

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
        struct hfi1_devdata *dd;
        unsigned long flags;
        u32 sw_index;
        u32 hw_context;

        if (!sc)
                return;

        sc->flags |= SCF_IN_FREE;       /* ensure no restarts */
        dd = sc->dd;
        if (!list_empty(&sc->piowait))
                dd_dev_err(dd, "piowait list not empty!\n");
        sw_index = sc->sw_index;
        hw_context = sc->hw_context;
        sc_disable(sc); /* make sure the HW is disabled */
        flush_work(&sc->halt_work);

        spin_lock_irqsave(&dd->sc_lock, flags);
        dd->send_contexts[sw_index].sc = NULL;

        /* clear/disable all registers set in sc_alloc */
        write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
        write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
        write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
        write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
        write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
        write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
        write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

        /* release the index and context for re-use */
        sc_hw_free(dd, sw_index, hw_context);
        spin_unlock_irqrestore(&dd->sc_lock, flags);

        kfree(sc->sr);
        free_percpu(sc->buffers_allocated);
        kfree(sc);
}

/* disable the context */
void sc_disable(struct send_context *sc)
{
        u64 reg;
        struct pio_buf *pbuf;

        if (!sc)
                return;

        /* do all steps, even if already disabled */
        spin_lock_irq(&sc->alloc_lock);
        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
        sc->flags &= ~SCF_ENABLED;
        sc_wait_for_packet_egress(sc, 1);
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);

        /*
         * Flush any waiters.  Once the context is disabled,
         * credit return interrupts are stopped (although there
         * could be one in-process when the context is disabled).
         * Wait one microsecond for any lingering interrupts, then
         * proceed with the flush.
         */
        udelay(1);
        spin_lock(&sc->release_lock);
        if (sc->sr) {   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
                        if (pbuf->cb)
                                (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
                        sc->sr_tail++;
                        if (sc->sr_tail >= sc->sr_size)
                                sc->sr_tail = 0;
                }
        }
        spin_unlock(&sc->release_lock);

        write_seqlock(&sc->waitlock);
        while (!list_empty(&sc->piowait)) {
                struct iowait *wait;
                struct rvt_qp *qp;
                struct hfi1_qp_priv *priv;

                wait = list_first_entry(&sc->piowait, struct iowait, list);
                qp = iowait_to_qp(wait);
                priv = qp->priv;
                list_del_init(&priv->s_iowait.list);
                priv->s_iowait.lock = NULL;
                hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
        }
        write_sequnlock(&sc->waitlock);

        spin_unlock_irq(&sc->alloc_lock);
}

/* return SendEgressCtxtStatus.PacketOccupancy */
static u64 packet_occupancy(u64 reg)
{
        return (reg &
                SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
                >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
}

/* is egress halted on the context? */
static bool egress_halted(u64 reg)
{
        return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
}

/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{
        return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
                  SC(STATUS_CTXT_HALTED_SMASK));
}

/**
 * sc_wait_for_packet_egress - wait for packet egress
 * @sc: valid send context
 * @pause: wait for credit return
 *
 * Wait for packet egress, optionally pause for credit return
 *
 * Egress halt and Context halt are not necessarily the same thing, so
 * check for both.
 *
 * NOTE: The context halt bit may not be set immediately.  Because of this,
 * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
 * context bit to determine if the context is halted.
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
        struct hfi1_devdata *dd = sc->dd;
        u64 reg = 0;
        u64 reg_prev;
        u32 loop = 0;

        while (1) {
                reg_prev = reg;
                reg = read_csr(dd, sc->hw_context * 8 +
                               SEND_EGRESS_CTXT_STATUS);
                /* done if any halt bits, SW or HW are set */
                if (sc->flags & SCF_HALTED ||
                    is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
                        break;
                reg = packet_occupancy(reg);
                if (reg == 0)
                        break;
                /* counter is reset if occupancy count changes */
                if (reg != reg_prev)
                        loop = 0;
                if (loop > 50000) {
                        /* timed out - bounce the link */
                        dd_dev_err(dd,
                                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
                                   __func__, sc->sw_index,
                                   sc->hw_context, (u32)reg);
                        queue_work(dd->pport->link_wq,
                                   &dd->pport->link_bounce_work);
                        break;
                }
                loop++;
                udelay(1);
        }

        if (pause)
                /* Add additional delay to ensure chip returns all credits */
                pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
        int i;

        for (i = 0; i < dd->num_send_contexts; i++) {
                struct send_context *sc = dd->send_contexts[i].sc;

                if (!sc)
                        continue;
                sc_wait_for_packet_egress(sc, 0);
        }
}

/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step (waiting for the halt to be asserted) fails,
 * return early.  Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
        struct hfi1_devdata *dd = sc->dd;
        u64 reg;
        u32 loop;
        int count;

        /* bounce off if not halted, or being free'd */
        if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
                return -EINVAL;

        dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
                    sc->hw_context);

        /*
         * Step 1: Wait for the context to actually halt.
         *
         * The error interrupt is asynchronous to actually setting halt
         * on the context.
         */
        loop = 0;
        while (1) {
                reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
                if (reg & SC(STATUS_CTXT_HALTED_SMASK))
                        break;
                if (loop > 100) {
                        dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
                                   __func__, sc->sw_index, sc->hw_context);
                        return -ETIME;
                }
                loop++;
                udelay(1);
        }

        /*
         * Step 2: Ensure no users are still trying to write to PIO.
         *
         * For kernel contexts, we have already turned off buffer allocation.
         * Now wait for the buffer count to go to zero.
         *
         * For user contexts, the user handling code has cut off write access
         * to the context's PIO pages before calling this routine and will
         * restore write access after this routine returns.
         */
        if (sc->type != SC_USER) {
                /* kernel context */
                loop = 0;
                while (1) {
                        count = get_buffers_allocated(sc);
                        if (count == 0)
                                break;
                        if (loop > 100) {
                                dd_dev_err(dd,
                                           "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
                                           __func__, sc->sw_index,
                                           sc->hw_context, count);
                        }
                        loop++;
                        udelay(1);
                }
        }

        /*
         * Step 3: Wait for all packets to egress.
         * This is done while disabling the send context
         *
         * Step 4: Disable the context
         *
         * This is a superset of the halt.  After the disable, the
         * errors can be cleared.
         */
        sc_disable(sc);

        /*
         * Step 5: Enable the context
         *
         * This enable will clear the halted flag and per-send context
         * error flags.
         */
        return sc_enable(sc);
}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
        struct send_context *sc;
        int i;

        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                /*
                 * Don't disable unallocated, unfrozen, or user send contexts.
                 * User send contexts will be disabled when the process
                 * calls into the driver to reset its context.
                 */
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;

                /* only need to disable, the context is already stopped */
                sc_disable(sc);
        }
}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
        struct send_context *sc;
        int i;

        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;
                if (sc->flags & SCF_LINK_DOWN)
                        continue;

                sc_enable(sc);  /* will clear the sc frozen flag */
        }
}

/**
 * pio_kernel_linkup() - Re-enable send contexts after linkup event
 * @dd: valid device data
 *
 * When the link goes down, the freeze path is taken.  However, a link down
 * event is different from a freeze because if the send context is re-enabled
 * whoever is sending data will start sending again, which will hang any
 * QP that is sending data.
 *
 * The freeze path now looks at the type of event that occurred and takes
 * this path for a link down event.
 */
void pio_kernel_linkup(struct hfi1_devdata *dd)
{
        struct send_context *sc;
        int i;

        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
                        continue;

                sc_enable(sc);  /* will clear the sc link down flag */
        }
}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *      -ETIMEDOUT - if we wait too long
 *      -EIO       - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
        u64 reg;
        int max, count = 0;

        /* max is the longest possible HW init time / delay */
        max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
        while (1) {
                reg = read_csr(dd, SEND_PIO_INIT_CTXT);
                if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
                        break;
                if (count >= max)
                        return -ETIMEDOUT;
                udelay(5);
                count++;
        }

        return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
        int ret;

        /* make sure the init engine is not busy */
        ret = pio_init_wait_progress(dd);
        /* ignore any timeout */
        if (ret == -EIO) {
                /* clear the error */
                write_csr(dd, SEND_PIO_ERR_CLEAR,
                          SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
        }

        /* reset init all */
        write_csr(dd, SEND_PIO_INIT_CTXT,
                  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
        udelay(2);
        ret = pio_init_wait_progress(dd);
        if (ret < 0) {
                dd_dev_err(dd,
                           "PIO send context init %s while initializing all PIO blocks\n",
                           ret == -ETIMEDOUT ? "is stuck" : "had an error");
        }
}

/* enable the context */
int sc_enable(struct send_context *sc)
{
        u64 sc_ctrl, reg, pio;
        struct hfi1_devdata *dd;
        unsigned long flags;
        int ret = 0;

        if (!sc)
                return -EINVAL;
        dd = sc->dd;

        /*
         * Obtain the allocator lock to guard against any allocation
         * attempts (which should not happen prior to context being
         * enabled). On the release/disable side we don't need to
         * worry about locking since the releaser will not do anything
         * if the context accounting values have not changed.
         */
        spin_lock_irqsave(&sc->alloc_lock, flags);
        sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
        if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
                goto unlock; /* already enabled */

        /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

        *sc->hw_free = 0;
        sc->free = 0;
        sc->alloc_free = 0;
        sc->fill = 0;
        sc->fill_wrap = 0;
        sc->sr_head = 0;
        sc->sr_tail = 0;
        sc->flags = 0;
        /* the alloc lock ensures no fast path allocation */
        reset_buffers_allocated(sc);

        /*
         * Clear all per-context errors.  Some of these will be set when
         * we are re-enabling after a context halt.  Now that the context
         * is disabled, the halt will not clear until after the PIO init
         * engine runs below.
         */
        reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
        if (reg)
                write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

        /*
         * The HW PIO initialization engine can handle only one init
         * request at a time. Serialize access to each device's engine.
         */
        spin_lock(&dd->sc_init_lock);
        /*
         * Since access to this code block is serialized and
         * each access waits for the initialization to complete
         * before releasing the lock, the PIO initialization engine
         * should not be in use, so we don't have to wait for the
         * InProgress bit to go down.
         */
        pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
               SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
                SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
        write_csr(dd, SEND_PIO_INIT_CTXT, pio);
        /*
         * Wait until the engine is done.  Give the chip the required time
         * so, hopefully, we read the register just once.
         */
        udelay(2);
        ret = pio_init_wait_progress(dd);
        spin_unlock(&dd->sc_init_lock);
        if (ret) {
                dd_dev_err(dd,
                           "sctxt%u(%u): Context not enabled due to init failure %d\n",
                           sc->sw_index, sc->hw_context, ret);
                goto unlock;
        }

        /*
         * All is well. Enable the context.
         */
        sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
        write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
        /*
         * Read SendCtxtCtrl to force the write out and prevent a timing
         * hazard where a PIO write may reach the context before the enable.
         */
        read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
        sc->flags |= SCF_ENABLED;

unlock:
        spin_unlock_irqrestore(&sc->alloc_lock, flags);

        return ret;
}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
        if (!sc)
                return;

        /* a 0->1 transition schedules a credit return */
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
                        SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
        /*
         * Ensure that the write is flushed and the credit return is
         * scheduled. We care more about the 0 -> 1 transition.
         */
        read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
        /* set back to 0 for next time */
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
        if (!sc)
                return;

        sc_wait_for_packet_egress(sc, 1);
}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
        if (!sc)
                return;

        dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
                    __func__, sc->sw_index, sc->hw_context);
}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *      - mark the context as halted or frozen
 *      - stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{
        unsigned long flags;

        /* stop buffer allocations */
        spin_lock_irqsave(&sc->alloc_lock, flags);
        /* mark the context */
        sc->flags |= flag;
        sc->flags &= ~SCF_ENABLED;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        wake_up(&sc->halt_wait);
}

1435 #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1436 #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
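
/*
 * Editor's note (illustrative arithmetic, assuming PIO_BLOCK_SIZE is
 * 64 bytes as defined in pio.h): BLOCK_DWORDS is then 64 / 4 = 16, so
 * a 40-dword packet (PBC included) costs dwords_to_blocks(40) =
 * DIV_ROUND_UP(40, 16) = 3 send blocks, i.e. 3 credits.
 */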
1437 
1438 /*
1439  * The send context buffer "allocator".
1440  *
1441  * @sc: the PIO send context we are allocating from
1442  * @dw_len: length of the whole packet - including PBC - in dwords
1443  * @cb: optional callback to call when the buffer is finished sending
1444  * @arg: argument for cb
1445  *
1446  * Return a pointer to a PIO buffer on success, NULL if not enough
1447  * room, or ERR_PTR(-ECOMM) if the context is disabled (link down).
1448  */
1449 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1450                                 pio_release_cb cb, void *arg)
1451 {
1452         struct pio_buf *pbuf = NULL;
1453         unsigned long flags;
1454         unsigned long avail;
1455         unsigned long blocks = dwords_to_blocks(dw_len);
1456         u32 fill_wrap;
1457         int trycount = 0;
1458         u32 head, next;
1459 
1460         spin_lock_irqsave(&sc->alloc_lock, flags);
1461         if (!(sc->flags & SCF_ENABLED)) {
1462                 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1463                 return ERR_PTR(-ECOMM);
1464         }
1465 
1466 retry:
1467         avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1468         if (blocks > avail) {
1469                 /* not enough room */
1470                 if (unlikely(trycount)) { /* already tried to get more room */
1471                         spin_unlock_irqrestore(&sc->alloc_lock, flags);
1472                         goto done;
1473                 }
1474                 /* copy from receiver cache line and recalculate */
1475                 sc->alloc_free = READ_ONCE(sc->free);
1476                 avail =
1477                         (unsigned long)sc->credits -
1478                         (sc->fill - sc->alloc_free);
1479                 if (blocks > avail) {
1480                         /* still no room, actively update */
1481                         sc_release_update(sc);
1482                         sc->alloc_free = READ_ONCE(sc->free);
1483                         trycount++;
1484                         goto retry;
1485                 }
1486         }
1487 
1488         /* there is enough room */
1489 
1490         preempt_disable();
1491         this_cpu_inc(*sc->buffers_allocated);
1492 
1493         /* read this once */
1494         head = sc->sr_head;
1495 
1496         /* "allocate" the buffer */
1497         sc->fill += blocks;
1498         fill_wrap = sc->fill_wrap;
1499         sc->fill_wrap += blocks;
1500         if (sc->fill_wrap >= sc->credits)
1501                 sc->fill_wrap = sc->fill_wrap - sc->credits;
1502 
1503         /*
1504          * Fill the parts that the releaser looks at before moving the head.
1505          * The only necessary piece is the sent_at field.  The credits
1506          * we have just allocated cannot have been returned yet, so the
1507          * cb and arg will not be looked at for a "while".  Put them
1508          * on this side of the memory barrier anyway.
1509          */
1510         pbuf = &sc->sr[head].pbuf;
1511         pbuf->sent_at = sc->fill;
1512         pbuf->cb = cb;
1513         pbuf->arg = arg;
1514         pbuf->sc = sc;  /* could be filled in at sc->sr init time */
1515         /* make sure this is in memory before updating the head */
1516 
1517         /* calculate next head index, do not store */
1518         next = head + 1;
1519         if (next >= sc->sr_size)
1520                 next = 0;
1521         /*
1522          * update the head - must be last! - the releaser can look at fields
1523          * in pbuf once we move the head
1524          */
1525         smp_wmb();
1526         sc->sr_head = next;
1527         spin_unlock_irqrestore(&sc->alloc_lock, flags);
1528 
1529         /* finish filling in the buffer outside the lock */
1530         pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
1531         pbuf->end = sc->base_addr + sc->size;
1532         pbuf->qw_written = 0;
1533         pbuf->carry_bytes = 0;
1534         pbuf->carry.val64 = 0;
1535 done:
1536         return pbuf;
1537 }
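
/*
 * Editor's note: a minimal caller sketch, not part of pio.c.  It shows
 * the three outcomes of sc_buffer_alloc(): a usable buffer, NULL when
 * there is no room, and ERR_PTR(-ECOMM) when the context is disabled.
 * The function name and bail-out policy are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static struct pio_buf *example_get_pio_buf(struct send_context *sc,
                                           u32 dw_len)
{
        struct pio_buf *pbuf = sc_buffer_alloc(sc, dw_len, NULL, NULL);

        if (IS_ERR(pbuf))
                return NULL;    /* -ECOMM: context/link down, give up */
        if (!pbuf)
                return NULL;    /* no room: caller should queue and retry */
        return pbuf;            /* ready for pio_copy()/segment writes */
}
#endif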
1538 
1539 /*
1540  * There are at least two entities that can turn on credit return
1541  * interrupts and they can overlap.  Avoid problems by implementing
1542  * a count scheme that is enforced by a lock.  The lock is needed because
1543  * the count and CSR write must be paired.
1544  */
1545 
1546 /*
1547  * Start credit return interrupts.  This is managed by a count.  If already
1548  * on, just increment the count.
1549  */
1550 void sc_add_credit_return_intr(struct send_context *sc)
1551 {
1552         unsigned long flags;
1553 
1554         /* lock must surround both the count change and the CSR update */
1555         spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1556         if (sc->credit_intr_count == 0) {
1557                 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1558                 write_kctxt_csr(sc->dd, sc->hw_context,
1559                                 SC(CREDIT_CTRL), sc->credit_ctrl);
1560         }
1561         sc->credit_intr_count++;
1562         spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1563 }
1564 
1565 /*
1566  * Stop credit return interrupts.  This is managed by a count.  Decrement
1567  * the count; if this was the last user, turn the credit interrupts off.
1568  */
1569 void sc_del_credit_return_intr(struct send_context *sc)
1570 {
1571         unsigned long flags;
1572 
1573         WARN_ON(sc->credit_intr_count == 0);
1574 
1575         /* lock must surround both the count change and the CSR update */
1576         spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1577         sc->credit_intr_count--;
1578         if (sc->credit_intr_count == 0) {
1579                 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1580                 write_kctxt_csr(sc->dd, sc->hw_context,
1581                                 SC(CREDIT_CTRL), sc->credit_ctrl);
1582         }
1583         spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1584 }
1585 
1586 /*
1587  * The caller must be careful when calling this: every call with
1588  * needint set must eventually be paired with one with needint clear.
1589  */
1590 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1591 {
1592         if (needint)
1593                 sc_add_credit_return_intr(sc);
1594         else
1595                 sc_del_credit_return_intr(sc);
1596         trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1597         if (needint)
1598                 sc_return_credits(sc);
1599 }
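
/*
 * Editor's note (illustrative pairing, not part of pio.c): each call
 * with needint set must eventually be matched by one with needint
 * clear, e.g.
 *
 *      hfi1_sc_wantpiobuf_intr(sc, 1);  // arm: a QP queued on piowait
 *      ...credit return interrupt fires, sc_piobufavail() runs...
 *      hfi1_sc_wantpiobuf_intr(sc, 0);  // disarm: wait list drained
 *
 * sc_piobufavail() below follows exactly this pattern, re-arming with
 * needint = 1 if waiters remain after the batch wakeup.
 */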
1600 
1601 /**
1602  * sc_piobufavail - callback when a PIO buffer is available
1603  * @sc: the send context
1604  *
1605  * This is called from the interrupt handler when a PIO buffer is
1606  * available after hfi1_verbs_send() returned an error that no buffers were
1607  * available. Disable the interrupt if there are no more QPs waiting.
1608  */
1609 static void sc_piobufavail(struct send_context *sc)
1610 {
1611         struct hfi1_devdata *dd = sc->dd;
1612         struct list_head *list;
1613         struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1614         struct rvt_qp *qp;
1615         struct hfi1_qp_priv *priv;
1616         unsigned long flags;
1617         uint i, n = 0, top_idx = 0;
1618 
1619         if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1620             dd->send_contexts[sc->sw_index].type != SC_VL15)
1621                 return;
1622         list = &sc->piowait;
1623         /*
1624          * Note: checking that the piowait list is empty and clearing
1625          * the buffer available interrupt needs to be atomic or we
1626          * could end up with QPs on the wait list with the interrupt
1627          * disabled.
1628          */
1629         write_seqlock_irqsave(&sc->waitlock, flags);
1630         while (!list_empty(list)) {
1631                 struct iowait *wait;
1632 
1633                 if (n == ARRAY_SIZE(qps))
1634                         break;
1635                 wait = list_first_entry(list, struct iowait, list);
1636                 iowait_get_priority(wait);
1637                 qp = iowait_to_qp(wait);
1638                 priv = qp->priv;
1639                 list_del_init(&priv->s_iowait.list);
1640                 priv->s_iowait.lock = NULL;
1641                 if (n) {
1642                         priv = qps[top_idx]->priv;
1643                         top_idx = iowait_priority_update_top(wait,
1644                                                              &priv->s_iowait,
1645                                                              n, top_idx);
1646                 }
1647 
1648                 /* refcount held until actual wake up */
1649                 qps[n++] = qp;
1650         }
1651         /*
1652          * If there were waiters and more remain, ensure that we
1653          * redo the force to re-arm and avoid a potential hang.
1654          */
1655         if (n) {
1656                 hfi1_sc_wantpiobuf_intr(sc, 0);
1657                 if (!list_empty(list))
1658                         hfi1_sc_wantpiobuf_intr(sc, 1);
1659         }
1660         write_sequnlock_irqrestore(&sc->waitlock, flags);
1661 
1662         /* Wake up the top-priority one first */
1663         if (n)
1664                 hfi1_qp_wakeup(qps[top_idx],
1665                                RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1666         for (i = 0; i < n; i++)
1667                 if (i != top_idx)
1668                         hfi1_qp_wakeup(qps[i],
1669                                        RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1670 }
1671 
1672 /* translate a send credit update to a bit code of reasons */
1673 static inline int fill_code(u64 hw_free)
1674 {
1675         int code = 0;
1676 
1677         if (hw_free & CR_STATUS_SMASK)
1678                 code |= PRC_STATUS_ERR;
1679         if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1680                 code |= PRC_PBC;
1681         if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1682                 code |= PRC_THRESHOLD;
1683         if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1684                 code |= PRC_FILL_ERR;
1685         if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1686                 code |= PRC_SC_DISABLE;
1687         return code;
1688 }
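
/*
 * Editor's note (worked example): a credit return word with both the
 * threshold and force reasons set, i.e.
 *      hw_free = CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK |
 *                CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK;
 * yields fill_code(hw_free) == (PRC_THRESHOLD | PRC_SC_DISABLE).
 */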
1689 
1690 /* use the jiffies compare to get the wrap right */
1691 #define sent_before(a, b) time_before(a, b)     /* a < b */
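
/*
 * Editor's note (worked example, shown with 32-bit values): time_before()
 * compares via signed subtraction, so ordering survives counter wrap.
 * sent_before(0xfffffff0, 0x10) is true because 0xfffffff0 - 0x10 =
 * 0xffffffe0, which is -0x20 when read as signed: "a" was filled before
 * "b" even though it is numerically larger.
 */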
1692 
1693 /*
1694  * The send context buffer "releaser".
1695  */
1696 void sc_release_update(struct send_context *sc)
1697 {
1698         struct pio_buf *pbuf;
1699         u64 hw_free;
1700         u32 head, tail;
1701         unsigned long old_free;
1702         unsigned long free;
1703         unsigned long extra;
1704         unsigned long flags;
1705         int code;
1706 
1707         if (!sc)
1708                 return;
1709 
1710         spin_lock_irqsave(&sc->release_lock, flags);
1711         /* update free */
1712         hw_free = le64_to_cpu(*sc->hw_free);            /* volatile read */
1713         old_free = sc->free;
1714         extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1715                         - (old_free & CR_COUNTER_MASK))
1716                                 & CR_COUNTER_MASK;
1717         free = old_free + extra;
1718         trace_hfi1_piofree(sc, extra);
1719 
1720         /* call sent buffer callbacks */
1721         code = -1;                              /* code not yet set */
1722         head = READ_ONCE(sc->sr_head);  /* snapshot the head */
1723         tail = sc->sr_tail;
1724         while (head != tail) {
1725                 pbuf = &sc->sr[tail].pbuf;
1726 
1727                 if (sent_before(free, pbuf->sent_at)) {
1728                         /* not sent yet */
1729                         break;
1730                 }
1731                 if (pbuf->cb) {
1732                         if (code < 0) /* fill in code on first user */
1733                                 code = fill_code(hw_free);
1734                         (*pbuf->cb)(pbuf->arg, code);
1735                 }
1736 
1737                 tail++;
1738                 if (tail >= sc->sr_size)
1739                         tail = 0;
1740         }
1741         sc->sr_tail = tail;
1742         /* make sure tail is updated before free */
1743         smp_wmb();
1744         sc->free = free;
1745         spin_unlock_irqrestore(&sc->release_lock, flags);
1746         sc_piobufavail(sc);
1747 }
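
/*
 * Editor's note (worked example of the "extra" computation above,
 * assuming an 11-bit CR counter, i.e. CR_COUNTER_MASK == 0x7ff): if the
 * hardware counter reads 5 while old_free ends in 0x7fe, then
 *      extra = (5 - 0x7fe) & 0x7ff = 7
 * so the modular subtraction recovers the 7 credits returned across the
 * counter wrap and free advances by 7.
 */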
1748 
1749 /*
1750  * Send context group releaser.  Argument is the send context that caused
1751  * the interrupt.  Called from the send context interrupt handler.
1752  *
1753  * Call release on all contexts in the group.
1754  *
1755  * This routine takes the sc_lock without an irqsave because it is only
1756  * called from an interrupt handler.  Adjust if that changes.
1757  */
1758 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1759 {
1760         struct send_context *sc;
1761         u32 sw_index;
1762         u32 gc, gc_end;
1763 
1764         spin_lock(&dd->sc_lock);
1765         sw_index = dd->hw_to_sw[hw_context];
1766         if (unlikely(sw_index >= dd->num_send_contexts)) {
1767                 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1768                            __func__, hw_context, sw_index);
1769                 goto done;
1770         }
1771         sc = dd->send_contexts[sw_index].sc;
1772         if (unlikely(!sc))
1773                 goto done;
1774 
1775         gc = group_context(hw_context, sc->group);
1776         gc_end = gc + group_size(sc->group);
1777         for (; gc < gc_end; gc++) {
1778                 sw_index = dd->hw_to_sw[gc];
1779                 if (unlikely(sw_index >= dd->num_send_contexts)) {
1780                         dd_dev_err(dd,
1781                                    "%s: invalid hw (%u) to sw (%u) mapping\n",
1782                                    __func__, hw_context, sw_index);
1783                         continue;
1784                 }
1785                 sc_release_update(dd->send_contexts[sw_index].sc);
1786         }
1787 done:
1788         spin_unlock(&dd->sc_lock);
1789 }
1790 
1791 /*
1792  * pio_select_send_context_vl() - select send context
1793  * @dd: devdata
1794  * @selector: a spreading factor
1795  * @vl: this vl
1796  *
1797  * This function returns a send context based on the selector and a vl.
1798  * The mapping fields are protected by RCU.
1799  */
1800 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1801                                                 u32 selector, u8 vl)
1802 {
1803         struct pio_vl_map *m;
1804         struct pio_map_elem *e;
1805         struct send_context *rval;
1806 
1807         /*
1808          * NOTE: this should only happen if the SC->VL mapping changed
1809          * after the initial checks on the QP/AH.  The default path at
1810          * "done" below returns VL0's send context.
1811          */
1812         if (unlikely(vl >= num_vls)) {
1813                 rval = NULL;
1814                 goto done;
1815         }
1816 
1817         rcu_read_lock();
1818         m = rcu_dereference(dd->pio_map);
1819         if (unlikely(!m)) {
1820                 rcu_read_unlock();
1821                 return dd->vld[0].sc;
1822         }
1823         e = m->map[vl & m->mask];
1824         rval = e->ksc[selector & e->mask];
1825         rcu_read_unlock();
1826 
1827 done:
1828         rval = !rval ? dd->vld[0].sc : rval;
1829         return rval;
1830 }
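
/*
 * Editor's note (worked example): with m->mask == 0x3 and a per-VL
 * element mask of 0x1 (two contexts assigned to that VL), a call with
 * vl == 2 and selector == 7 resolves to m->map[2]->ksc[7 & 1], i.e.
 * ksc[1]; the selector simply spreads traffic across the VL's contexts.
 */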
1831 
1832 /*
1833  * pio_select_send_context_sc() - select send context
1834  * @dd: devdata
1835  * @selector: a spreading factor
1836  * @sc5: the 5 bit sc
1837  *
1838  * This function returns a send context based on the selector and an sc.
1839  */
1840 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1841                                                 u32 selector, u8 sc5)
1842 {
1843         u8 vl = sc_to_vlt(dd, sc5);
1844 
1845         return pio_select_send_context_vl(dd, selector, vl);
1846 }
1847 
1848 /*
1849  * Free the indicated map struct
1850  */
1851 static void pio_map_free(struct pio_vl_map *m)
1852 {
1853         int i;
1854 
1855         for (i = 0; m && i < m->actual_vls; i++)
1856                 kfree(m->map[i]);
1857         kfree(m);
1858 }
1859 
1860 /*
1861  * Handle RCU callback
1862  */
1863 static void pio_map_rcu_callback(struct rcu_head *list)
1864 {
1865         struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1866 
1867         pio_map_free(m);
1868 }
1869 
1870 /*
1871  * Set credit return threshold for the kernel send context
1872  */
1873 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1874 {
1875         u32 thres;
1876 
1877         thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1878                                             50),
1879                     sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1880                                         dd->vld[i].mtu,
1881                                         dd->rcd[0]->rcvhdrqentsize));
1882         sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1883 }
1884 
1885 /*
1886  * pio_map_init - called when #vls change
1887  * @dd: hfi1_devdata
1888  * @port: port number
1889  * @num_vls: number of vls
1890  * @vl_scontexts: per vl send context mapping (optional)
1891  *
1892  * This routine changes the mapping based on the number of vls.
1893  *
1894  * vl_scontexts is used to specify a non-uniform vl/send context
1895  * loading. NULL implies auto computing the loading and giving each
1896  * VL a uniform distribution of send contexts per VL.
1897  *
1898  * The auto algorithm computes the sc_per_vl and the number of extra
1899  * send contexts. Any extra send contexts are added from the last VL
1900  * on down.
1901  *
1902  * rcu locking is used here to control access to the mapping fields.
1903  *
1904  * If either num_vls or num_send_contexts is not a power of 2, the
1905  * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1906  * rounded up to the next highest power of 2 and the first entry is
1907  * reused in a round robin fashion.
1908  *
1909  * If an error occurs, the map change is not done and the existing
1910  * mapping is unchanged.
1911  *
1912  */
1913 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1914 {
1915         int i, j;
1916         int extra, sc_per_vl;
1917         int scontext = 1;
1918         int num_kernel_send_contexts = 0;
1919         u8 lvl_scontexts[OPA_MAX_VLS];
1920         struct pio_vl_map *oldmap, *newmap;
1921 
1922         if (!vl_scontexts) {
1923                 for (i = 0; i < dd->num_send_contexts; i++)
1924                         if (dd->send_contexts[i].type == SC_KERNEL)
1925                                 num_kernel_send_contexts++;
1926                 /* truncate divide */
1927                 sc_per_vl = num_kernel_send_contexts / num_vls;
1928                 /* extras */
1929                 extra = num_kernel_send_contexts % num_vls;
1930                 vl_scontexts = lvl_scontexts;
1931                 /* add extras from last vl down */
1932                 for (i = num_vls - 1; i >= 0; i--, extra--)
1933                         vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1934         }
1935         /* build new map */
1936         newmap = kzalloc(sizeof(*newmap) +
1937                          roundup_pow_of_two(num_vls) *
1938                          sizeof(struct pio_map_elem *),
1939                          GFP_KERNEL);
1940         if (!newmap)
1941                 goto bail;
1942         newmap->actual_vls = num_vls;
1943         newmap->vls = roundup_pow_of_two(num_vls);
1944         newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1945         for (i = 0; i < newmap->vls; i++) {
1946                 /* save for wrap around */
1947                 int first_scontext = scontext;
1948 
1949                 if (i < newmap->actual_vls) {
1950                         int sz = roundup_pow_of_two(vl_scontexts[i]);
1951 
1952                         /* only allocate once */
1953                         newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
1954                                                  sz * sizeof(struct
1955                                                              send_context *),
1956                                                  GFP_KERNEL);
1957                         if (!newmap->map[i])
1958                                 goto bail;
1959                         newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1960                         /*
1961                          * assign send contexts and
1962                          * adjust credit return threshold
1963                          */
1964                         for (j = 0; j < sz; j++) {
1965                                 if (dd->kernel_send_context[scontext]) {
1966                                         newmap->map[i]->ksc[j] =
1967                                         dd->kernel_send_context[scontext];
1968                                         set_threshold(dd, scontext, i);
1969                                 }
1970                                 if (++scontext >= first_scontext +
1971                                                   vl_scontexts[i])
1972                                         /* wrap back to first send context */
1973                                         scontext = first_scontext;
1974                         }
1975                 } else {
1976                         /* just re-use entry without allocating */
1977                         newmap->map[i] = newmap->map[i % num_vls];
1978                 }
1979                 scontext = first_scontext + vl_scontexts[i];
1980         }
1981         /* newmap in hand, save old map */
1982         spin_lock_irq(&dd->pio_map_lock);
1983         oldmap = rcu_dereference_protected(dd->pio_map,
1984                                            lockdep_is_held(&dd->pio_map_lock));
1985 
1986         /* publish newmap */
1987         rcu_assign_pointer(dd->pio_map, newmap);
1988 
1989         spin_unlock_irq(&dd->pio_map_lock);
1990         /* success, free any old map after grace period */
1991         if (oldmap)
1992                 call_rcu(&oldmap->list, pio_map_rcu_callback);
1993         return 0;
1994 bail:
1995         /* free any partial allocation */
1996         pio_map_free(newmap);
1997         return -ENOMEM;
1998 }
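
/*
 * Editor's note (worked example of the auto distribution above): with 9
 * kernel send contexts and num_vls == 4, sc_per_vl = 9 / 4 = 2 and
 * extra = 9 % 4 = 1, so filling from the last VL down gives
 *      vl_scontexts[] = { 2, 2, 2, 3 }
 * Each per-VL map is then sized to the next power of 2 (here 2, 2, 2
 * and 4) with the assigned contexts reused round robin.
 */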
1999 
2000 void free_pio_map(struct hfi1_devdata *dd)
2001 {
2002         /* Free PIO map if allocated */
2003         if (rcu_access_pointer(dd->pio_map)) {
2004                 spin_lock_irq(&dd->pio_map_lock);
2005                 pio_map_free(rcu_access_pointer(dd->pio_map));
2006                 RCU_INIT_POINTER(dd->pio_map, NULL);
2007                 spin_unlock_irq(&dd->pio_map_lock);
2008                 synchronize_rcu();
2009         }
2010         kfree(dd->kernel_send_context);
2011         dd->kernel_send_context = NULL;
2012 }
2013 
2014 int init_pervl_scs(struct hfi1_devdata *dd)
2015 {
2016         int i;
2017         u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
2018         u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
2019         u32 ctxt;
2020         struct hfi1_pportdata *ppd = dd->pport;
2021 
2022         dd->vld[15].sc = sc_alloc(dd, SC_VL15,
2023                                   dd->rcd[0]->rcvhdrqentsize, dd->node);
2024         if (!dd->vld[15].sc)
2025                 return -ENOMEM;
2026 
2027         hfi1_init_ctxt(dd->vld[15].sc);
2028         dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
2029 
2030         dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
2031                                                sizeof(struct send_context *),
2032                                                GFP_KERNEL, dd->node);
2033         if (!dd->kernel_send_context)
2034                 goto freesc15;
2035 
2036         dd->kernel_send_context[0] = dd->vld[15].sc;
2037 
2038         for (i = 0; i < num_vls; i++) {
2039                 /*
2040                  * Since this function does not deal with a specific
2041                  * receive context but we need the RcvHdrQ entry size,
2042                  * use the size from rcd[0]. It is guaranteed to be
2043                  * valid at this point and will remain the same for all
2044                  * receive contexts.
2045                  */
2046                 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
2047                                          dd->rcd[0]->rcvhdrqentsize, dd->node);
2048                 if (!dd->vld[i].sc)
2049                         goto nomem;
2050                 dd->kernel_send_context[i + 1] = dd->vld[i].sc;
2051                 hfi1_init_ctxt(dd->vld[i].sc);
2052                 /* non-VL15 contexts start with the max MTU */
2053                 dd->vld[i].mtu = hfi1_max_mtu;
2054         }
2055         for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2056                 dd->kernel_send_context[i + 1] =
2057                 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
2058                 if (!dd->kernel_send_context[i + 1])
2059                         goto nomem;
2060                 hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
2061         }
2062 
2063         sc_enable(dd->vld[15].sc);
2064         ctxt = dd->vld[15].sc->hw_context;
2065         mask = all_vl_mask & ~(1LL << 15);
2066         write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2067         dd_dev_info(dd,
2068                     "Using send context %u(%u) for VL15\n",
2069                     dd->vld[15].sc->sw_index, ctxt);
2070 
2071         for (i = 0; i < num_vls; i++) {
2072                 sc_enable(dd->vld[i].sc);
2073                 ctxt = dd->vld[i].sc->hw_context;
2074                 mask = all_vl_mask & ~(data_vls_mask);
2075                 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2076         }
2077         for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2078                 sc_enable(dd->kernel_send_context[i + 1]);
2079                 ctxt = dd->kernel_send_context[i + 1]->hw_context;
2080                 mask = all_vl_mask & ~(data_vls_mask);
2081                 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2082         }
2083 
2084         if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2085                 goto nomem;
2086         return 0;
2087 
2088 nomem:
2089         for (i = 0; i < num_vls; i++) {
2090                 sc_free(dd->vld[i].sc);
2091                 dd->vld[i].sc = NULL;
2092         }
2093 
2094         for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2095                 sc_free(dd->kernel_send_context[i + 1]);
2096 
2097         kfree(dd->kernel_send_context);
2098         dd->kernel_send_context = NULL;
2099 
2100 freesc15:
2101         sc_free(dd->vld[15].sc);
2102         return -ENOMEM;
2103 }
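
/*
 * Editor's note (layout sketch, assuming INIT_SC_PER_VL == 2 as in
 * pio.h): with num_vls == 8, kernel_send_context[] holds the VL15
 * context at index 0, one context per data VL at indexes 1..8, and the
 * second per-VL set at indexes 9..16, matching the i + 1 indexing in
 * init_pervl_scs() above.
 */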
2104 
2105 int init_credit_return(struct hfi1_devdata *dd)
2106 {
2107         int ret;
2108         int i;
2109 
2110         dd->cr_base = kcalloc(
2111                 node_affinity.num_possible_nodes,
2112                 sizeof(struct credit_return_base),
2113                 GFP_KERNEL);
2114         if (!dd->cr_base) {
2115                 ret = -ENOMEM;
2116                 goto done;
2117         }
2118         for_each_node_with_cpus(i) {
2119                 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2120 
2121                 set_dev_node(&dd->pcidev->dev, i);
2122                 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2123                                                        bytes,
2124                                                        &dd->cr_base[i].dma,
2125                                                        GFP_KERNEL);
2126                 if (!dd->cr_base[i].va) {
2127                         set_dev_node(&dd->pcidev->dev, dd->node);
2128                         dd_dev_err(dd,
2129                                    "Unable to allocate credit return DMA range for NUMA %d\n",
2130                                    i);
2131                         ret = -ENOMEM;
2132                         goto done;
2133                 }
2134         }
2135         set_dev_node(&dd->pcidev->dev, dd->node);
2136 
2137         ret = 0;
2138 done:
2139         return ret;
2140 }
2141 
2142 void free_credit_return(struct hfi1_devdata *dd)
2143 {
2144         int i;
2145 
2146         if (!dd->cr_base)
2147                 return;
2148         for (i = 0; i < node_affinity.num_possible_nodes; i++) {
2149                 if (dd->cr_base[i].va) {
2150                         dma_free_coherent(&dd->pcidev->dev,
2151                                           TXE_NUM_CONTEXTS *
2152                                           sizeof(struct credit_return),
2153                                           dd->cr_base[i].va,
2154                                           dd->cr_base[i].dma);
2155                 }
2156         }
2157         kfree(dd->cr_base);
2158         dd->cr_base = NULL;
2159 }
2160 
2161 void seqfile_dump_sci(struct seq_file *s, u32 i,
2162                       struct send_context_info *sci)
2163 {
2164         struct send_context *sc = sci->sc;
2165         u64 reg;
2166 
2167         seq_printf(s, "SCI %u: type %u base %u credits %u\n",
2168                    i, sci->type, sci->base, sci->credits);
2169         seq_printf(s, "  flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
2170                    sc->flags,  sc->sw_index, sc->hw_context, sc->group);
2171         seq_printf(s, "  sr_size %u credits %u sr_head %u sr_tail %u\n",
2172                    sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
2173         seq_printf(s, "  fill %lu free %lu fill_wrap %u alloc_free %lu\n",
2174                    sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
2175         seq_printf(s, "  credit_intr_count %u credit_ctrl 0x%llx\n",
2176                    sc->credit_intr_count, sc->credit_ctrl);
2177         reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
2178         seq_printf(s, "  *hw_free %llu CurrentFree %llu LastReturned %llu\n",
2179                    (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
2180                     CR_COUNTER_SHIFT,
2181                    (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
2182                     SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
2183                    reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
2184 }
