root/arch/powerpc/kvm/book3s_xive_template.c


DEFINITIONS

This source file includes the following definitions.
  1. GLUE(X_PFX,ack_pending)
  2. GLUE(X_PFX,esb_load)
  3. GLUE(X_PFX,source_eoi)
  4. GLUE(X_PFX,scan_interrupts)
  5. GLUE(X_PFX,h_xirr)
  6. GLUE(X_PFX,h_ipoll)
  7. GLUE(X_PFX,push_pending_to_hw)
  8. GLUE(X_PFX,scan_for_rerouted_irqs)
  9. GLUE(X_PFX,h_cppr)
  10. GLUE(X_PFX,h_eoi)
  11. GLUE(X_PFX,h_ipi)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 */

/* File to be included by other .c files */

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
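
/*
 * A minimal expansion sketch (the xive_vm_ prefix is an assumed
 * example; the actual prefix is supplied by the including .c file,
 * e.g. "#define X_PFX xive_vm_"):
 *
 *     GLUE(X_PFX,h_xirr)
 *         -> XGLUE(xive_vm_,h_xirr)    // X_PFX expands first
 *         -> xive_vm_h_xirr            // then ## pastes the tokens
 *
 * The XGLUE() indirection is what forces X_PFX to be macro-expanded
 * before ## concatenates; pasting directly in GLUE() would glue the
 * literal token "X_PFX" instead.
 */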

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY      1

static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
        u8 cppr;
        u16 ack;

        /*
         * Ensure any previous store to CPPR is ordered vs.
         * the subsequent loads from PIPR or ACK.
         */
        eieio();

        /* Perform the acknowledge OS to register cycle. */
        ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /* XXX Check grouping level */

        /* Anything ? */
        if (!((ack >> 8) & TM_QW1_NSR_EO))
                return;

        /* Grab CPPR of the most favored pending interrupt */
        cppr = ack & 0xff;
        if (cppr < 8)
                xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
        /* Check consistency */
        if (cppr >= xc->hw_cppr)
                pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
                        smp_processor_id(), cppr, xc->hw_cppr);
#endif

        /*
         * Update our image of the HW CPPR. We don't yet modify
         * xc->cppr; this will be done as we scan for interrupts
         * in the queues.
         */
        xc->hw_cppr = cppr;
}
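
/*
 * A decoding sketch for the ACK cycle above (the numbers are
 * illustrative, not taken from this file): the 16-bit ack packs the
 * NSR byte on top of the acknowledged CPPR byte, so e.g. ack = 0x8005
 * means NSR = 0x80 (TM_QW1_NSR_EO set, an OS interrupt is pending)
 * and CPPR = 5:
 *
 *     u8 nsr  = ack >> 8;            // 0x80, EO bit set
 *     u8 cppr = ack & 0xff;          // 5
 *     xc->pending |= 1 << cppr;      // pending gains bit 5 (0x20)
 */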

static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
        val >>= 64 - 8;
#endif
        return (u8)val;
}
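
/*
 * A usage sketch for the helper above, assuming the usual
 * asm/xive-regs.h definitions: an ESB "special" load selected by the
 * MMIO offset atomically installs a new PQ state and returns the old
 * one in the low two bits of the byte read back:
 *
 *     u8 pq = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_10);
 *     if (pq & XIVE_ESB_VAL_Q)
 *             ...;    // an occurrence was latched while held off
 */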

static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
                opal_int_eoi(hw_irq);
        else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
                /*
                 * For LSIs the HW EOI cycle is used rather than PQ bits,
                 * as they are automatically re-triggered in HW when still
                 * pending.
                 */
                __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
        } else {
                uint64_t eoi_val;

                /*
                 * Otherwise for EOI, we use the special MMIO that does
                 * a clear of both P and Q and returns the old Q,
                 * except for LSIs where we use the "EOI cycle" special
                 * load.
                 *
                 * This allows us to then do a re-trigger if Q was set
                 * rather than synthesizing an interrupt in software.
                 */
                eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

                /* Re-trigger if needed */
                if ((eoi_val & 1) && __x_trig_page(xd))
                        __x_writeq(0, __x_trig_page(xd));
        }
}
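
/*
 * For reference, a short summary of the PQ bits manipulated above
 * (paraphrasing the XIVE ESB semantics, not this file): P set means
 * an occurrence has been forwarded and the source is held off; Q set
 * means a further occurrence arrived while held off and was latched
 * rather than forwarded. The XIVE_ESB_SET_PQ_00 load thus re-enables
 * the source and, via the returned Q bit, tells us whether a
 * re-trigger is owed.
 */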

enum {
        scan_fetch,
        scan_poll,
        scan_eoi,
};
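
/*
 * A summary of the three scan modes, inferred from the callers below:
 * scan_fetch consumes the best interrupt and commits queue pointers
 * (H_XIRR), scan_poll only peeks and changes no state (H_IPOLL), and
 * scan_eoi refreshes the pending bits without adjusting the CPPR
 * (H_EOI).
 */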

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
                                       u8 pending, int scan_type)
{
        u32 hirq = 0;
        u8 prio = 0xff;

        /* Find highest pending priority */
        while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
                struct xive_q *q;
                u32 idx, toggle;
                __be32 *qpage;

                /*
                 * If pending is 0, this will return 0xff, which is
                 * what we want
                 */
                prio = ffs(pending) - 1;

                /* Don't scan past the guest cppr */
                if (prio >= xc->cppr || prio > 7) {
                        if (xc->mfrr < xc->cppr) {
                                prio = xc->mfrr;
                                hirq = XICS_IPI;
                        }
                        break;
                }

                /* Grab queue and pointers */
                q = &xc->queues[prio];
                idx = q->idx;
                toggle = q->toggle;

                /*
                 * Snapshot the queue page. The test further down for EOI
                 * must use the same "copy" that was used by __xive_read_eq
                 * since qpage can be set concurrently and we don't want
                 * to miss an EOI.
                 */
                qpage = READ_ONCE(q->qpage);

skip_ipi:
                /*
                 * Try to fetch from the queue. Will return 0 for a
                 * non-queueing priority (i.e., qpage == 0).
                 */
                hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

                /*
                 * If this was a signal for an MFRR change done by
                 * H_IPI we skip it. Additionally, if we were fetching
                 * we EOI it now, thus re-enabling reception of a new
                 * such signal.
                 *
                 * We also need to do that if prio is 0 and we had no
                 * page for the queue. In this case, we have a non-queued
                 * IPI that needs to be EOId.
                 *
                 * This is safe because if we have another pending MFRR
                 * change that wasn't observed above, the Q bit will have
                 * been set and another occurrence of the IPI will trigger.
                 */
                if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
                        if (scan_type == scan_fetch) {
                                GLUE(X_PFX,source_eoi)(xc->vp_ipi,
                                                       &xc->vp_ipi_data);
                                q->idx = idx;
                                q->toggle = toggle;
                        }
                        /* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
                        WARN_ON(hirq && hirq != XICS_IPI);
#endif
                        if (hirq)
                                goto skip_ipi;
                }

                /* If it's the dummy interrupt, continue searching */
                if (hirq == XICS_DUMMY)
                        goto skip_ipi;

                /* Clear the pending bit if the queue is now empty */
                if (!hirq) {
                        pending &= ~(1 << prio);

                        /*
                         * Check if the queue count needs adjusting due to
                         * interrupts being moved away.
                         */
                        if (atomic_read(&q->pending_count)) {
                                int p = atomic_xchg(&q->pending_count, 0);
                                if (p) {
#ifdef XIVE_RUNTIME_CHECKS
                                        WARN_ON(p > atomic_read(&q->count));
#endif
                                        atomic_sub(p, &q->count);
                                }
                        }
                }

                /*
                 * If the most favored prio we found pending is less
                 * favored than (or equal to) a pending IPI, we return
                 * the IPI instead.
                 */
                if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
                        prio = xc->mfrr;
                        hirq = XICS_IPI;
                        break;
                }

                /* If fetching, update queue pointers */
                if (scan_type == scan_fetch) {
                        q->idx = idx;
                        q->toggle = toggle;
                }
        }

        /* If we are just taking a "peek", do nothing else */
        if (scan_type == scan_poll)
                return hirq;

        /* Update the pending bits */
        xc->pending = pending;

        /*
         * If this is an EOI, that's it: no CPPR adjustment is done here,
         * all we needed was to clean up the stale pending bits and check
         * if there's anything left.
         */
        if (scan_type == scan_eoi)
                return hirq;

        /*
         * If we found an interrupt, adjust what the guest CPPR should
         * be as if we had just fetched that interrupt from HW.
         *
         * Note: This can only make xc->cppr smaller as the previous
         * loop will only exit with hirq != 0 if prio is lower than
         * the current xc->cppr. Thus we don't need to re-check xc->mfrr
         * for pending IPIs.
         */
        if (hirq)
                xc->cppr = prio;
        /*
         * If it was an IPI the HW CPPR might have been lowered too much
         * as the HW interrupt we use for IPIs is routed to priority 0.
         *
         * We re-sync it here.
         */
        if (xc->cppr != xc->hw_cppr) {
                xc->hw_cppr = xc->cppr;
                __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
        }

        return hirq;
}
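
/*
 * A sketch of the event queue entries consumed above (matching the
 * walk in scan_for_rerouted_irqs() below): each 32-bit EQ entry
 * carries a generation flag in its top bit and the interrupt number
 * in the low 31 bits; an entry is present only while its generation
 * bit differs from the queue's current toggle:
 *
 *     u32 entry = be32_to_cpup(qpage + idx);
 *     bool more = (entry >> 31) != toggle;    // flips every wrap
 *     u32 irq   = entry & 0x7fffffff;
 */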

X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 old_cppr;
        u32 hirq;

        pr_devel("H_XIRR\n");

        xc->GLUE(X_STAT_PFX,h_xirr)++;

        /* First collect pending bits from HW */
        GLUE(X_PFX,ack_pending)(xc);

        pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
                 xc->pending, xc->hw_cppr, xc->cppr);

        /* Grab previous CPPR and reverse map it */
        old_cppr = xive_prio_to_guest(xc->cppr);

        /* Scan for actual interrupts */
        hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

        pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
                 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
        /* That should never hit */
        if (hirq & 0xff000000)
                pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

        /*
         * XXX We could check if the interrupt is masked here and
         * filter it. If we chose to do so, we would need to do:
         *
         *    if (masked) {
         *        lock();
         *        if (masked) {
         *            old_Q = true;
         *            hirq = 0;
         *        }
         *        unlock();
         *    }
         */

        /* Return interrupt and old CPPR in GPR4 */
        vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

        return H_SUCCESS;
}
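
/*
 * A worked example of the XIRR packing above: the previous CPPR rides
 * in the top byte and the interrupt source in the low 24 bits, so
 * hirq = 0x1234 with old_cppr = 5 yields GPR4 = 0x05001234; the guest
 * later EOIs by handing the same value back to H_EOI.
 */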

X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 pending = xc->pending;
        u32 hirq;

        pr_devel("H_IPOLL(server=%ld)\n", server);

        xc->GLUE(X_STAT_PFX,h_ipoll)++;

        /* Grab the target VCPU if not the current one */
        if (xc->server_num != server) {
                vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
                if (!vcpu)
                        return H_PARAMETER;
                xc = vcpu->arch.xive_vcpu;

                /* Scan all priorities */
                pending = 0xff;
        } else {
                /* Grab pending interrupt if any */
                __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
                u8 pipr = be64_to_cpu(qw1) & 0xff;
                if (pipr < 8)
                        pending |= 1 << pipr;
        }

        hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

        /* Return interrupt and current CPPR in GPR4 */
        vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

        return H_SUCCESS;
}
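
/*
 * A decoding note for the TIMA read above: per the usual TM_QW1_OS
 * layout (an assumption of this sketch, see asm/xive-regs.h), the OS
 * word ends with the PIPR byte, so the pending interrupt priority is
 * the low byte of the big-endian qword:
 *
 *     u8 pipr = be64_to_cpu(qw1) & 0xff;    // TM_PIPR is the last byte
 */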

static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
        u8 pending, prio;

        pending = xc->pending;
        if (xc->mfrr != 0xff) {
                if (xc->mfrr < 8)
                        pending |= 1 << xc->mfrr;
                else
                        pending |= 0x80;
        }
        if (!pending)
                return;
        prio = ffs(pending) - 1;

        __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}
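
/*
 * A worked example of the ffs() trick above: with pending = 0x28
 * (priorities 3 and 5 pending), ffs() returns 4, so prio = 3, the
 * most favored (lowest numbered) pending priority. Writing that one
 * priority to TM_SPC_SET_OS_PENDING is enough for the HW to signal
 * the VP again.
 */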

static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
                                               struct kvmppc_xive_vcpu *xc)
{
        unsigned int prio;

        /* For each priority that is now masked */
        for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
                struct xive_q *q = &xc->queues[prio];
                struct kvmppc_xive_irq_state *state;
                struct kvmppc_xive_src_block *sb;
                u32 idx, toggle, entry, irq, hw_num;
                struct xive_irq_data *xd;
                __be32 *qpage;
                u16 src;

                idx = q->idx;
                toggle = q->toggle;
                qpage = READ_ONCE(q->qpage);
                if (!qpage)
                        continue;

                /* For each interrupt in the queue */
                for (;;) {
                        entry = be32_to_cpup(qpage + idx);

                        /* No more ? */
                        if ((entry >> 31) == toggle)
                                break;
                        irq = entry & 0x7fffffff;

                        /* Skip dummies and IPIs */
                        if (irq == XICS_DUMMY || irq == XICS_IPI)
                                goto next;
                        sb = kvmppc_xive_find_source(xive, irq, &src);
                        if (!sb)
                                goto next;
                        state = &sb->irq_state[src];

                        /* Has it been rerouted ? */
                        if (xc->server_num == state->act_server)
                                goto next;

                        /*
                         * All right, it *has* been re-routed, kill it from
                         * the queue.
                         */
                        qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

                        /* Find the HW interrupt */
                        kvmppc_xive_select_irq(state, &hw_num, &xd);

                        /* If it's not an LSI, set PQ to 11 so the EOI will force a resend */
                        if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
                                GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

                        /* EOI the source */
                        GLUE(X_PFX,source_eoi)(hw_num, xd);

                next:
                        idx = (idx + 1) & q->msk;
                        if (idx == 0)
                                toggle ^= 1;
                }
        }
}
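
/*
 * A short rationale for the PQ = 11 dance above (restating the ESB
 * semantics): with both P and Q set, the subsequent EOI observes a
 * latched occurrence and re-triggers the interrupt, which is then
 * routed through its *new* target queue instead of this masked one.
 */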

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        u8 old_cppr;

        pr_devel("H_CPPR(cppr=%ld)\n", cppr);

        xc->GLUE(X_STAT_PFX,h_cppr)++;

        /* Map CPPR */
        cppr = xive_prio_from_guest(cppr);

        /* Remember old and update SW state */
        old_cppr = xc->cppr;
        xc->cppr = cppr;

        /*
         * Order the above update of xc->cppr with the subsequent
         * read of xc->mfrr inside push_pending_to_hw()
         */
        smp_mb();

        if (cppr > old_cppr) {
                /*
                 * We are masking less, we need to look for pending things
                 * to deliver and set VP pending bits accordingly to trigger
                 * a new interrupt, otherwise we might miss MFRR changes for
                 * which we have optimized out sending an IPI signal.
                 */
                GLUE(X_PFX,push_pending_to_hw)(xc);
        } else {
                /*
                 * We are masking more, we need to check the queue for any
                 * interrupt that has been routed to another CPU, take
                 * it out (replace it with the dummy) and retrigger it.
                 *
                 * This is necessary since those interrupts may otherwise
                 * never be processed, at least not until this CPU restores
                 * its CPPR.
                 *
                 * This is in theory racy vs. HW adding new interrupts to
                 * the queue. In practice this works because the interesting
                 * cases are when the guest has done a set_xive() to move the
                 * interrupt away, which flushes the XIVE, followed by the
                 * target CPU doing a H_CPPR. So any new interrupt coming into
                 * the queue must still be routed to us and isn't a source
                 * of concern.
                 */
                GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
        }

        /* Apply new CPPR */
        xc->hw_cppr = cppr;
        __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return H_SUCCESS;
}
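
/*
 * A numeric example for the branch above: smaller CPPR values are
 * more favored, so moving from cppr 2 to cppr 6 ("masking less")
 * newly admits priorities 2-5 and we may owe the guest a delivery,
 * while moving from 6 to 2 ("masking more") masks priorities 2-5 and
 * we must fish rerouted interrupts back out of those queues.
 */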

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_irq_data *xd;
        u8 new_cppr = xirr >> 24;
        u32 irq = xirr & 0x00ffffff, hw_num;
        u16 src;
        int rc = 0;

        pr_devel("H_EOI(xirr=%08lx)\n", xirr);

        xc->GLUE(X_STAT_PFX,h_eoi)++;

        xc->cppr = xive_prio_from_guest(new_cppr);

        /*
         * IPIs are synthesized from MFRR and thus don't need
         * any special EOI handling. The underlying interrupt
         * used to signal MFRR changes is EOId when fetched from
         * the queue.
         */
        if (irq == XICS_IPI || irq == 0) {
                /*
                 * This barrier orders the setting of xc->cppr vs.
                 * the subsequent test of xc->mfrr done inside
                 * scan_interrupts and push_pending_to_hw
                 */
                smp_mb();
                goto bail;
        }

        /* Find interrupt source */
        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel(" source not found !\n");
                rc = H_PARAMETER;
                /* Same as above */
                smp_mb();
                goto bail;
        }
        state = &sb->irq_state[src];
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        state->in_eoi = true;

        /*
         * This barrier orders both the setting of in_eoi above vs.
         * the subsequent test of guest_priority, and the setting
         * of xc->cppr vs. the subsequent test of xc->mfrr done inside
         * scan_interrupts and push_pending_to_hw
         */
        smp_mb();

again:
        if (state->guest_priority == MASKED) {
                arch_spin_lock(&sb->lock);
                if (state->guest_priority != MASKED) {
                        arch_spin_unlock(&sb->lock);
                        goto again;
                }
                pr_devel(" EOI on saved P...\n");

                /* Clear old_p, that will cause unmask to perform an EOI */
                state->old_p = false;

                arch_spin_unlock(&sb->lock);
        } else {
                pr_devel(" EOI on source...\n");

                /* Perform EOI on the source */
                GLUE(X_PFX,source_eoi)(hw_num, xd);

                /* If it's an emulated LSI, check level and resend */
                if (state->lsi && state->asserted)
                        __x_writeq(0, __x_trig_page(xd));
        }

        /*
         * This barrier orders the above guest_priority check
         * and spin_lock/unlock with clearing in_eoi below.
         *
         * It also has to be a full mb() as it must ensure
         * the MMIOs done in source_eoi() are completed before
         * state->in_eoi is visible.
         */
        mb();
        state->in_eoi = false;
bail:

        /* Re-evaluate pending IRQs and update HW */
        GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
        GLUE(X_PFX,push_pending_to_hw)(xc);
        pr_devel(" after scan pending=%02x\n", xc->pending);

        /* Apply new CPPR */
        xc->hw_cppr = xc->cppr;
        __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return rc;
}
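
/*
 * A worked example of the xirr decoding above: xirr = 0x03000042
 * EOIs source 0x42 and installs guest CPPR 3 (after mapping through
 * xive_prio_from_guest), mirroring the packing h_xirr returned.
 */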

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                               unsigned long mfrr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

        xc->GLUE(X_STAT_PFX,h_ipi)++;

        /* Find target */
        vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
        if (!vcpu)
                return H_PARAMETER;
        xc = vcpu->arch.xive_vcpu;

        /* Locklessly write over MFRR */
        xc->mfrr = mfrr;

        /*
         * The load of xc->cppr below and the subsequent MMIO store
         * to the IPI must happen after the above mfrr update is
         * globally visible so that:
         *
         * - We synchronize with another CPU doing an H_EOI or a H_CPPR
         *   updating xc->cppr then reading xc->mfrr.
         *
         * - The target of the IPI sees the xc->mfrr update
         */
        mb();

        /* Shoot the IPI if more favored than the target CPPR */
        if (mfrr < xc->cppr)
                __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

        return H_SUCCESS;
}
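
/*
 * A pairing sketch for the mb() above, summarizing the barrier
 * comments here and in h_cppr/h_eoi: H_IPI stores mfrr then loads
 * cppr, while H_CPPR/H_EOI store cppr then load mfrr. With full
 * barriers on both sides, at least one side must observe the other's
 * store, so either the IPI is shot here (mfrr < xc->cppr) or the
 * target's scan/push_pending_to_hw picks up the new mfrr; an MFRR
 * change cannot be lost.
 */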
