arch/powerpc/kvm/book3s_xive.c


DEFINITIONS

This source file includes the following definitions:
  1. kvmppc_xive_push_vcpu
  2. xive_irq_trigger
  3. xive_esc_irq
  4. kvmppc_xive_attach_escalation
  5. xive_provision_queue
  6. xive_check_provisioning
  7. xive_inc_q_pending
  8. xive_try_pick_queue
  9. kvmppc_xive_select_target
  10. xive_lock_and_mask
  11. xive_lock_for_unmask
  12. xive_finish_unmask
  13. xive_target_interrupt
  14. kvmppc_xive_set_xive
  15. kvmppc_xive_get_xive
  16. kvmppc_xive_int_on
  17. kvmppc_xive_int_off
  18. xive_restore_pending_irq
  19. kvmppc_xive_get_icp
  20. kvmppc_xive_set_icp
  21. kvmppc_xive_set_mapped
  22. kvmppc_xive_clr_mapped
  23. kvmppc_xive_disable_vcpu_interrupts
  24. xive_cleanup_single_escalation
  25. kvmppc_xive_cleanup_vcpu
  26. kvmppc_xive_connect_vcpu
  27. xive_pre_save_set_queued
  28. xive_pre_save_mask_irq
  29. xive_pre_save_unmask_irq
  30. xive_pre_save_queue
  31. xive_pre_save_scan
  32. xive_post_save_scan
  33. xive_get_source
  34. kvmppc_xive_create_src_block
  35. xive_check_delayed_irq
  36. xive_set_source
  37. kvmppc_xive_set_irq
  38. xive_set_attr
  39. xive_get_attr
  40. xive_has_attr
  41. kvmppc_xive_cleanup_irq
  42. kvmppc_xive_free_sources
  43. kvmppc_xive_release
  44. kvmppc_xive_get_device
  45. kvmppc_xive_create
  46. kvmppc_xive_debug_show_queues
  47. xive_debug_show
  48. xive_debugfs_init
  49. kvmppc_xive_init
  50. kvmppc_xive_init_module
  51. kvmppc_xive_exit_module

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
   4  */
   5 
   6 #define pr_fmt(fmt) "xive-kvm: " fmt
   7 
   8 #include <linux/kernel.h>
   9 #include <linux/kvm_host.h>
  10 #include <linux/err.h>
  11 #include <linux/gfp.h>
  12 #include <linux/spinlock.h>
  13 #include <linux/delay.h>
  14 #include <linux/percpu.h>
  15 #include <linux/cpumask.h>
  16 #include <linux/uaccess.h>
  17 #include <asm/kvm_book3s.h>
  18 #include <asm/kvm_ppc.h>
  19 #include <asm/hvcall.h>
  20 #include <asm/xics.h>
  21 #include <asm/xive.h>
  22 #include <asm/xive-regs.h>
  23 #include <asm/debug.h>
  24 #include <asm/debugfs.h>
  25 #include <asm/time.h>
  26 #include <asm/opal.h>
  27 
  28 #include <linux/debugfs.h>
  29 #include <linux/seq_file.h>
  30 
  31 #include "book3s_xive.h"
  32 
  33 
  34 /*
  35  * Virtual mode variants of the hcalls for use on radix/radix
  36  * with AIL. They require the VCPU's VP to be "pushed"
  37  *
  38  * We still instantiate them here because we use some of the
  39  * generated utility functions as well in this file.
  40  */
  41 #define XIVE_RUNTIME_CHECKS
  42 #define X_PFX xive_vm_
  43 #define X_STATIC static
  44 #define X_STAT_PFX stat_vm_
  45 #define __x_tima                xive_tima
  46 #define __x_eoi_page(xd)        ((void __iomem *)((xd)->eoi_mmio))
  47 #define __x_trig_page(xd)       ((void __iomem *)((xd)->trig_mmio))
  48 #define __x_writeb      __raw_writeb
  49 #define __x_readw       __raw_readw
  50 #define __x_readq       __raw_readq
  51 #define __x_writeq      __raw_writeq
  52 
  53 #include "book3s_xive_template.c"
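      /*
       * Note: with X_PFX set to xive_vm_ above, the included template
       * expands into the xive_vm_* helpers (e.g. xive_vm_esb_load(),
       * xive_vm_source_eoi()) that the rest of this file uses for ESB
       * accesses and EOIs.
       */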
  54 
  55 /*
  56  * We leave a gap of a couple of interrupts in the queue to
  57  * account for the IPI and additional safety guard.
  58  */
  59 #define XIVE_Q_GAP      2
  60 
  61 /*
  62  * Push a vcpu's context to the XIVE on guest entry.
  63  * This assumes we are in virtual mode (MMU on)
  64  */
  65 void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
  66 {
  67         void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
  68         u64 pq;
  69 
  70         /*
  71          * Nothing to do if the platform doesn't have a XIVE
  72          * or this vCPU doesn't have its own XIVE context
  73          * (e.g. because it's not using an in-kernel interrupt controller).
  74          */
  75         if (!tima || !vcpu->arch.xive_cam_word)
  76                 return;
  77 
  78         eieio();
  79         __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
  80         __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
  81         vcpu->arch.xive_pushed = 1;
  82         eieio();
  83 
  84         /*
  85          * We clear the irq_pending flag. There is a small chance of a
  86          * race vs. the escalation interrupt happening on another
  87          * processor setting it again, but the only consequence is to
  88          * cause a spurious wakeup on the next H_CEDE, which is not an
  89          * issue.
  90          */
  91         vcpu->arch.irq_pending = 0;
  92 
  93         /*
  94          * In single escalation mode, if the escalation interrupt is
  95          * on, we mask it.
  96          */
  97         if (vcpu->arch.xive_esc_on) {
  98                 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
  99                                                   XIVE_ESB_SET_PQ_01));
 100                 mb();
 101 
 102                 /*
 103                  * We have a possible subtle race here: The escalation
 104                  * interrupt might have fired and be on its way to the
 105                  * host queue while we mask it, and if we unmask it
 106                  * early enough (re-cede right away), there is a
  107                  * theoretical possibility that it fires again, thus
 108                  * landing in the target queue more than once which is
 109                  * a big no-no.
 110                  *
 111                  * Fortunately, solving this is rather easy. If the
 112                  * above load setting PQ to 01 returns a previous
 113                  * value where P is set, then we know the escalation
 114                  * interrupt is somewhere on its way to the host. In
 115                  * that case we simply don't clear the xive_esc_on
 116                  * flag below. It will be eventually cleared by the
 117                  * handler for the escalation interrupt.
 118                  *
 119                  * Then, when doing a cede, we check that flag again
 120                  * before re-enabling the escalation interrupt, and if
 121                  * set, we abort the cede.
 122                  */
 123                 if (!(pq & XIVE_ESB_VAL_P))
 124                         /* Now P is 0, we can clear the flag */
 125                         vcpu->arch.xive_esc_on = 0;
 126         }
 127 }
 128 EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
 129 
 130 /*
 131  * This is a simple trigger for a generic XIVE IRQ. This must
 132  * only be called for interrupts that support a trigger page
 133  */
 134 static bool xive_irq_trigger(struct xive_irq_data *xd)
 135 {
 136         /* This should be only for MSIs */
 137         if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
 138                 return false;
 139 
 140         /* Those interrupts should always have a trigger page */
 141         if (WARN_ON(!xd->trig_mmio))
 142                 return false;
 143 
 144         out_be64(xd->trig_mmio, 0);
 145 
 146         return true;
 147 }
 148 
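      /*
       * Handler for the escalation interrupt, wired up by
       * kvmppc_xive_attach_escalation() below: it flags the vCPU as
       * having a pending interrupt and kicks it if it is ceded, so the
       * guest gets re-entered and can pick up the escalated source.
       */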
 149 static irqreturn_t xive_esc_irq(int irq, void *data)
 150 {
 151         struct kvm_vcpu *vcpu = data;
 152 
 153         vcpu->arch.irq_pending = 1;
 154         smp_mb();
 155         if (vcpu->arch.ceded)
 156                 kvmppc_fast_vcpu_kick(vcpu);
 157 
 158         /* Since we have the no-EOI flag, the interrupt is effectively
 159          * disabled now. Clearing xive_esc_on means we won't bother
 160          * doing so on the next entry.
 161          *
 162          * This also allows the entry code to know that if a PQ combination
 163          * of 10 is observed while xive_esc_on is true, it means the queue
 164          * contains an unprocessed escalation interrupt. We don't make use of
  165          * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
 166          */
 167         vcpu->arch.xive_esc_on = false;
 168 
 169         /* This orders xive_esc_on = false vs. subsequent stale_p = true */
 170         smp_wmb();      /* goes with smp_mb() in cleanup_single_escalation */
 171 
 172         return IRQ_HANDLED;
 173 }
 174 
 175 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
 176                                   bool single_escalation)
 177 {
 178         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 179         struct xive_q *q = &xc->queues[prio];
 180         char *name = NULL;
 181         int rc;
 182 
 183         /* Already there ? */
 184         if (xc->esc_virq[prio])
 185                 return 0;
 186 
 187         /* Hook up the escalation interrupt */
 188         xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
 189         if (!xc->esc_virq[prio]) {
 190                 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
 191                        prio, xc->server_num);
 192                 return -EIO;
 193         }
 194 
 195         if (single_escalation)
 196                 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
 197                                  vcpu->kvm->arch.lpid, xc->server_num);
 198         else
 199                 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
 200                                  vcpu->kvm->arch.lpid, xc->server_num, prio);
 201         if (!name) {
 202                 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
 203                        prio, xc->server_num);
 204                 rc = -ENOMEM;
 205                 goto error;
 206         }
 207 
 208         pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
 209 
 210         rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
 211                          IRQF_NO_THREAD, name, vcpu);
 212         if (rc) {
 213                 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
 214                        prio, xc->server_num);
 215                 goto error;
 216         }
 217         xc->esc_virq_names[prio] = name;
 218 
 219         /* In single escalation mode, we grab the ESB MMIO of the
 220          * interrupt and mask it. Also populate the VCPU v/raddr
 221          * of the ESB page for use by asm entry/exit code. Finally
 222          * set the XIVE_IRQ_NO_EOI flag which will prevent the
 223          * core code from performing an EOI on the escalation
 224          * interrupt, thus leaving it effectively masked after
 225          * it fires once.
 226          */
 227         if (single_escalation) {
 228                 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
 229                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
 230 
 231                 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
 232                 vcpu->arch.xive_esc_raddr = xd->eoi_page;
 233                 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
 234                 xd->flags |= XIVE_IRQ_NO_EOI;
 235         }
 236 
 237         return 0;
 238 error:
 239         irq_dispose_mapping(xc->esc_virq[prio]);
 240         xc->esc_virq[prio] = 0;
 241         kfree(name);
 242         return rc;
 243 }
 244 
 245 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
 246 {
 247         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 248         struct kvmppc_xive *xive = xc->xive;
 249         struct xive_q *q =  &xc->queues[prio];
 250         void *qpage;
 251         int rc;
 252 
 253         if (WARN_ON(q->qpage))
 254                 return 0;
 255 
 256         /* Allocate the queue and retrieve infos on current node for now */
 257         qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
 258         if (!qpage) {
 259                 pr_err("Failed to allocate queue %d for VCPU %d\n",
 260                        prio, xc->server_num);
 261                 return -ENOMEM;
 262         }
 263         memset(qpage, 0, 1 << xive->q_order);
 264 
 265         /*
 266          * Reconfigure the queue. This will set q->qpage only once the
 267          * queue is fully configured. This is a requirement for prio 0
 268          * as we will stop doing EOIs for every IPI as soon as we observe
 269          * qpage being non-NULL, and instead will only EOI when we receive
 270          * corresponding queue 0 entries
 271          */
 272         rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
 273                                          xive->q_order, true);
 274         if (rc)
 275                 pr_err("Failed to configure queue %d for VCPU %d\n",
 276                        prio, xc->server_num);
 277         return rc;
 278 }
 279 
 280 /* Called with xive->lock held */
 281 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 282 {
 283         struct kvmppc_xive *xive = kvm->arch.xive;
 284         struct kvm_vcpu *vcpu;
 285         int i, rc;
 286 
 287         lockdep_assert_held(&xive->lock);
 288 
 289         /* Already provisioned ? */
 290         if (xive->qmap & (1 << prio))
 291                 return 0;
 292 
 293         pr_devel("Provisioning prio... %d\n", prio);
 294 
 295         /* Provision each VCPU and enable escalations if needed */
 296         kvm_for_each_vcpu(i, vcpu, kvm) {
 297                 if (!vcpu->arch.xive_vcpu)
 298                         continue;
 299                 rc = xive_provision_queue(vcpu, prio);
 300                 if (rc == 0 && !xive->single_escalation)
 301                         kvmppc_xive_attach_escalation(vcpu, prio,
 302                                                       xive->single_escalation);
 303                 if (rc)
 304                         return rc;
 305         }
 306 
 307         /* Order previous stores and mark it as provisioned */
 308         mb();
 309         xive->qmap |= (1 << prio);
 310         return 0;
 311 }
 312 
 313 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
 314 {
 315         struct kvm_vcpu *vcpu;
 316         struct kvmppc_xive_vcpu *xc;
 317         struct xive_q *q;
 318 
 319         /* Locate target server */
 320         vcpu = kvmppc_xive_find_server(kvm, server);
 321         if (!vcpu) {
 322                 pr_warn("%s: Can't find server %d\n", __func__, server);
 323                 return;
 324         }
 325         xc = vcpu->arch.xive_vcpu;
 326         if (WARN_ON(!xc))
 327                 return;
 328 
 329         q = &xc->queues[prio];
 330         atomic_inc(&q->pending_count);
 331 }
 332 
 333 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
 334 {
 335         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 336         struct xive_q *q;
 337         u32 max;
 338 
 339         if (WARN_ON(!xc))
 340                 return -ENXIO;
 341         if (!xc->valid)
 342                 return -ENXIO;
 343 
 344         q = &xc->queues[prio];
 345         if (WARN_ON(!q->qpage))
 346                 return -ENXIO;
 347 
 348         /* Calculate max number of interrupts in that queue. */
 349         max = (q->msk + 1) - XIVE_Q_GAP;
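              /*
               * q->msk is the queue index mask, so q->msk + 1 is the total
               * number of slots; keeping XIVE_Q_GAP of them free leaves room
               * for the VP IPI plus a safety guard (see XIVE_Q_GAP above),
               * and the accounting below refuses the pick once the remaining
               * capacity is used up.
               */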
 350         return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
 351 }
 352 
 353 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
 354 {
 355         struct kvm_vcpu *vcpu;
 356         int i, rc;
 357 
 358         /* Locate target server */
 359         vcpu = kvmppc_xive_find_server(kvm, *server);
 360         if (!vcpu) {
 361                 pr_devel("Can't find server %d\n", *server);
 362                 return -EINVAL;
 363         }
 364 
 365         pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
 366 
 367         /* Try pick it */
 368         rc = xive_try_pick_queue(vcpu, prio);
 369         if (rc == 0)
 370                 return rc;
 371 
 372         pr_devel(" .. failed, looking up candidate...\n");
 373 
 374         /* Failed, pick another VCPU */
 375         kvm_for_each_vcpu(i, vcpu, kvm) {
 376                 if (!vcpu->arch.xive_vcpu)
 377                         continue;
 378                 rc = xive_try_pick_queue(vcpu, prio);
 379                 if (rc == 0) {
 380                         *server = vcpu->arch.xive_vcpu->server_num;
 381                         pr_devel("  found on 0x%x/%d\n", *server, prio);
 382                         return rc;
 383                 }
 384         }
 385         pr_devel("  no available target !\n");
 386 
 387         /* No available target ! */
 388         return -EBUSY;
 389 }
 390 
 391 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 392                              struct kvmppc_xive_src_block *sb,
 393                              struct kvmppc_xive_irq_state *state)
 394 {
 395         struct xive_irq_data *xd;
 396         u32 hw_num;
 397         u8 old_prio;
 398         u64 val;
 399 
 400         /*
 401          * Take the lock, set masked, try again if racing
 402          * with H_EOI
 403          */
 404         for (;;) {
 405                 arch_spin_lock(&sb->lock);
 406                 old_prio = state->guest_priority;
 407                 state->guest_priority = MASKED;
 408                 mb();
 409                 if (!state->in_eoi)
 410                         break;
 411                 state->guest_priority = old_prio;
 412                 arch_spin_unlock(&sb->lock);
 413         }
 414 
 415         /* No change ? Bail */
 416         if (old_prio == MASKED)
 417                 return old_prio;
 418 
 419         /* Get the right irq */
 420         kvmppc_xive_select_irq(state, &hw_num, &xd);
 421 
 422         /*
 423          * If the interrupt is marked as needing masking via
 424          * firmware, we do it here. Firmware masking however
 425          * is "lossy", it won't return the old p and q bits
 426          * and won't set the interrupt to a state where it will
 427          * record queued ones. If this is an issue we should do
 428          * lazy masking instead.
 429          *
 430          * For now, we work around this in unmask by forcing
 431          * an interrupt whenever we unmask a non-LSI via FW
 432          * (if ever).
 433          */
 434         if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 435                 xive_native_configure_irq(hw_num,
 436                                 kvmppc_xive_vp(xive, state->act_server),
 437                                 MASKED, state->number);
 438                 /* set old_p so we can track if an H_EOI was done */
 439                 state->old_p = true;
 440                 state->old_q = false;
 441         } else {
 442                 /* Set PQ to 10, return old P and old Q and remember them */
 443                 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
 444                 state->old_p = !!(val & 2);
 445                 state->old_q = !!(val & 1);
 446 
 447                 /*
  448          * Synchronize hardware to ensure the queues are updated
 449                  * when masking
 450                  */
 451                 xive_native_sync_source(hw_num);
 452         }
 453 
 454         return old_prio;
 455 }
 456 
 457 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
 458                                  struct kvmppc_xive_irq_state *state)
 459 {
 460         /*
  461          * Take the lock, try again if racing with H_EOI
 462          */
 463         for (;;) {
 464                 arch_spin_lock(&sb->lock);
 465                 if (!state->in_eoi)
 466                         break;
 467                 arch_spin_unlock(&sb->lock);
 468         }
 469 }
 470 
 471 static void xive_finish_unmask(struct kvmppc_xive *xive,
 472                                struct kvmppc_xive_src_block *sb,
 473                                struct kvmppc_xive_irq_state *state,
 474                                u8 prio)
 475 {
 476         struct xive_irq_data *xd;
 477         u32 hw_num;
 478 
 479         /* If we aren't changing a thing, move on */
 480         if (state->guest_priority != MASKED)
 481                 goto bail;
 482 
 483         /* Get the right irq */
 484         kvmppc_xive_select_irq(state, &hw_num, &xd);
 485 
 486         /*
  487          * See comment in xive_lock_and_mask() concerning masking
 488          * via firmware.
 489          */
 490         if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 491                 xive_native_configure_irq(hw_num,
 492                                 kvmppc_xive_vp(xive, state->act_server),
 493                                 state->act_priority, state->number);
 494                 /* If an EOI is needed, do it here */
 495                 if (!state->old_p)
 496                         xive_vm_source_eoi(hw_num, xd);
 497                 /* If this is not an LSI, force a trigger */
 498                 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
 499                         xive_irq_trigger(xd);
 500                 goto bail;
 501         }
 502 
 503         /* Old Q set, set PQ to 11 */
 504         if (state->old_q)
 505                 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
 506 
 507         /*
 508          * If not old P, then perform an "effective" EOI,
 509          * on the source. This will handle the cases where
 510          * FW EOI is needed.
 511          */
 512         if (!state->old_p)
 513                 xive_vm_source_eoi(hw_num, xd);
 514 
 515         /* Synchronize ordering and mark unmasked */
 516         mb();
 517 bail:
 518         state->guest_priority = prio;
 519 }
 520 
 521 /*
  522  * Target an interrupt to a given server/prio; this will fall back
 523  * to another server if necessary and perform the HW targetting
 524  * updates as needed
 525  *
 526  * NOTE: Must be called with the state lock held
 527  */
 528 static int xive_target_interrupt(struct kvm *kvm,
 529                                  struct kvmppc_xive_irq_state *state,
 530                                  u32 server, u8 prio)
 531 {
 532         struct kvmppc_xive *xive = kvm->arch.xive;
 533         u32 hw_num;
 534         int rc;
 535 
 536         /*
 537          * This will return a tentative server and actual
 538          * priority. The count for that new target will have
 539          * already been incremented.
 540          */
 541         rc = kvmppc_xive_select_target(kvm, &server, prio);
 542 
 543         /*
 544          * We failed to find a target ? Not much we can do
 545          * at least until we support the GIQ.
 546          */
 547         if (rc)
 548                 return rc;
 549 
 550         /*
 551          * Increment the old queue pending count if there
 552          * was one so that the old queue count gets adjusted later
 553          * when observed to be empty.
 554          */
 555         if (state->act_priority != MASKED)
 556                 xive_inc_q_pending(kvm,
 557                                    state->act_server,
 558                                    state->act_priority);
 559         /*
 560          * Update state and HW
 561          */
 562         state->act_priority = prio;
 563         state->act_server = server;
 564 
 565         /* Get the right irq */
 566         kvmppc_xive_select_irq(state, &hw_num, NULL);
 567 
 568         return xive_native_configure_irq(hw_num,
 569                                          kvmppc_xive_vp(xive, server),
 570                                          prio, state->number);
 571 }
 572 
 573 /*
 574  * Targetting rules: In order to avoid losing track of
  575  * pending interrupts across mask and unmask, which would
 576  * allow queue overflows, we implement the following rules:
 577  *
 578  *  - Unless it was never enabled (or we run out of capacity)
 579  *    an interrupt is always targetted at a valid server/queue
 580  *    pair even when "masked" by the guest. This pair tends to
 581  *    be the last one used but it can be changed under some
 582  *    circumstances. That allows us to separate targetting
  583  *    from masking: we only handle accounting during (re)targetting,
  584  *    which also allows us to let an interrupt drain into its target
  585  *    queue after masking, avoiding complex schemes to remove
  586  *    interrupts from remote processor queues.
 587  *
 588  *  - When masking, we set PQ to 10 and save the previous value
 589  *    of P and Q.
 590  *
 591  *  - When unmasking, if saved Q was set, we set PQ to 11
 592  *    otherwise we leave PQ to the HW state which will be either
 593  *    10 if nothing happened or 11 if the interrupt fired while
 594  *    masked. Effectively we are OR'ing the previous Q into the
 595  *    HW Q.
 596  *
 597  *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 598  *    which will unmask the interrupt and shoot a new one if Q was
 599  *    set.
 600  *
 601  *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 602  *    effectively meaning an H_EOI from the guest is still expected
 603  *    for that interrupt).
 604  *
 605  *  - If H_EOI occurs while masked, we clear the saved P.
 606  *
 607  *  - When changing target, we account on the new target and
 608  *    increment a separate "pending" counter on the old one.
 609  *    This pending counter will be used to decrement the old
 610  *    target's count when its queue has been observed empty.
 611  */
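      /*
       * Worked example of the rules above: take an interrupt sitting
       * unmasked and idle at PQ=00. Masking it sets PQ to 10 and records
       * old_p=0, old_q=0. If it fires while masked, the ESB latches the
       * event and the HW state becomes PQ=11. On unmask, saved Q was
       * clear so the HW PQ is left alone, and saved P was clear so we
       * perform an effective EOI, which re-enables the source and, since
       * Q ended up set, replays the latched interrupt.
       */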
 612 
 613 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
 614                          u32 priority)
 615 {
 616         struct kvmppc_xive *xive = kvm->arch.xive;
 617         struct kvmppc_xive_src_block *sb;
 618         struct kvmppc_xive_irq_state *state;
 619         u8 new_act_prio;
 620         int rc = 0;
 621         u16 idx;
 622 
 623         if (!xive)
 624                 return -ENODEV;
 625 
 626         pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
 627                  irq, server, priority);
 628 
 629         /* First, check provisioning of queues */
 630         if (priority != MASKED) {
 631                 mutex_lock(&xive->lock);
 632                 rc = xive_check_provisioning(xive->kvm,
 633                               xive_prio_from_guest(priority));
 634                 mutex_unlock(&xive->lock);
 635         }
 636         if (rc) {
 637                 pr_devel("  provisioning failure %d !\n", rc);
 638                 return rc;
 639         }
 640 
 641         sb = kvmppc_xive_find_source(xive, irq, &idx);
 642         if (!sb)
 643                 return -EINVAL;
 644         state = &sb->irq_state[idx];
 645 
 646         /*
 647          * We first handle masking/unmasking since the locking
  648          * might need to be retried due to EOIs; we'll handle
 649          * targetting changes later. These functions will return
 650          * with the SB lock held.
 651          *
 652          * xive_lock_and_mask() will also set state->guest_priority
 653          * but won't otherwise change other fields of the state.
 654          *
 655          * xive_lock_for_unmask will not actually unmask, this will
 656          * be done later by xive_finish_unmask() once the targetting
 657          * has been done, so we don't try to unmask an interrupt
 658          * that hasn't yet been targetted.
 659          */
 660         if (priority == MASKED)
 661                 xive_lock_and_mask(xive, sb, state);
 662         else
 663                 xive_lock_for_unmask(sb, state);
 664 
 665 
 666         /*
 667          * Then we handle targetting.
 668          *
 669          * First calculate a new "actual priority"
 670          */
 671         new_act_prio = state->act_priority;
 672         if (priority != MASKED)
 673                 new_act_prio = xive_prio_from_guest(priority);
 674 
 675         pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
 676                  new_act_prio, state->act_server, state->act_priority);
 677 
 678         /*
  679          * Then check if we actually need to change anything.
 680          *
 681          * The condition for re-targetting the interrupt is that
 682          * we have a valid new priority (new_act_prio is not 0xff)
 683          * and either the server or the priority changed.
 684          *
 685          * Note: If act_priority was ff and the new priority is
 686          *       also ff, we don't do anything and leave the interrupt
  687          *       untargetted. An attempt to do an int_on on an
  688          *       untargetted interrupt will fail. If that is a problem
  689          *       we could initialize interrupts with a valid default.
 690          */
 691 
 692         if (new_act_prio != MASKED &&
 693             (state->act_server != server ||
 694              state->act_priority != new_act_prio))
 695                 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
 696 
 697         /*
 698          * Perform the final unmasking of the interrupt source
 699          * if necessary
 700          */
 701         if (priority != MASKED)
 702                 xive_finish_unmask(xive, sb, state, priority);
 703 
 704         /*
  705          * Finally, update saved_priority to match. Only int_on/off
 706          * set this field to a different value.
 707          */
 708         state->saved_priority = priority;
 709 
 710         arch_spin_unlock(&sb->lock);
 711         return rc;
 712 }
 713 
 714 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 715                          u32 *priority)
 716 {
 717         struct kvmppc_xive *xive = kvm->arch.xive;
 718         struct kvmppc_xive_src_block *sb;
 719         struct kvmppc_xive_irq_state *state;
 720         u16 idx;
 721 
 722         if (!xive)
 723                 return -ENODEV;
 724 
 725         sb = kvmppc_xive_find_source(xive, irq, &idx);
 726         if (!sb)
 727                 return -EINVAL;
 728         state = &sb->irq_state[idx];
 729         arch_spin_lock(&sb->lock);
 730         *server = state->act_server;
 731         *priority = state->guest_priority;
 732         arch_spin_unlock(&sb->lock);
 733 
 734         return 0;
 735 }
 736 
 737 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
 738 {
 739         struct kvmppc_xive *xive = kvm->arch.xive;
 740         struct kvmppc_xive_src_block *sb;
 741         struct kvmppc_xive_irq_state *state;
 742         u16 idx;
 743 
 744         if (!xive)
 745                 return -ENODEV;
 746 
 747         sb = kvmppc_xive_find_source(xive, irq, &idx);
 748         if (!sb)
 749                 return -EINVAL;
 750         state = &sb->irq_state[idx];
 751 
 752         pr_devel("int_on(irq=0x%x)\n", irq);
 753 
 754         /*
 755          * Check if interrupt was not targetted
 756          */
 757         if (state->act_priority == MASKED) {
 758                 pr_devel("int_on on untargetted interrupt\n");
 759                 return -EINVAL;
 760         }
 761 
 762         /* If saved_priority is 0xff, do nothing */
 763         if (state->saved_priority == MASKED)
 764                 return 0;
 765 
 766         /*
 767          * Lock and unmask it.
 768          */
 769         xive_lock_for_unmask(sb, state);
 770         xive_finish_unmask(xive, sb, state, state->saved_priority);
 771         arch_spin_unlock(&sb->lock);
 772 
 773         return 0;
 774 }
 775 
 776 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
 777 {
 778         struct kvmppc_xive *xive = kvm->arch.xive;
 779         struct kvmppc_xive_src_block *sb;
 780         struct kvmppc_xive_irq_state *state;
 781         u16 idx;
 782 
 783         if (!xive)
 784                 return -ENODEV;
 785 
 786         sb = kvmppc_xive_find_source(xive, irq, &idx);
 787         if (!sb)
 788                 return -EINVAL;
 789         state = &sb->irq_state[idx];
 790 
 791         pr_devel("int_off(irq=0x%x)\n", irq);
 792 
 793         /*
 794          * Lock and mask
 795          */
 796         state->saved_priority = xive_lock_and_mask(xive, sb, state);
 797         arch_spin_unlock(&sb->lock);
 798 
 799         return 0;
 800 }
 801 
 802 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
 803 {
 804         struct kvmppc_xive_src_block *sb;
 805         struct kvmppc_xive_irq_state *state;
 806         u16 idx;
 807 
 808         sb = kvmppc_xive_find_source(xive, irq, &idx);
 809         if (!sb)
 810                 return false;
 811         state = &sb->irq_state[idx];
 812         if (!state->valid)
 813                 return false;
 814 
 815         /*
 816          * Trigger the IPI. This assumes we never restore a pass-through
 817          * interrupt which should be safe enough
 818          */
 819         xive_irq_trigger(&state->ipi_data);
 820 
 821         return true;
 822 }
 823 
 824 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 825 {
 826         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 827 
 828         if (!xc)
 829                 return 0;
 830 
 831         /* Return the per-cpu state for state saving/migration */
 832         return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
 833                (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
 834                (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 835 }
 836 
 837 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
 838 {
 839         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 840         struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
 841         u8 cppr, mfrr;
 842         u32 xisr;
 843 
 844         if (!xc || !xive)
 845                 return -ENOENT;
 846 
 847         /* Grab individual state fields. We don't use pending_pri */
 848         cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
 849         xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
 850                 KVM_REG_PPC_ICP_XISR_MASK;
 851         mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
 852 
 853         pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
 854                  xc->server_num, cppr, mfrr, xisr);
 855 
 856         /*
 857          * We can't update the state of a "pushed" VCPU, but that
 858          * shouldn't happen because the vcpu->mutex makes running a
 859          * vcpu mutually exclusive with doing one_reg get/set on it.
 860          */
 861         if (WARN_ON(vcpu->arch.xive_pushed))
 862                 return -EIO;
 863 
 864         /* Update VCPU HW saved state */
 865         vcpu->arch.xive_saved_state.cppr = cppr;
 866         xc->hw_cppr = xc->cppr = cppr;
 867 
 868         /*
 869          * Update MFRR state. If it's not 0xff, we mark the VCPU as
 870          * having a pending MFRR change, which will re-evaluate the
 871          * target. The VCPU will thus potentially get a spurious
 872          * interrupt but that's not a big deal.
 873          */
 874         xc->mfrr = mfrr;
 875         if (mfrr < cppr)
 876                 xive_irq_trigger(&xc->vp_ipi_data);
 877 
 878         /*
 879          * Now saved XIRR is "interesting". It means there's something in
 880          * the legacy "1 element" queue... for an IPI we simply ignore it,
 881          * as the MFRR restore will handle that. For anything else we need
 882          * to force a resend of the source.
 883          * However the source may not have been setup yet. If that's the
 884          * case, we keep that info and increment a counter in the xive to
 885          * tell subsequent xive_set_source() to go look.
 886          */
 887         if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
 888                 xc->delayed_irq = xisr;
 889                 xive->delayed_irqs++;
 890                 pr_devel("  xisr restore delayed\n");
 891         }
 892 
 893         return 0;
 894 }
 895 
 896 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
 897                            struct irq_desc *host_desc)
 898 {
 899         struct kvmppc_xive *xive = kvm->arch.xive;
 900         struct kvmppc_xive_src_block *sb;
 901         struct kvmppc_xive_irq_state *state;
 902         struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
 903         unsigned int host_irq = irq_desc_get_irq(host_desc);
 904         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
 905         u16 idx;
 906         u8 prio;
 907         int rc;
 908 
 909         if (!xive)
 910                 return -ENODEV;
 911 
  912         pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
 913 
 914         sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
 915         if (!sb)
 916                 return -EINVAL;
 917         state = &sb->irq_state[idx];
 918 
 919         /*
 920          * Mark the passed-through interrupt as going to a VCPU,
 921          * this will prevent further EOIs and similar operations
 922          * from the XIVE code. It will also mask the interrupt
 923          * to either PQ=10 or 11 state, the latter if the interrupt
 924          * is pending. This will allow us to unmask or retrigger it
 925          * after routing it to the guest with a simple EOI.
 926          *
 927          * The "state" argument is a "token", all it needs is to be
 928          * non-NULL to switch to passed-through or NULL for the
 929          * other way around. We may not yet have an actual VCPU
 930          * target here and we don't really care.
 931          */
 932         rc = irq_set_vcpu_affinity(host_irq, state);
 933         if (rc) {
 934                 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
 935                 return rc;
 936         }
 937 
 938         /*
 939          * Mask and read state of IPI. We need to know if its P bit
 940          * is set as that means it's potentially already using a
 941          * queue entry in the target
 942          */
 943         prio = xive_lock_and_mask(xive, sb, state);
 944         pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
 945                  state->old_p, state->old_q);
 946 
 947         /* Turn the IPI hard off */
 948         xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 949 
 950         /*
 951          * Reset ESB guest mapping. Needed when ESB pages are exposed
 952          * to the guest in XIVE native mode
 953          */
 954         if (xive->ops && xive->ops->reset_mapped)
 955                 xive->ops->reset_mapped(kvm, guest_irq);
 956 
 957         /* Grab info about irq */
 958         state->pt_number = hw_irq;
 959         state->pt_data = irq_data_get_irq_handler_data(host_data);
 960 
 961         /*
 962          * Configure the IRQ to match the existing configuration of
 963          * the IPI if it was already targetted. Otherwise this will
 964          * mask the interrupt in a lossy way (act_priority is 0xff)
 965          * which is fine for a never started interrupt.
 966          */
 967         xive_native_configure_irq(hw_irq,
 968                                   kvmppc_xive_vp(xive, state->act_server),
 969                                   state->act_priority, state->number);
 970 
 971         /*
 972          * We do an EOI to enable the interrupt (and retrigger if needed)
 973          * if the guest has the interrupt unmasked and the P bit was *not*
 974          * set in the IPI. If it was set, we know a slot may still be in
 975          * use in the target queue thus we have to wait for a guest
 976          * originated EOI
 977          */
 978         if (prio != MASKED && !state->old_p)
 979                 xive_vm_source_eoi(hw_irq, state->pt_data);
 980 
 981         /* Clear old_p/old_q as they are no longer relevant */
 982         state->old_p = state->old_q = false;
 983 
 984         /* Restore guest prio (unlocks EOI) */
 985         mb();
 986         state->guest_priority = prio;
 987         arch_spin_unlock(&sb->lock);
 988 
 989         return 0;
 990 }
 991 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
 992 
 993 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
 994                            struct irq_desc *host_desc)
 995 {
 996         struct kvmppc_xive *xive = kvm->arch.xive;
 997         struct kvmppc_xive_src_block *sb;
 998         struct kvmppc_xive_irq_state *state;
 999         unsigned int host_irq = irq_desc_get_irq(host_desc);
1000         u16 idx;
1001         u8 prio;
1002         int rc;
1003 
1004         if (!xive)
1005                 return -ENODEV;
1006 
1007         pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1008 
1009         sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1010         if (!sb)
1011                 return -EINVAL;
1012         state = &sb->irq_state[idx];
1013 
1014         /*
1015          * Mask and read state of IRQ. We need to know if its P bit
1016          * is set as that means it's potentially already using a
1017          * queue entry in the target
1018          */
1019         prio = xive_lock_and_mask(xive, sb, state);
1020         pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1021                  state->old_p, state->old_q);
1022 
1023         /*
 1024          * If old_p is set, the interrupt is pending, so we switch it to
1025          * PQ=11. This will force a resend in the host so the interrupt
 1026          * isn't lost to whatever host driver may pick it up
1027          */
1028         if (state->old_p)
1029                 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1030 
1031         /* Release the passed-through interrupt to the host */
1032         rc = irq_set_vcpu_affinity(host_irq, NULL);
1033         if (rc) {
1034                 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1035                 return rc;
1036         }
1037 
1038         /* Forget about the IRQ */
1039         state->pt_number = 0;
1040         state->pt_data = NULL;
1041 
1042         /*
1043          * Reset ESB guest mapping. Needed when ESB pages are exposed
1044          * to the guest in XIVE native mode
1045          */
1046         if (xive->ops && xive->ops->reset_mapped) {
1047                 xive->ops->reset_mapped(kvm, guest_irq);
1048         }
1049 
1050         /* Reconfigure the IPI */
1051         xive_native_configure_irq(state->ipi_number,
1052                                   kvmppc_xive_vp(xive, state->act_server),
1053                                   state->act_priority, state->number);
1054 
1055         /*
1056          * If old_p is set (we have a queue entry potentially
1057          * occupied) or the interrupt is masked, we set the IPI
1058          * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
1059          */
1060         if (prio == MASKED || state->old_p)
1061                 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1062         else
1063                 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1064 
1065         /* Restore guest prio (unlocks EOI) */
1066         mb();
1067         state->guest_priority = prio;
1068         arch_spin_unlock(&sb->lock);
1069 
1070         return 0;
1071 }
1072 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1073 
1074 void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1075 {
1076         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1077         struct kvm *kvm = vcpu->kvm;
1078         struct kvmppc_xive *xive = kvm->arch.xive;
1079         int i, j;
1080 
1081         for (i = 0; i <= xive->max_sbid; i++) {
1082                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1083 
1084                 if (!sb)
1085                         continue;
1086                 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1087                         struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1088 
1089                         if (!state->valid)
1090                                 continue;
1091                         if (state->act_priority == MASKED)
1092                                 continue;
1093                         if (state->act_server != xc->server_num)
1094                                 continue;
1095 
1096                         /* Clean it up */
1097                         arch_spin_lock(&sb->lock);
1098                         state->act_priority = MASKED;
1099                         xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1100                         xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1101                         if (state->pt_number) {
1102                                 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1103                                 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1104                         }
1105                         arch_spin_unlock(&sb->lock);
1106                 }
1107         }
1108 
1109         /* Disable vcpu's escalation interrupt */
1110         if (vcpu->arch.xive_esc_on) {
1111                 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1112                                              XIVE_ESB_SET_PQ_01));
1113                 vcpu->arch.xive_esc_on = false;
1114         }
1115 
1116         /*
1117          * Clear pointers to escalation interrupt ESB.
1118          * This is safe because the vcpu->mutex is held, preventing
1119          * any other CPU from concurrently executing a KVM_RUN ioctl.
1120          */
1121         vcpu->arch.xive_esc_vaddr = 0;
1122         vcpu->arch.xive_esc_raddr = 0;
1123 }
1124 
1125 /*
1126  * In single escalation mode, the escalation interrupt is marked so
1127  * that EOI doesn't re-enable it, but just sets the stale_p flag to
1128  * indicate that the P bit has already been dealt with.  However, the
1129  * assembly code that enters the guest sets PQ to 00 without clearing
1130  * stale_p (because it has no easy way to address it).  Hence we have
1131  * to adjust stale_p before shutting down the interrupt.
1132  */
1133 void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1134                                     struct kvmppc_xive_vcpu *xc, int irq)
1135 {
1136         struct irq_data *d = irq_get_irq_data(irq);
1137         struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1138 
1139         /*
1140          * This slightly odd sequence gives the right result
1141          * (i.e. stale_p set if xive_esc_on is false) even if
1142          * we race with xive_esc_irq() and xive_irq_eoi().
1143          */
1144         xd->stale_p = false;
 1145         smp_mb();               /* paired with smp_wmb in xive_esc_irq */
1146         if (!vcpu->arch.xive_esc_on)
1147                 xd->stale_p = true;
1148 }
1149 
1150 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1151 {
1152         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1153         struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1154         int i;
1155 
1156         if (!kvmppc_xics_enabled(vcpu))
1157                 return;
1158 
1159         if (!xc)
1160                 return;
1161 
1162         pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1163 
1164         /* Ensure no interrupt is still routed to that VP */
1165         xc->valid = false;
1166         kvmppc_xive_disable_vcpu_interrupts(vcpu);
1167 
1168         /* Mask the VP IPI */
1169         xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1170 
1171         /* Free escalations */
1172         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1173                 if (xc->esc_virq[i]) {
1174                         if (xc->xive->single_escalation)
1175                                 xive_cleanup_single_escalation(vcpu, xc,
1176                                                         xc->esc_virq[i]);
1177                         free_irq(xc->esc_virq[i], vcpu);
1178                         irq_dispose_mapping(xc->esc_virq[i]);
1179                         kfree(xc->esc_virq_names[i]);
1180                 }
1181         }
1182 
1183         /* Disable the VP */
1184         xive_native_disable_vp(xc->vp_id);
1185 
1186         /* Clear the cam word so guest entry won't try to push context */
1187         vcpu->arch.xive_cam_word = 0;
1188 
1189         /* Free the queues */
1190         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1191                 struct xive_q *q = &xc->queues[i];
1192 
1193                 xive_native_disable_queue(xc->vp_id, q, i);
1194                 if (q->qpage) {
1195                         free_pages((unsigned long)q->qpage,
1196                                    xive->q_page_order);
1197                         q->qpage = NULL;
1198                 }
1199         }
1200 
1201         /* Free the IPI */
1202         if (xc->vp_ipi) {
1203                 xive_cleanup_irq_data(&xc->vp_ipi_data);
1204                 xive_native_free_irq(xc->vp_ipi);
1205         }
1206         /* Free the VP */
1207         kfree(xc);
1208 
1209         /* Cleanup the vcpu */
1210         vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1211         vcpu->arch.xive_vcpu = NULL;
1212 }
1213 
1214 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1215                              struct kvm_vcpu *vcpu, u32 cpu)
1216 {
1217         struct kvmppc_xive *xive = dev->private;
1218         struct kvmppc_xive_vcpu *xc;
1219         int i, r = -EBUSY;
1220         u32 vp_id;
1221 
1222         pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1223 
1224         if (dev->ops != &kvm_xive_ops) {
1225                 pr_devel("Wrong ops !\n");
1226                 return -EPERM;
1227         }
1228         if (xive->kvm != vcpu->kvm)
1229                 return -EPERM;
1230         if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1231                 return -EBUSY;
1232         if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
1233                 pr_devel("Out of bounds !\n");
1234                 return -EINVAL;
1235         }
1236 
1237         /* We need to synchronize with queue provisioning */
1238         mutex_lock(&xive->lock);
1239 
1240         vp_id = kvmppc_xive_vp(xive, cpu);
1241         if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1242                 pr_devel("Duplicate !\n");
1243                 r = -EEXIST;
1244                 goto bail;
1245         }
1246 
1247         xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1248         if (!xc) {
1249                 r = -ENOMEM;
1250                 goto bail;
1251         }
1252 
1253         vcpu->arch.xive_vcpu = xc;
1254         xc->xive = xive;
1255         xc->vcpu = vcpu;
1256         xc->server_num = cpu;
1257         xc->vp_id = vp_id;
1258         xc->mfrr = 0xff;
1259         xc->valid = true;
1260 
1261         r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1262         if (r)
1263                 goto bail;
1264 
1265         /* Configure VCPU fields for use by assembly push/pull */
1266         vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1267         vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1268 
1269         /* Allocate IPI */
1270         xc->vp_ipi = xive_native_alloc_irq();
1271         if (!xc->vp_ipi) {
1272                 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1273                 r = -EIO;
1274                 goto bail;
1275         }
1276         pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1277 
1278         r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1279         if (r)
1280                 goto bail;
1281 
1282         /*
1283          * Enable the VP first as the single escalation mode will
 1284          * affect escalation interrupt numbering
1285          */
1286         r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1287         if (r) {
1288                 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1289                 goto bail;
1290         }
1291 
1292         /*
1293          * Initialize queues. Initially we set them all for no queueing
1294          * and we enable escalation for queue 0 only which we'll use for
 1295          * our mfrr change notifications. If the VCPU is hot-plugged,
 1296          * however, we do handle provisioning based on the existing "map"
1297          * of enabled queues.
1298          */
1299         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1300                 struct xive_q *q = &xc->queues[i];
1301 
1302                 /* Single escalation, no queue 7 */
1303                 if (i == 7 && xive->single_escalation)
1304                         break;
1305 
1306                 /* Is queue already enabled ? Provision it */
1307                 if (xive->qmap & (1 << i)) {
1308                         r = xive_provision_queue(vcpu, i);
1309                         if (r == 0 && !xive->single_escalation)
1310                                 kvmppc_xive_attach_escalation(
1311                                         vcpu, i, xive->single_escalation);
1312                         if (r)
1313                                 goto bail;
1314                 } else {
1315                         r = xive_native_configure_queue(xc->vp_id,
1316                                                         q, i, NULL, 0, true);
1317                         if (r) {
1318                                 pr_err("Failed to configure queue %d for VCPU %d\n",
1319                                        i, cpu);
1320                                 goto bail;
1321                         }
1322                 }
1323         }
1324 
1325         /* If not done above, attach priority 0 escalation */
1326         r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1327         if (r)
1328                 goto bail;
1329 
1330         /* Route the IPI */
1331         r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1332         if (!r)
1333                 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1334 
1335 bail:
1336         mutex_unlock(&xive->lock);
1337         if (r) {
1338                 kvmppc_xive_cleanup_vcpu(vcpu);
1339                 return r;
1340         }
1341 
1342         vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1343         return 0;
1344 }
1345 
1346 /*
1347  * Scanning of queues before/after migration save
1348  */
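      /*
       * The pre-save scan below works in three passes: mask every source
       * and capture its P/Q bits, then walk each vcpu's queues to flag
       * the entries found there as "in_queue", and finally restore the
       * previous mask/priority on every source. xive_post_save_scan()
       * clears those flags again after the sources have been saved.
       */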
1349 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1350 {
1351         struct kvmppc_xive_src_block *sb;
1352         struct kvmppc_xive_irq_state *state;
1353         u16 idx;
1354 
1355         sb = kvmppc_xive_find_source(xive, irq, &idx);
1356         if (!sb)
1357                 return;
1358 
1359         state = &sb->irq_state[idx];
1360 
1361         /* Some sanity checking */
1362         if (!state->valid) {
1363                 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1364                 return;
1365         }
1366 
1367         /*
1368          * If the interrupt is in a queue it should have P set.
 1369          * We warn so that it gets reported. A backtrace isn't useful
1370          * so no need to use a WARN_ON.
1371          */
1372         if (!state->saved_p)
1373                 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1374 
1375         /* Set flag */
1376         state->in_queue = true;
1377 }
1378 
1379 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1380                                    struct kvmppc_xive_src_block *sb,
1381                                    u32 irq)
1382 {
1383         struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1384 
1385         if (!state->valid)
1386                 return;
1387 
1388         /* Mask and save state, this will also sync HW queues */
1389         state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1390 
1391         /* Transfer P and Q */
1392         state->saved_p = state->old_p;
1393         state->saved_q = state->old_q;
1394 
1395         /* Unlock */
1396         arch_spin_unlock(&sb->lock);
1397 }
1398 
1399 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1400                                      struct kvmppc_xive_src_block *sb,
1401                                      u32 irq)
1402 {
1403         struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1404 
1405         if (!state->valid)
1406                 return;
1407 
1408         /*
1409          * Lock / exclude EOI (not technically necessary if the
 1410          * guest isn't running concurrently). If this becomes a
1411          * performance issue we can probably remove the lock.
1412          */
1413         xive_lock_for_unmask(sb, state);
1414 
1415         /* Restore mask/prio if it wasn't masked */
1416         if (state->saved_scan_prio != MASKED)
1417                 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1418 
1419         /* Unlock */
1420         arch_spin_unlock(&sb->lock);
1421 }
1422 
1423 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1424 {
1425         u32 idx = q->idx;
1426         u32 toggle = q->toggle;
1427         u32 irq;
1428 
1429         do {
1430                 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1431                 if (irq > XICS_IPI)
1432                         xive_pre_save_set_queued(xive, irq);
1433         } while (irq);
1434 }
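
/*
 * For orientation, a simplified model of the generation-bit scheme that
 * __xive_read_eq() implements above (sketch only; the exact polarity of
 * the top bit vs. the toggle is defined by the XIVE EQ layout, and real
 * entries are big-endian):
 *
 *	static u32 demo_read_eq(const u32 *qpage, u32 msk, u32 *idx,
 *				u32 *toggle)
 *	{
 *		u32 entry = qpage[*idx];
 *
 *		if ((entry >> 31) != *toggle)	// nothing new for this lap
 *			return 0;
 *		*idx = (*idx + 1) & msk;	// msk == queue size - 1
 *		if (*idx == 0)
 *			*toggle ^= 1;		// wrapped, flip generation
 *		return entry & 0x7fffffff;	// payload is the IRQ number
 *	}
 */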
1435 
1436 static void xive_pre_save_scan(struct kvmppc_xive *xive)
1437 {
1438         struct kvm_vcpu *vcpu = NULL;
1439         int i, j;
1440 
1441         /*
1442          * See comment in xive_get_source() about how this
1443          * works. Collect a stable state for all interrupts.
1444          */
1445         for (i = 0; i <= xive->max_sbid; i++) {
1446                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1447                 if (!sb)
1448                         continue;
1449                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1450                         xive_pre_save_mask_irq(xive, sb, j);
1451         }
1452 
1453         /* Then scan the queues and update the "in_queue" flag */
1454         kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1455                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1456                 if (!xc)
1457                         continue;
1458                 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1459                         if (xc->queues[j].qpage)
1460                                 xive_pre_save_queue(xive, &xc->queues[j]);
1461                 }
1462         }
1463 
1464         /* Finally restore interrupt states */
1465         for (i = 0; i <= xive->max_sbid; i++) {
1466                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1467                 if (!sb)
1468                         continue;
1469                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1470                         xive_pre_save_unmask_irq(xive, sb, j);
1471         }
1472 }
1473 
1474 static void xive_post_save_scan(struct kvmppc_xive *xive)
1475 {
1476         u32 i, j;
1477 
1478         /* Clear all the in_queue flags */
1479         for (i = 0; i <= xive->max_sbid; i++) {
1480                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1481                 if (!sb)
1482                         continue;
1483                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1484                         sb->irq_state[j].in_queue = false;
1485         }
1486 
1487         /* Next get_source() will do a new scan */
1488         xive->saved_src_count = 0;
1489 }
1490 
1491 /*
1492  * This returns the source configuration and state to user space.
1493  */
1494 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1495 {
1496         struct kvmppc_xive_src_block *sb;
1497         struct kvmppc_xive_irq_state *state;
1498         u64 __user *ubufp = (u64 __user *) addr;
1499         u64 val, prio;
1500         u16 idx;
1501 
1502         sb = kvmppc_xive_find_source(xive, irq, &idx);
1503         if (!sb)
1504                 return -ENOENT;
1505 
1506         state = &sb->irq_state[idx];
1507 
1508         if (!state->valid)
1509                 return -ENOENT;
1510 
1511         pr_devel("get_source(%ld)...\n", irq);
1512 
1513         /*
1514          * To properly save the state into something that looks like a
1515          * XICS migration stream, we cannot treat interrupts individually.
1516          *
1517          * Instead, we need to mask them all (and save their previous PQ
1518          * state) to get a stable state in the HW, then sync them to make
1519          * sure any interrupt that had already fired hits its queue, and
1520          * finally scan all the queues to collect which interrupts are
1521          * still present in the queues, so we can set the "pending" flag
1522          * on them and have them resent on restore.
1523          *
1524          * So we do it all when the "first" interrupt gets saved; all the
1525          * state is collected at that point, and the rest of
1526          * xive_get_source() merely converts that state to the expected
1527          * userspace bit mask.
1528          */
1529         if (xive->saved_src_count == 0)
1530                 xive_pre_save_scan(xive);
1531         xive->saved_src_count++;
1532 
1533         /* Convert saved state into something compatible with xics */
1534         val = state->act_server;
1535         prio = state->saved_scan_prio;
1536 
1537         if (prio == MASKED) {
1538                 val |= KVM_XICS_MASKED;
1539                 prio = state->saved_priority;
1540         }
1541         val |= prio << KVM_XICS_PRIORITY_SHIFT;
1542         if (state->lsi) {
1543                 val |= KVM_XICS_LEVEL_SENSITIVE;
1544                 if (state->saved_p)
1545                         val |= KVM_XICS_PENDING;
1546         } else {
1547                 if (state->saved_p)
1548                         val |= KVM_XICS_PRESENTED;
1549 
1550                 if (state->saved_q)
1551                         val |= KVM_XICS_QUEUED;
1552 
1553                 /*
1554                  * We mark it pending (which will attempt a re-delivery)
1555                  * if we are in a queue *or* we were masked and had
1556                  * Q set, which is equivalent to the XICS "masked pending"
1557                  * state.
1558                  */
1559                 if (state->in_queue || (prio == MASKED && state->saved_q))
1560                         val |= KVM_XICS_PENDING;
1561         }
1562 
1563         /*
1564          * If that was the last interrupt saved, reset the
1565          * in_queue flags
1566          */
1567         if (xive->saved_src_count == xive->src_count)
1568                 xive_post_save_scan(xive);
1569 
1570         /* Copy the result to userspace */
1571         if (put_user(val, ubufp))
1572                 return -EFAULT;
1573 
1574         return 0;
1575 }
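
/*
 * Roughly how userspace consumes this, via the generic KVM device
 * attribute ioctls (hedged sketch; "xive_fd" is assumed to be the fd
 * returned by KVM_CREATE_DEVICE and "irq" a valid guest source number):
 *
 *	__u64 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(unsigned long)&val,
 *	};
 *
 *	if (ioctl(xive_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_GET_DEVICE_ATTR");
 *
 *	// decode using the same layout built above
 *	__u32 server  = val & KVM_XICS_DESTINATION_MASK;
 *	__u8  prio    = (val >> KVM_XICS_PRIORITY_SHIFT) & KVM_XICS_PRIORITY_MASK;
 *	bool  masked  = val & KVM_XICS_MASKED;
 *	bool  pending = val & KVM_XICS_PENDING;
 */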
1576 
1577 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1578         struct kvmppc_xive *xive, int irq)
1579 {
1580         struct kvmppc_xive_src_block *sb;
1581         int i, bid;
1582 
1583         bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1584 
1585         mutex_lock(&xive->lock);
1586 
1587         /* block already exists - somebody else got here first */
1588         if (xive->src_blocks[bid])
1589                 goto out;
1590 
1591         /* Create the ICS */
1592         sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1593         if (!sb)
1594                 goto out;
1595 
1596         sb->id = bid;
1597 
1598         for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1599                 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1600                 sb->irq_state[i].eisn = 0;
1601                 sb->irq_state[i].guest_priority = MASKED;
1602                 sb->irq_state[i].saved_priority = MASKED;
1603                 sb->irq_state[i].act_priority = MASKED;
1604         }
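        /*
         * Make sure the new block is fully initialized before it is
         * published via xive->src_blocks[] below; lookups do a plain
         * load of that pointer and then dereference it.
         */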
1605         smp_wmb();
1606         xive->src_blocks[bid] = sb;
1607 
1608         if (bid > xive->max_sbid)
1609                 xive->max_sbid = bid;
1610 
1611 out:
1612         mutex_unlock(&xive->lock);
1613         return xive->src_blocks[bid];
1614 }
1615 
1616 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1617 {
1618         struct kvm *kvm = xive->kvm;
1619         struct kvm_vcpu *vcpu = NULL;
1620         int i;
1621 
1622         kvm_for_each_vcpu(i, vcpu, kvm) {
1623                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1624 
1625                 if (!xc)
1626                         continue;
1627 
1628                 if (xc->delayed_irq == irq) {
1629                         xc->delayed_irq = 0;
1630                         xive->delayed_irqs--;
1631                         return true;
1632                 }
1633         }
1634         return false;
1635 }
1636 
1637 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1638 {
1639         struct kvmppc_xive_src_block *sb;
1640         struct kvmppc_xive_irq_state *state;
1641         u64 __user *ubufp = (u64 __user *) addr;
1642         u16 idx;
1643         u64 val;
1644         u8 act_prio, guest_prio;
1645         u32 server;
1646         int rc = 0;
1647 
1648         if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1649                 return -ENOENT;
1650 
1651         pr_devel("set_source(irq=0x%lx)\n", irq);
1652 
1653         /* Find the source */
1654         sb = kvmppc_xive_find_source(xive, irq, &idx);
1655         if (!sb) {
1656                 pr_devel("No source, creating source block...\n");
1657                 sb = kvmppc_xive_create_src_block(xive, irq);
1658                 if (!sb) {
1659                         pr_devel("Failed to create block...\n");
1660                         return -ENOMEM;
1661                 }
1662         }
1663         state = &sb->irq_state[idx];
1664 
1665         /* Read user passed data */
1666         if (get_user(val, ubufp)) {
1667                 pr_devel("fault getting user info !\n");
1668                 return -EFAULT;
1669         }
1670 
1671         server = val & KVM_XICS_DESTINATION_MASK;
1672         guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1673 
1674         pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
1675                  val, server, guest_prio);
1676 
1677         /*
1678          * If the source doesn't already have an IPI, allocate
1679          * one and get the corresponding data
1680          */
1681         if (!state->ipi_number) {
1682                 state->ipi_number = xive_native_alloc_irq();
1683                 if (state->ipi_number == 0) {
1684                         pr_devel("Failed to allocate IPI !\n");
1685                         return -ENOMEM;
1686                 }
1687                 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1688                 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1689         }
1690 
1691         /*
1692          * We use lock_and_mask() to set us in the right masked
1693          * state. We will override that state from the saved state
1694          * further down, but this will handle the cases of interrupts
1695          * that need FW masking. We set the initial guest_priority to
1696          * 0 before calling it to ensure it actually performs the masking.
1697          */
1698         state->guest_priority = 0;
1699         xive_lock_and_mask(xive, sb, state);
1700 
1701         /*
1702          * Now, we select a target if we have one. If we don't, we
1703          * leave the interrupt untargeted. It means that an interrupt
1704          * can become "untargeted" across migration if it was masked
1705          * by set_xive(), but there is little we can do about it.
1706          */
1707 
1708         /* First convert prio and mark interrupt as untargeted */
1709         act_prio = xive_prio_from_guest(guest_prio);
1710         state->act_priority = MASKED;
1711 
1712         /*
1713          * We need to drop the lock due to the mutex below. Hopefully
1714          * nothing is touching that interrupt, since it hasn't been
1715          * advertised to a running guest yet.
1716          */
1717         arch_spin_unlock(&sb->lock);
1718 
1719         /* If we have a priority, target the interrupt */
1720         if (act_prio != MASKED) {
1721                 /* First, check provisioning of queues */
1722                 mutex_lock(&xive->lock);
1723                 rc = xive_check_provisioning(xive->kvm, act_prio);
1724                 mutex_unlock(&xive->lock);
1725 
1726                 /* Target interrupt */
1727                 if (rc == 0)
1728                         rc = xive_target_interrupt(xive->kvm, state,
1729                                                    server, act_prio);
1730                 /*
1731                  * If provisioning or targeting failed, leave it
1732                  * alone and masked. It will remain disabled until
1733                  * the guest re-targets it.
1734                  */
1735         }
1736 
1737         /*
1738          * Find out if this was a delayed irq stashed in an ICP,
1739          * in which case, treat it as pending
1740          */
1741         if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1742                 val |= KVM_XICS_PENDING;
1743                 pr_devel("  Found delayed ! forcing PENDING !\n");
1744         }
1745 
1746         /* Cleanup the SW state */
1747         state->old_p = false;
1748         state->old_q = false;
1749         state->lsi = false;
1750         state->asserted = false;
1751 
1752         /* Restore LSI state */
1753         if (val & KVM_XICS_LEVEL_SENSITIVE) {
1754                 state->lsi = true;
1755                 if (val & KVM_XICS_PENDING)
1756                         state->asserted = true;
1757                 pr_devel("  LSI ! Asserted=%d\n", state->asserted);
1758         }
1759 
1760         /*
1761          * Restore P and Q. If the interrupt was pending, we
1762          * force Q and !P, which will trigger a resend.
1763          *
1764          * That means that a guest that had both an interrupt
1765          * pending (queued) and Q set will restore with only
1766          * one instance of that interrupt instead of 2, but that
1767          * is perfectly fine as coalescing interrupts that haven't
1768          * been presented yet is always allowed.
1769          */
1770         if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1771                 state->old_p = true;
1772         if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1773                 state->old_q = true;
1774 
1775         pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);
1776 
1777         /*
1778          * If the interrupt was unmasked, update the guest priority,
1779          * perform the appropriate state transition, and re-trigger
1780          * if necessary.
1781          */
1782         if (val & KVM_XICS_MASKED) {
1783                 pr_devel("  masked, saving prio\n");
1784                 state->guest_priority = MASKED;
1785                 state->saved_priority = guest_prio;
1786         } else {
1787                 pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
1788                 xive_finish_unmask(xive, sb, state, guest_prio);
1789                 state->saved_priority = guest_prio;
1790         }
1791 
1792         /* Increment the number of valid sources and mark this one valid */
1793         if (!state->valid)
1794                 xive->src_count++;
1795         state->valid = true;
1796 
1797         return 0;
1798 }
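
/*
 * And the mirror image on restore (again a hedged userspace sketch, not
 * kernel code): the saved 64-bit word is written back with
 * KVM_SET_DEVICE_ATTR, which ends up in xive_set_source() above.
 *
 *	__u64 val = server
 *		  | ((__u64)prio << KVM_XICS_PRIORITY_SHIFT)
 *		  | (masked  ? KVM_XICS_MASKED  : 0)
 *		  | (pending ? KVM_XICS_PENDING : 0);
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(unsigned long)&val,
 *	};
 *
 *	if (ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_SET_DEVICE_ATTR");
 */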
1799 
1800 int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1801                         bool line_status)
1802 {
1803         struct kvmppc_xive *xive = kvm->arch.xive;
1804         struct kvmppc_xive_src_block *sb;
1805         struct kvmppc_xive_irq_state *state;
1806         u16 idx;
1807 
1808         if (!xive)
1809                 return -ENODEV;
1810 
1811         sb = kvmppc_xive_find_source(xive, irq, &idx);
1812         if (!sb)
1813                 return -EINVAL;
1814 
1815         /* Perform locklessly .... (we need to do some RCUisms here...) */
1816         state = &sb->irq_state[idx];
1817         if (!state->valid)
1818                 return -EINVAL;
1819 
1820         /* We don't allow a trigger on a passed-through interrupt */
1821         if (state->pt_number)
1822                 return -EINVAL;
1823 
1824         if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) {
1825                 state->asserted = true;
1826         } else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1827                 state->asserted = false;
1828                 return 0;
1829         }
1830 
1831         /* Trigger the IPI */
1832         xive_irq_trigger(&state->ipi_data);
1833 
1834         return 0;
1835 }
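
/*
 * This is the ->set callback reached from the in-kernel irqchip injection
 * path. A hedged sketch of the VMM side for a level-sensitive source
 * ("vm_fd", "src" and "assert" are this example's own names):
 *
 *	struct kvm_irq_level args = {
 *		.irq   = src,		// guest-visible source number
 *		.level = assert ? KVM_INTERRUPT_SET_LEVEL
 *				: KVM_INTERRUPT_UNSET,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_IRQ_LINE, &args))
 *		err(1, "KVM_IRQ_LINE");
 */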
1836 
1837 static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1838 {
1839         struct kvmppc_xive *xive = dev->private;
1840 
1841         /* We honor the existing XICS ioctl */
1842         switch (attr->group) {
1843         case KVM_DEV_XICS_GRP_SOURCES:
1844                 return xive_set_source(xive, attr->attr, attr->addr);
1845         }
1846         return -ENXIO;
1847 }
1848 
1849 static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1850 {
1851         struct kvmppc_xive *xive = dev->private;
1852 
1853         /* We honor the existing XICS ioctl */
1854         switch (attr->group) {
1855         case KVM_DEV_XICS_GRP_SOURCES:
1856                 return xive_get_source(xive, attr->attr, attr->addr);
1857         }
1858         return -ENXIO;
1859 }
1860 
1861 static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1862 {
1863         /* We honor the same limits as XICS, at least for now */
1864         switch (attr->group) {
1865         case KVM_DEV_XICS_GRP_SOURCES:
1866                 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1867                     attr->attr < KVMPPC_XICS_NR_IRQS)
1868                         return 0;
1869                 break;
1870         }
1871         return -ENXIO;
1872 }
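
/*
 * Userspace can probe whether a given source number is in the supported
 * range with the usual KVM_HAS_DEVICE_ATTR handshake (sketch, reusing the
 * same "xive_fd"/"irq" names as the examples above):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *	};
 *	bool supported = ioctl(xive_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
 */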
1873 
1874 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1875 {
1876         xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1877         xive_native_configure_irq(hw_num, 0, MASKED, 0);
1878 }
1879 
1880 void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1881 {
1882         int i;
1883 
1884         for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1885                 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1886 
1887                 if (!state->valid)
1888                         continue;
1889 
1890                 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1891                 xive_cleanup_irq_data(&state->ipi_data);
1892                 xive_native_free_irq(state->ipi_number);
1893 
1894                 /* Pass-through, cleanup too but keep IRQ hw data */
1895                 if (state->pt_number)
1896                         kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1897 
1898                 state->valid = false;
1899         }
1900 }
1901 
1902 /*
1903  * Called when device fd is closed.  kvm->lock is held.
1904  */
1905 static void kvmppc_xive_release(struct kvm_device *dev)
1906 {
1907         struct kvmppc_xive *xive = dev->private;
1908         struct kvm *kvm = xive->kvm;
1909         struct kvm_vcpu *vcpu;
1910         int i;
1911 
1912         pr_devel("Releasing xive device\n");
1913 
1914         /*
1915          * Since this is the device release function, we know that
1916          * userspace does not have any open fd referring to the
1917          * device.  Therefore none of the device attribute set/get
1918          * functions can be executing concurrently, and similarly,
1919          * the connect_vcpu and set/clr_mapped functions cannot be
1920          * executing either.
1921          */
1922 
1923         debugfs_remove(xive->dentry);
1924 
1925         /*
1926          * We should clean up the vCPU interrupt presenters first.
1927          */
1928         kvm_for_each_vcpu(i, vcpu, kvm) {
1929                 /*
1930                  * Take vcpu->mutex to ensure that no one_reg get/set ioctl
1931                  * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
1932                  * Holding the vcpu->mutex also means that the vcpu cannot
1933                  * be executing the KVM_RUN ioctl, and therefore it cannot
1934                  * be executing the XIVE push or pull code or accessing
1935                  * the XIVE MMIO regions.
1936                  */
1937                 mutex_lock(&vcpu->mutex);
1938                 kvmppc_xive_cleanup_vcpu(vcpu);
1939                 mutex_unlock(&vcpu->mutex);
1940         }
1941 
1942         /*
1943          * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
1944          * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
1945          * against xive code getting called during vcpu execution or
1946          * set/get one_reg operations.
1947          */
1948         kvm->arch.xive = NULL;
1949 
1950         /* Mask and free interrupts */
1951         for (i = 0; i <= xive->max_sbid; i++) {
1952                 if (xive->src_blocks[i])
1953                         kvmppc_xive_free_sources(xive->src_blocks[i]);
1954                 kfree(xive->src_blocks[i]);
1955                 xive->src_blocks[i] = NULL;
1956         }
1957 
1958         if (xive->vp_base != XIVE_INVALID_VP)
1959                 xive_native_free_vp_block(xive->vp_base);
1960 
1961         /*
1962          * A reference to the kvmppc_xive structure is now kept under
1963          * the xive_devices struct of the machine for reuse. For now
1964          * it is only freed when the VM is destroyed, until we fix all
1965          * the execution paths.
1966          */
1967 
1968         kfree(dev);
1969 }
1970 
1971 /*
1972  * When the guest chooses the interrupt mode (XICS legacy or XIVE
1973  * native), the VM switches KVM devices. The previous device will
1974  * be "released" before the new one is created.
1975  *
1976  * Until we are sure all execution paths are well protected, provide a
1977  * fail-safe (transitional) method for device destruction, in which
1978  * the XIVE device pointer is recycled and not directly freed.
1979  */
1980 struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
1981 {
1982         struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
1983                 &kvm->arch.xive_devices.native :
1984                 &kvm->arch.xive_devices.xics_on_xive;
1985         struct kvmppc_xive *xive = *kvm_xive_device;
1986 
1987         if (!xive) {
1988                 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
1989                 *kvm_xive_device = xive;
1990         } else {
1991                 memset(xive, 0, sizeof(*xive));
1992         }
1993 
1994         return xive;
1995 }
1996 
1997 /*
1998  * Create a XICS device with XIVE backend.  kvm->lock is held.
1999  */
2000 static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2001 {
2002         struct kvmppc_xive *xive;
2003         struct kvm *kvm = dev->kvm;
2004         int ret = 0;
2005 
2006         pr_devel("Creating xive for partition\n");
2007 
2008         /* Already there ? */
2009         if (kvm->arch.xive)
2010                 return -EEXIST;
2011 
2012         xive = kvmppc_xive_get_device(kvm, type);
2013         if (!xive)
2014                 return -ENOMEM;
2015 
2016         dev->private = xive;
2017         xive->dev = dev;
2018         xive->kvm = kvm;
2019         mutex_init(&xive->lock);
2020 
2021         /* We use the default queue size set by the host */
2022         xive->q_order = xive_native_default_eq_shift();
2023         if (xive->q_order < PAGE_SHIFT)
2024                 xive->q_page_order = 0;
2025         else
2026                 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2027 
2028         /* Allocate a bunch of VPs */
2029         xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
2030         pr_devel("VP_Base=%x\n", xive->vp_base);
2031 
2032         if (xive->vp_base == XIVE_INVALID_VP)
2033                 ret = -ENOMEM;
2034 
2035         xive->single_escalation = xive_native_has_single_escalation();
2036 
2037         if (ret)
2038                 return ret;
2039 
2040         kvm->arch.xive = xive;
2041         return 0;
2042 }
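
/*
 * For context, this device is instantiated from userspace with the
 * generic KVM device API, and each vcpu is then connected to it through
 * KVM_CAP_IRQ_XICS, which lands in kvmppc_xive_connect_vcpu() when the
 * XIVE backend is in use (hedged sketch; "vm_fd", "vcpu_fd" and "server"
 * are this example's own names):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		err(1, "KVM_CREATE_DEVICE");
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_IRQ_XICS,
 *		.args = { cd.fd, server },	// device fd, XICS server number
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap))
 *		err(1, "KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS)");
 */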
2043 
2044 int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2045 {
2046         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2047         unsigned int i;
2048 
2049         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2050                 struct xive_q *q = &xc->queues[i];
2051                 u32 i0, i1, idx;
2052 
2053                 if (!q->qpage && !xc->esc_virq[i])
2054                         continue;
2055 
2056                 seq_printf(m, " [q%d]: ", i);
2057 
2058                 if (q->qpage) {
2059                         idx = q->idx;
2060                         i0 = be32_to_cpup(q->qpage + idx);
2061                         idx = (idx + 1) & q->msk;
2062                         i1 = be32_to_cpup(q->qpage + idx);
2063                         seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2064                                    i0, i1);
2065                 }
2066                 if (xc->esc_virq[i]) {
2067                         struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2068                         struct xive_irq_data *xd =
2069                                 irq_data_get_irq_handler_data(d);
2070                         u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2071 
2072                         seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2073                                    (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2074                                    (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2075                                    xc->esc_virq[i], pq, xd->eoi_page);
2076                         seq_puts(m, "\n");
2077                 }
2078         }
2079         return 0;
2080 }
2081 
2082 static int xive_debug_show(struct seq_file *m, void *private)
2083 {
2084         struct kvmppc_xive *xive = m->private;
2085         struct kvm *kvm = xive->kvm;
2086         struct kvm_vcpu *vcpu;
2087         u64 t_rm_h_xirr = 0;
2088         u64 t_rm_h_ipoll = 0;
2089         u64 t_rm_h_cppr = 0;
2090         u64 t_rm_h_eoi = 0;
2091         u64 t_rm_h_ipi = 0;
2092         u64 t_vm_h_xirr = 0;
2093         u64 t_vm_h_ipoll = 0;
2094         u64 t_vm_h_cppr = 0;
2095         u64 t_vm_h_eoi = 0;
2096         u64 t_vm_h_ipi = 0;
2097         unsigned int i;
2098 
2099         if (!kvm)
2100                 return 0;
2101 
2102         seq_printf(m, "=========\nVCPU state\n=========\n");
2103 
2104         kvm_for_each_vcpu(i, vcpu, kvm) {
2105                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2106 
2107                 if (!xc)
2108                         continue;
2109 
2110                 seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
2111                            " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2112                            xc->server_num, xc->cppr, xc->hw_cppr,
2113                            xc->mfrr, xc->pending,
2114                            xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2115 
2116                 kvmppc_xive_debug_show_queues(m, vcpu);
2117 
2118                 t_rm_h_xirr += xc->stat_rm_h_xirr;
2119                 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2120                 t_rm_h_cppr += xc->stat_rm_h_cppr;
2121                 t_rm_h_eoi += xc->stat_rm_h_eoi;
2122                 t_rm_h_ipi += xc->stat_rm_h_ipi;
2123                 t_vm_h_xirr += xc->stat_vm_h_xirr;
2124                 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2125                 t_vm_h_cppr += xc->stat_vm_h_cppr;
2126                 t_vm_h_eoi += xc->stat_vm_h_eoi;
2127                 t_vm_h_ipi += xc->stat_vm_h_ipi;
2128         }
2129 
2130         seq_printf(m, "Hcalls totals\n");
2131         seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2132         seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2133         seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2134         seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2135         seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2136 
2137         return 0;
2138 }
2139 
2140 DEFINE_SHOW_ATTRIBUTE(xive_debug);
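
/*
 * DEFINE_SHOW_ATTRIBUTE() comes from <linux/seq_file.h> and generates the
 * xive_debug_fops used below; roughly speaking it expands to something
 * like:
 *
 *	static int xive_debug_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, xive_debug_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations xive_debug_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = xive_debug_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 */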
2141 
2142 static void xive_debugfs_init(struct kvmppc_xive *xive)
2143 {
2144         char *name;
2145 
2146         name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2147         if (!name) {
2148                 pr_err("%s: no memory for name\n", __func__);
2149                 return;
2150         }
2151 
2152         xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2153                                            xive, &xive_debug_fops);
2154 
2155         pr_debug("%s: created %s\n", __func__, name);
2156         kfree(name);
2157 }
2158 
2159 static void kvmppc_xive_init(struct kvm_device *dev)
2160 {
2161         struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2162 
2163         /* Register some debug interfaces */
2164         xive_debugfs_init(xive);
2165 }
2166 
2167 struct kvm_device_ops kvm_xive_ops = {
2168         .name = "kvm-xive",
2169         .create = kvmppc_xive_create,
2170         .init = kvmppc_xive_init,
2171         .release = kvmppc_xive_release,
2172         .set_attr = xive_set_attr,
2173         .get_attr = xive_get_attr,
2174         .has_attr = xive_has_attr,
2175 };
2176 
2177 void kvmppc_xive_init_module(void)
2178 {
2179         __xive_vm_h_xirr = xive_vm_h_xirr;
2180         __xive_vm_h_ipoll = xive_vm_h_ipoll;
2181         __xive_vm_h_ipi = xive_vm_h_ipi;
2182         __xive_vm_h_cppr = xive_vm_h_cppr;
2183         __xive_vm_h_eoi = xive_vm_h_eoi;
2184 }
2185 
2186 void kvmppc_xive_exit_module(void)
2187 {
2188         __xive_vm_h_xirr = NULL;
2189         __xive_vm_h_ipoll = NULL;
2190         __xive_vm_h_ipi = NULL;
2191         __xive_vm_h_cppr = NULL;
2192         __xive_vm_h_eoi = NULL;
2193 }
