root/arch/powerpc/kvm/book3s_xive_native.c


DEFINITIONS

This source file includes the following definitions; a short, hypothetical usage sketch of the device follows the list.
  1. xive_vm_esb_load
  2. kvmppc_xive_native_cleanup_queue
  3. kvmppc_xive_native_configure_queue
  4. kvmppc_xive_native_cleanup_vcpu
  5. kvmppc_xive_native_connect_vcpu
  6. kvmppc_xive_native_reset_mapped
  7. xive_native_esb_fault
  8. xive_native_tima_fault
  9. kvmppc_xive_native_mmap
  10. kvmppc_xive_native_set_source
  11. kvmppc_xive_native_update_source_config
  12. kvmppc_xive_native_set_source_config
  13. kvmppc_xive_native_sync_source
  14. xive_native_validate_queue_size
  15. kvmppc_xive_native_set_queue_config
  16. kvmppc_xive_native_get_queue_config
  17. kvmppc_xive_reset_sources
  18. kvmppc_xive_reset
  19. kvmppc_xive_native_sync_sources
  20. kvmppc_xive_native_vcpu_eq_sync
  21. kvmppc_xive_native_eq_sync
  22. kvmppc_xive_native_set_attr
  23. kvmppc_xive_native_get_attr
  24. kvmppc_xive_native_has_attr
  25. kvmppc_xive_native_release
  26. kvmppc_xive_native_create
  27. kvmppc_xive_native_get_vp
  28. kvmppc_xive_native_set_vp
  29. kvmppc_xive_native_supported
  30. xive_native_debug_show
  31. xive_native_debug_open
  32. xive_native_debugfs_init
  33. kvmppc_xive_native_init
  34. kvmppc_xive_native_init_module
  35. kvmppc_xive_native_exit_module
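
Usage note: the "kvm-xive-native" device implemented below is instantiated by
userspace (QEMU, for instance) with the KVM_CREATE_DEVICE ioctl, configured
through the KVM_DEV_XIVE_GRP_* attribute groups, and its TIMA pages are then
mmapped from the returned device fd at the fixed offsets checked in
kvmppc_xive_native_mmap(). The sketch below is a minimal, hypothetical
illustration of that sequence; the helper name, vm_fd and the error handling
are assumptions, while KVM_DEV_TYPE_XIVE and KVM_XIVE_TIMA_PAGE_OFFSET come
from the KVM UAPI headers.

        #include <linux/kvm.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        /* Sketch only: vm_fd is an already created KVM VM file descriptor. */
        static int xive_native_device_open(int vm_fd, void **tima)
        {
                struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XIVE };
                long psize = sysconf(_SC_PAGESIZE);

                if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                        return -1;

                /*
                 * Up to 4 TIMA pages can be mapped; only the OS page (index 2)
                 * is populated by xive_native_tima_fault(), the others SIGBUS.
                 */
                *tima = mmap(NULL, 4 * psize, PROT_READ | PROT_WRITE, MAP_SHARED,
                             cd.fd, (off_t)KVM_XIVE_TIMA_PAGE_OFFSET * psize);
                if (*tima == MAP_FAILED)
                        return -1;

                return cd.fd;
        }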

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (c) 2017-2019, IBM Corporation.
   4  */
   5 
   6 #define pr_fmt(fmt) "xive-kvm: " fmt
   7 
   8 #include <linux/kernel.h>
   9 #include <linux/kvm_host.h>
  10 #include <linux/err.h>
  11 #include <linux/gfp.h>
  12 #include <linux/spinlock.h>
  13 #include <linux/delay.h>
  14 #include <linux/file.h>
  15 #include <asm/uaccess.h>
  16 #include <asm/kvm_book3s.h>
  17 #include <asm/kvm_ppc.h>
  18 #include <asm/hvcall.h>
  19 #include <asm/xive.h>
  20 #include <asm/xive-regs.h>
  21 #include <asm/debug.h>
  22 #include <asm/debugfs.h>
  23 #include <asm/opal.h>
  24 
  25 #include <linux/debugfs.h>
  26 #include <linux/seq_file.h>
  27 
  28 #include "book3s_xive.h"
  29 
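     /*
      * A load from an ESB page returns the current PQ state of the source
      * and, depending on the offset (XIVE_ESB_SET_PQ_01, etc.), may update
      * it as a side effect. The offset fixup below is the
      * XIVE_IRQ_FLAG_SHIFT_BUG workaround for early POWER9 chips.
      */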
  30 static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
  31 {
  32         u64 val;
  33 
  34         if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
  35                 offset |= offset << 4;
  36 
  37         val = in_be64(xd->eoi_mmio + offset);
  38         return (u8)val;
  39 }
  40 
  41 static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
  42 {
  43         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  44         struct xive_q *q = &xc->queues[prio];
  45 
  46         xive_native_disable_queue(xc->vp_id, q, prio);
  47         if (q->qpage) {
  48                 put_page(virt_to_page(q->qpage));
  49                 q->qpage = NULL;
  50         }
  51 }
  52 
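     /*
      * Thin wrapper around xive_native_configure_queue(): on success, drop
      * the reference held on the previous queue page (taken by gfn_to_page()
      * in kvmppc_xive_native_set_queue_config()) now that the END no longer
      * points to it.
      */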
  53 static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
  54                                               u8 prio, __be32 *qpage,
  55                                               u32 order, bool can_escalate)
  56 {
  57         int rc;
  58         __be32 *qpage_prev = q->qpage;
  59 
  60         rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
  61                                          can_escalate);
  62         if (rc)
  63                 return rc;
  64 
  65         if (qpage_prev)
  66                 put_page(virt_to_page(qpage_prev));
  67 
  68         return rc;
  69 }
  70 
  71 void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
  72 {
  73         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  74         int i;
  75 
  76         if (!kvmppc_xive_enabled(vcpu))
  77                 return;
  78 
  79         if (!xc)
  80                 return;
  81 
  82         pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);
  83 
  84         /* Ensure no interrupt is still routed to that VP */
  85         xc->valid = false;
  86         kvmppc_xive_disable_vcpu_interrupts(vcpu);
  87 
  88         /* Free escalations */
  89         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
  90                 /* Free the escalation irq */
  91                 if (xc->esc_virq[i]) {
  92                         if (xc->xive->single_escalation)
  93                                 xive_cleanup_single_escalation(vcpu, xc,
  94                                                         xc->esc_virq[i]);
  95                         free_irq(xc->esc_virq[i], vcpu);
  96                         irq_dispose_mapping(xc->esc_virq[i]);
  97                         kfree(xc->esc_virq_names[i]);
  98                         xc->esc_virq[i] = 0;
  99                 }
 100         }
 101 
 102         /* Disable the VP */
 103         xive_native_disable_vp(xc->vp_id);
 104 
 105         /* Clear the cam word so guest entry won't try to push context */
 106         vcpu->arch.xive_cam_word = 0;
 107 
 108         /* Free the queues */
 109         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
 110                 kvmppc_xive_native_cleanup_queue(vcpu, i);
 111         }
 112 
 113         /* Free the VP */
 114         kfree(xc);
 115 
 116         /* Cleanup the vcpu */
 117         vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
 118         vcpu->arch.xive_vcpu = NULL;
 119 }
 120 
 121 int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 122                                     struct kvm_vcpu *vcpu, u32 server_num)
 123 {
 124         struct kvmppc_xive *xive = dev->private;
 125         struct kvmppc_xive_vcpu *xc = NULL;
 126         int rc;
 127         u32 vp_id;
 128 
 129         pr_devel("native_connect_vcpu(server=%d)\n", server_num);
 130 
 131         if (dev->ops != &kvm_xive_native_ops) {
 132                 pr_devel("Wrong ops !\n");
 133                 return -EPERM;
 134         }
 135         if (xive->kvm != vcpu->kvm)
 136                 return -EPERM;
 137         if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
 138                 return -EBUSY;
 139         if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
 140                 pr_devel("Out of bounds !\n");
 141                 return -EINVAL;
 142         }
 143 
 144         mutex_lock(&xive->lock);
 145 
 146         vp_id = kvmppc_xive_vp(xive, server_num);
 147         if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
 148                 pr_devel("Duplicate !\n");
 149                 rc = -EEXIST;
 150                 goto bail;
 151         }
 152 
 153         xc = kzalloc(sizeof(*xc), GFP_KERNEL);
 154         if (!xc) {
 155                 rc = -ENOMEM;
 156                 goto bail;
 157         }
 158 
 159         vcpu->arch.xive_vcpu = xc;
 160         xc->xive = xive;
 161         xc->vcpu = vcpu;
 162         xc->server_num = server_num;
 163 
 164         xc->vp_id = vp_id;
 165         xc->valid = true;
 166         vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
 167 
 168         rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
 169         if (rc) {
 170                 pr_err("Failed to get VP info from OPAL: %d\n", rc);
 171                 goto bail;
 172         }
 173 
 174         /*
 175          * Enable the VP first as the single escalation mode will
 176          * affect the escalation interrupt numbering
 177          */
 178         rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
 179         if (rc) {
 180                 pr_err("Failed to enable VP in OPAL: %d\n", rc);
 181                 goto bail;
 182         }
 183 
 184         /* Configure VCPU fields for use by assembly push/pull */
 185         vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
 186         vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
 187 
 188         /* TODO: reset all queues to a clean state ? */
 189 bail:
 190         mutex_unlock(&xive->lock);
 191         if (rc)
 192                 kvmppc_xive_native_cleanup_vcpu(vcpu);
 193 
 194         return rc;
 195 }
 196 
 197 /*
 198  * Device passthrough support
 199  */
 200 static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
 201 {
 202         struct kvmppc_xive *xive = kvm->arch.xive;
 203         pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;
 204 
 205         if (irq >= KVMPPC_XIVE_NR_IRQS)
 206                 return -EINVAL;
 207 
 208         /*
 209          * Clear the ESB pages of the IRQ number being mapped (or
 210          * unmapped) into the guest and let the VM fault handler
 211          * repopulate with the appropriate ESB pages (device or IC)
 212          */
 213         pr_debug("clearing esb pages for girq 0x%lx\n", irq);
 214         mutex_lock(&xive->mapping_lock);
 215         if (xive->mapping)
 216                 unmap_mapping_range(xive->mapping,
 217                                     esb_pgoff << PAGE_SHIFT,
 218                                     2ull << PAGE_SHIFT, 1);
 219         mutex_unlock(&xive->mapping_lock);
 220         return 0;
 221 }
 222 
 223 static struct kvmppc_xive_ops kvmppc_xive_native_ops =  {
 224         .reset_mapped = kvmppc_xive_native_reset_mapped,
 225 };
 226 
 227 static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
 228 {
 229         struct vm_area_struct *vma = vmf->vma;
 230         struct kvm_device *dev = vma->vm_file->private_data;
 231         struct kvmppc_xive *xive = dev->private;
 232         struct kvmppc_xive_src_block *sb;
 233         struct kvmppc_xive_irq_state *state;
 234         struct xive_irq_data *xd;
 235         u32 hw_num;
 236         u16 src;
 237         u64 page;
 238         unsigned long irq;
 239         u64 page_offset;
 240 
 241         /*
 242          * Linux/KVM uses a two-page ESB setting, one for trigger and
 243          * one for EOI
 244          */
 245         page_offset = vmf->pgoff - vma->vm_pgoff;
 246         irq = page_offset / 2;
 247 
 248         sb = kvmppc_xive_find_source(xive, irq, &src);
 249         if (!sb) {
 250                 pr_devel("%s: source %lx not found !\n", __func__, irq);
 251                 return VM_FAULT_SIGBUS;
 252         }
 253 
 254         state = &sb->irq_state[src];
 255         kvmppc_xive_select_irq(state, &hw_num, &xd);
 256 
 257         arch_spin_lock(&sb->lock);
 258 
 259         /*
 260          * first/even page is for trigger
 261          * second/odd page is for EOI and management.
 262          */
 263         page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
 264         arch_spin_unlock(&sb->lock);
 265 
 266         if (WARN_ON(!page)) {
 267                 pr_err("%s: accessing invalid ESB page for source %lx !\n",
 268                        __func__, irq);
 269                 return VM_FAULT_SIGBUS;
 270         }
 271 
 272         vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
 273         return VM_FAULT_NOPAGE;
 274 }
 275 
 276 static const struct vm_operations_struct xive_native_esb_vmops = {
 277         .fault = xive_native_esb_fault,
 278 };
 279 
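     /*
      * The TIMA is exposed as four pages: HW (0), HV (1), OS (2) and
      * USER (3). Only the OS ring page is mapped into userspace; accesses
      * to the other rings fault with SIGBUS.
      */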
 280 static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
 281 {
 282         struct vm_area_struct *vma = vmf->vma;
 283 
 284         switch (vmf->pgoff - vma->vm_pgoff) {
 285         case 0: /* HW - forbid access */
 286         case 1: /* HV - forbid access */
 287                 return VM_FAULT_SIGBUS;
 288         case 2: /* OS */
 289                 vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
 290                 return VM_FAULT_NOPAGE;
 291         case 3: /* USER - TODO */
 292         default:
 293                 return VM_FAULT_SIGBUS;
 294         }
 295 }
 296 
 297 static const struct vm_operations_struct xive_native_tima_vmops = {
 298         .fault = xive_native_tima_fault,
 299 };
 300 
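     /*
      * Fixed layout of the device mapping: up to 4 pages at
      * KVM_XIVE_TIMA_PAGE_OFFSET for the thread management area, and up to
      * 2 * KVMPPC_XIVE_NR_IRQS pages at KVM_XIVE_ESB_PAGE_OFFSET for the
      * per-source trigger/EOI ESB pairs.
      */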
 301 static int kvmppc_xive_native_mmap(struct kvm_device *dev,
 302                                    struct vm_area_struct *vma)
 303 {
 304         struct kvmppc_xive *xive = dev->private;
 305 
 306         /* We only allow mappings at fixed offset for now */
 307         if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
 308                 if (vma_pages(vma) > 4)
 309                         return -EINVAL;
 310                 vma->vm_ops = &xive_native_tima_vmops;
 311         } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
 312                 if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
 313                         return -EINVAL;
 314                 vma->vm_ops = &xive_native_esb_vmops;
 315         } else {
 316                 return -EINVAL;
 317         }
 318 
 319         vma->vm_flags |= VM_IO | VM_PFNMAP;
 320         vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 321 
 322         /*
 323          * Grab the KVM device file address_space to be able to clear
 324          * the ESB pages mapping when a device is passed-through into
 325          * the guest.
 326          */
 327         xive->mapping = vma->vm_file->f_mapping;
 328         return 0;
 329 }
 330 
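     /*
      * KVM_DEV_XIVE_GRP_SOURCE: create or update a guest interrupt source.
      * The 64-bit word at 'addr' carries the source type
      * (KVM_XIVE_LEVEL_SENSITIVE) and, for LSIs, the current level
      * (KVM_XIVE_LEVEL_ASSERTED). The source is backed by an IPI and
      * starts masked.
      */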
 331 static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
 332                                          u64 addr)
 333 {
 334         struct kvmppc_xive_src_block *sb;
 335         struct kvmppc_xive_irq_state *state;
 336         u64 __user *ubufp = (u64 __user *) addr;
 337         u64 val;
 338         u16 idx;
 339         int rc;
 340 
 341         pr_devel("%s irq=0x%lx\n", __func__, irq);
 342 
 343         if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
 344                 return -E2BIG;
 345 
 346         sb = kvmppc_xive_find_source(xive, irq, &idx);
 347         if (!sb) {
 348                 pr_debug("No source, creating source block...\n");
 349                 sb = kvmppc_xive_create_src_block(xive, irq);
 350                 if (!sb) {
 351                         pr_err("Failed to create block...\n");
 352                         return -ENOMEM;
 353                 }
 354         }
 355         state = &sb->irq_state[idx];
 356 
 357         if (get_user(val, ubufp)) {
 358                 pr_err("fault getting user info !\n");
 359                 return -EFAULT;
 360         }
 361 
 362         arch_spin_lock(&sb->lock);
 363 
 364         /*
 365          * If the source doesn't already have an IPI, allocate
 366          * one and get the corresponding data
 367          */
 368         if (!state->ipi_number) {
 369                 state->ipi_number = xive_native_alloc_irq();
 370                 if (state->ipi_number == 0) {
 371                         pr_err("Failed to allocate IRQ !\n");
 372                         rc = -ENXIO;
 373                         goto unlock;
 374                 }
 375                 xive_native_populate_irq_data(state->ipi_number,
 376                                               &state->ipi_data);
 377                 pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
 378                          state->ipi_number, irq);
 379         }
 380 
 381         /* Restore LSI state */
 382         if (val & KVM_XIVE_LEVEL_SENSITIVE) {
 383                 state->lsi = true;
 384                 if (val & KVM_XIVE_LEVEL_ASSERTED)
 385                         state->asserted = true;
 386                 pr_devel("  LSI ! Asserted=%d\n", state->asserted);
 387         }
 388 
 389         /* Mask IRQ to start with */
 390         state->act_server = 0;
 391         state->act_priority = MASKED;
 392         xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 393         xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
 394 
 395         /* Increment the number of valid sources and mark this one valid */
 396         if (!state->valid)
 397                 xive->src_count++;
 398         state->valid = true;
 399 
 400         rc = 0;
 401 
 402 unlock:
 403         arch_spin_unlock(&sb->lock);
 404 
 405         return rc;
 406 }
 407 
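     /*
      * Route the source: point its EAS at the END of the target
      * server/priority with 'eisn' as the event data, or park it on the
      * MASKED priority when masked.
      */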
 408 static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
 409                                         struct kvmppc_xive_src_block *sb,
 410                                         struct kvmppc_xive_irq_state *state,
 411                                         u32 server, u8 priority, bool masked,
 412                                         u32 eisn)
 413 {
 414         struct kvm *kvm = xive->kvm;
 415         u32 hw_num;
 416         int rc = 0;
 417 
 418         arch_spin_lock(&sb->lock);
 419 
 420         if (state->act_server == server && state->act_priority == priority &&
 421             state->eisn == eisn)
 422                 goto unlock;
 423 
 424         pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
 425                  priority, server, masked, state->act_server,
 426                  state->act_priority);
 427 
 428         kvmppc_xive_select_irq(state, &hw_num, NULL);
 429 
 430         if (priority != MASKED && !masked) {
 431                 rc = kvmppc_xive_select_target(kvm, &server, priority);
 432                 if (rc)
 433                         goto unlock;
 434 
 435                 state->act_priority = priority;
 436                 state->act_server = server;
 437                 state->eisn = eisn;
 438 
 439                 rc = xive_native_configure_irq(hw_num,
 440                                                kvmppc_xive_vp(xive, server),
 441                                                priority, eisn);
 442         } else {
 443                 state->act_priority = MASKED;
 444                 state->act_server = 0;
 445                 state->eisn = 0;
 446 
 447                 rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
 448         }
 449 
 450 unlock:
 451         arch_spin_unlock(&sb->lock);
 452         return rc;
 453 }
 454 
 455 static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
 456                                                 long irq, u64 addr)
 457 {
 458         struct kvmppc_xive_src_block *sb;
 459         struct kvmppc_xive_irq_state *state;
 460         u64 __user *ubufp = (u64 __user *) addr;
 461         u16 src;
 462         u64 kvm_cfg;
 463         u32 server;
 464         u8 priority;
 465         bool masked;
 466         u32 eisn;
 467 
 468         sb = kvmppc_xive_find_source(xive, irq, &src);
 469         if (!sb)
 470                 return -ENOENT;
 471 
 472         state = &sb->irq_state[src];
 473 
 474         if (!state->valid)
 475                 return -EINVAL;
 476 
 477         if (get_user(kvm_cfg, ubufp))
 478                 return -EFAULT;
 479 
 480         pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);
 481 
 482         priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
 483                 KVM_XIVE_SOURCE_PRIORITY_SHIFT;
 484         server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
 485                 KVM_XIVE_SOURCE_SERVER_SHIFT;
 486         masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
 487                 KVM_XIVE_SOURCE_MASKED_SHIFT;
 488         eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
 489                 KVM_XIVE_SOURCE_EISN_SHIFT;
 490 
 491         if (priority != xive_prio_from_guest(priority)) {
 492                 pr_err("invalid priority for queue %d for VCPU %d\n",
 493                        priority, server);
 494                 return -EINVAL;
 495         }
 496 
 497         return kvmppc_xive_native_update_source_config(xive, sb, state, server,
 498                                                        priority, masked, eisn);
 499 }
 500 
 501 static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
 502                                           long irq, u64 addr)
 503 {
 504         struct kvmppc_xive_src_block *sb;
 505         struct kvmppc_xive_irq_state *state;
 506         struct xive_irq_data *xd;
 507         u32 hw_num;
 508         u16 src;
 509         int rc = 0;
 510 
 511         pr_devel("%s irq=0x%lx", __func__, irq);
 512 
 513         sb = kvmppc_xive_find_source(xive, irq, &src);
 514         if (!sb)
 515                 return -ENOENT;
 516 
 517         state = &sb->irq_state[src];
 518 
 519         rc = -EINVAL;
 520 
 521         arch_spin_lock(&sb->lock);
 522 
 523         if (state->valid) {
 524                 kvmppc_xive_select_irq(state, &hw_num, &xd);
 525                 xive_native_sync_source(hw_num);
 526                 rc = 0;
 527         }
 528 
 529         arch_spin_unlock(&sb->lock);
 530         return rc;
 531 }
 532 
 533 static int xive_native_validate_queue_size(u32 qshift)
 534 {
 535         /*
 536          * We only support 64K pages for the moment. This is also
 537          * advertised in the DT property "ibm,xive-eq-sizes"
 538          */
 539         switch (qshift) {
 540         case 0: /* EQ reset */
 541         case 16:
 542                 return 0;
 543         case 12:
 544         case 21:
 545         case 24:
 546         default:
 547                 return -EINVAL;
 548         }
 549 }
 550 
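     /*
      * KVM_DEV_XIVE_GRP_EQ_CONFIG: configure (or reset, when qshift is 0)
      * the event queue of a server/priority pair. The queue page is a
      * guest page described by the kvm_ppc_xive_eq structure read from
      * 'addr'.
      */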
 551 static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
 552                                                long eq_idx, u64 addr)
 553 {
 554         struct kvm *kvm = xive->kvm;
 555         struct kvm_vcpu *vcpu;
 556         struct kvmppc_xive_vcpu *xc;
 557         void __user *ubufp = (void __user *) addr;
 558         u32 server;
 559         u8 priority;
 560         struct kvm_ppc_xive_eq kvm_eq;
 561         int rc;
 562         __be32 *qaddr = 0;
 563         struct page *page;
 564         struct xive_q *q;
 565         gfn_t gfn;
 566         unsigned long page_size;
 567         int srcu_idx;
 568 
 569         /*
 570          * Demangle priority/server tuple from the EQ identifier
 571          */
 572         priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
 573                 KVM_XIVE_EQ_PRIORITY_SHIFT;
 574         server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
 575                 KVM_XIVE_EQ_SERVER_SHIFT;
 576 
 577         if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
 578                 return -EFAULT;
 579 
 580         vcpu = kvmppc_xive_find_server(kvm, server);
 581         if (!vcpu) {
 582                 pr_err("Can't find server %d\n", server);
 583                 return -ENOENT;
 584         }
 585         xc = vcpu->arch.xive_vcpu;
 586 
 587         if (priority != xive_prio_from_guest(priority)) {
 588                 pr_err("Trying to restore invalid queue %d for VCPU %d\n",
 589                        priority, server);
 590                 return -EINVAL;
 591         }
 592         q = &xc->queues[priority];
 593 
 594         pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
 595                  __func__, server, priority, kvm_eq.flags,
 596                  kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
 597 
 598         /* reset queue and disable queueing */
 599         if (!kvm_eq.qshift) {
 600                 q->guest_qaddr  = 0;
 601                 q->guest_qshift = 0;
 602 
 603                 rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
 604                                                         NULL, 0, true);
 605                 if (rc) {
 606                         pr_err("Failed to reset queue %d for VCPU %d: %d\n",
 607                                priority, xc->server_num, rc);
 608                         return rc;
 609                 }
 610 
 611                 return 0;
 612         }
 613 
 614         /*
 615          * sPAPR specifies an "Unconditional Notify (n)" flag for the
 616          * H_INT_SET_QUEUE_CONFIG hcall which forces notification
 617          * without using the coalescing mechanisms provided by the
 618          * XIVE END ESBs. This is required on KVM as notification
 619          * using the END ESBs is not supported.
 620          */
 621         if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
 622                 pr_err("invalid flags %d\n", kvm_eq.flags);
 623                 return -EINVAL;
 624         }
 625 
 626         rc = xive_native_validate_queue_size(kvm_eq.qshift);
 627         if (rc) {
 628                 pr_err("invalid queue size %d\n", kvm_eq.qshift);
 629                 return rc;
 630         }
 631 
 632         if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
 633                 pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
 634                        1ull << kvm_eq.qshift);
 635                 return -EINVAL;
 636         }
 637 
 638         srcu_idx = srcu_read_lock(&kvm->srcu);
 639         gfn = gpa_to_gfn(kvm_eq.qaddr);
 640 
 641         page_size = kvm_host_page_size(vcpu, gfn);
 642         if (1ull << kvm_eq.qshift > page_size) {
 643                 srcu_read_unlock(&kvm->srcu, srcu_idx);
 644                 pr_warn("Incompatible host page size %lx!\n", page_size);
 645                 return -EINVAL;
 646         }
 647 
 648         page = gfn_to_page(kvm, gfn);
 649         if (is_error_page(page)) {
 650                 srcu_read_unlock(&kvm->srcu, srcu_idx);
 651                 pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
 652                 return -EINVAL;
 653         }
 654 
 655         qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
 656         srcu_read_unlock(&kvm->srcu, srcu_idx);
 657 
 658         /*
 659          * Back up the queue page guest address to mark the EQ page
 660          * dirty for migration.
 661          */
 662         q->guest_qaddr  = kvm_eq.qaddr;
 663         q->guest_qshift = kvm_eq.qshift;
 664 
 665          /*
 666           * Unconditional Notification is forced by default at the
 667           * OPAL level because the use of END ESBs is not supported by
 668           * Linux.
 669           */
 670         rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
 671                                         (__be32 *) qaddr, kvm_eq.qshift, true);
 672         if (rc) {
 673                 pr_err("Failed to configure queue %d for VCPU %d: %d\n",
 674                        priority, xc->server_num, rc);
 675                 put_page(page);
 676                 return rc;
 677         }
 678 
 679         /*
 680          * Only restore the queue state when needed. When doing the
 681          * H_INT_SET_QUEUE_CONFIG hcall, it should not be restored.
 682          */
 683         if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
 684                 rc = xive_native_set_queue_state(xc->vp_id, priority,
 685                                                  kvm_eq.qtoggle,
 686                                                  kvm_eq.qindex);
 687                 if (rc)
 688                         goto error;
 689         }
 690 
 691         rc = kvmppc_xive_attach_escalation(vcpu, priority,
 692                                            xive->single_escalation);
 693 error:
 694         if (rc)
 695                 kvmppc_xive_native_cleanup_queue(vcpu, priority);
 696         return rc;
 697 }
 698 
 699 static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
 700                                                long eq_idx, u64 addr)
 701 {
 702         struct kvm *kvm = xive->kvm;
 703         struct kvm_vcpu *vcpu;
 704         struct kvmppc_xive_vcpu *xc;
 705         struct xive_q *q;
 706         void __user *ubufp = (u64 __user *) addr;
 707         u32 server;
 708         u8 priority;
 709         struct kvm_ppc_xive_eq kvm_eq;
 710         u64 qaddr;
 711         u64 qshift;
 712         u64 qeoi_page;
 713         u32 escalate_irq;
 714         u64 qflags;
 715         int rc;
 716 
 717         /*
 718          * Demangle priority/server tuple from the EQ identifier
 719          */
 720         priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
 721                 KVM_XIVE_EQ_PRIORITY_SHIFT;
 722         server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
 723                 KVM_XIVE_EQ_SERVER_SHIFT;
 724 
 725         vcpu = kvmppc_xive_find_server(kvm, server);
 726         if (!vcpu) {
 727                 pr_err("Can't find server %d\n", server);
 728                 return -ENOENT;
 729         }
 730         xc = vcpu->arch.xive_vcpu;
 731 
 732         if (priority != xive_prio_from_guest(priority)) {
 733                 pr_err("invalid priority for queue %d for VCPU %d\n",
 734                        priority, server);
 735                 return -EINVAL;
 736         }
 737         q = &xc->queues[priority];
 738 
 739         memset(&kvm_eq, 0, sizeof(kvm_eq));
 740 
 741         if (!q->qpage)
 742                 return 0;
 743 
 744         rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
 745                                         &qeoi_page, &escalate_irq, &qflags);
 746         if (rc)
 747                 return rc;
 748 
 749         kvm_eq.flags = 0;
 750         if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
 751                 kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
 752 
 753         kvm_eq.qshift = q->guest_qshift;
 754         kvm_eq.qaddr  = q->guest_qaddr;
 755 
 756         rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
 757                                          &kvm_eq.qindex);
 758         if (rc)
 759                 return rc;
 760 
 761         pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
 762                  __func__, server, priority, kvm_eq.flags,
 763                  kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
 764 
 765         if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
 766                 return -EFAULT;
 767 
 768         return 0;
 769 }
 770 
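     /*
      * Forget the EAS configuration of every valid source in the block:
      * mask the backing IPI (and the passed-through interrupt, if any) by
      * setting PQ to 01 and deconfiguring its routing.
      */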
 771 static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
 772 {
 773         int i;
 774 
 775         for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
 776                 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
 777 
 778                 if (!state->valid)
 779                         continue;
 780 
 781                 if (state->act_priority == MASKED)
 782                         continue;
 783 
 784                 state->eisn = 0;
 785                 state->act_server = 0;
 786                 state->act_priority = MASKED;
 787                 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 788                 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
 789                 if (state->pt_number) {
 790                         xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
 791                         xive_native_configure_irq(state->pt_number,
 792                                                   0, MASKED, 0);
 793                 }
 794         }
 795 }
 796 
 797 static int kvmppc_xive_reset(struct kvmppc_xive *xive)
 798 {
 799         struct kvm *kvm = xive->kvm;
 800         struct kvm_vcpu *vcpu;
 801         unsigned int i;
 802 
 803         pr_devel("%s\n", __func__);
 804 
 805         mutex_lock(&xive->lock);
 806 
 807         kvm_for_each_vcpu(i, vcpu, kvm) {
 808                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 809                 unsigned int prio;
 810 
 811                 if (!xc)
 812                         continue;
 813 
 814                 kvmppc_xive_disable_vcpu_interrupts(vcpu);
 815 
 816                 for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
 817 
 818                         /* Single escalation, no queue 7 */
 819                         if (prio == 7 && xive->single_escalation)
 820                                 break;
 821 
 822                         if (xc->esc_virq[prio]) {
 823                                 free_irq(xc->esc_virq[prio], vcpu);
 824                                 irq_dispose_mapping(xc->esc_virq[prio]);
 825                                 kfree(xc->esc_virq_names[prio]);
 826                                 xc->esc_virq[prio] = 0;
 827                         }
 828 
 829                         kvmppc_xive_native_cleanup_queue(vcpu, prio);
 830                 }
 831         }
 832 
 833         for (i = 0; i <= xive->max_sbid; i++) {
 834                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
 835 
 836                 if (sb) {
 837                         arch_spin_lock(&sb->lock);
 838                         kvmppc_xive_reset_sources(sb);
 839                         arch_spin_unlock(&sb->lock);
 840                 }
 841         }
 842 
 843         mutex_unlock(&xive->lock);
 844 
 845         return 0;
 846 }
 847 
 848 static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
 849 {
 850         int j;
 851 
 852         for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
 853                 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
 854                 struct xive_irq_data *xd;
 855                 u32 hw_num;
 856 
 857                 if (!state->valid)
 858                         continue;
 859 
 860                 /*
 861                  * The struct kvmppc_xive_irq_state reflects the state
 862                  * of the EAS configuration and not the state of the
 863                  * source. The source is masked by setting the PQ bits to
 864                  * '-Q', which is what is being done before calling
 865                  * the KVM_DEV_XIVE_EQ_SYNC control.
 866                  *
 867                  * If a source EAS is configured, OPAL syncs the XIVE
 868                  * IC of the source and the XIVE IC of the previous
 869                  * target if any.
 870                  *
 871                  * So it should be fine ignoring MASKED sources as
 872                  * they have been synced already.
 873                  */
 874                 if (state->act_priority == MASKED)
 875                         continue;
 876 
 877                 kvmppc_xive_select_irq(state, &hw_num, &xd);
 878                 xive_native_sync_source(hw_num);
 879                 xive_native_sync_queue(hw_num);
 880         }
 881 }
 882 
 883 static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
 884 {
 885         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 886         unsigned int prio;
 887         int srcu_idx;
 888 
 889         if (!xc)
 890                 return -ENOENT;
 891 
 892         for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
 893                 struct xive_q *q = &xc->queues[prio];
 894 
 895                 if (!q->qpage)
 896                         continue;
 897 
 898                 /* Mark EQ page dirty for migration */
 899                 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 900                 mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
 901                 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 902         }
 903         return 0;
 904 }
 905 
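     /*
      * KVM_DEV_XIVE_EQ_SYNC: flush in-flight interrupts of all configured
      * sources to their queues and mark the EQ pages dirty, so that a
      * migration captures a consistent state.
      */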
 906 static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
 907 {
 908         struct kvm *kvm = xive->kvm;
 909         struct kvm_vcpu *vcpu;
 910         unsigned int i;
 911 
 912         pr_devel("%s\n", __func__);
 913 
 914         mutex_lock(&xive->lock);
 915         for (i = 0; i <= xive->max_sbid; i++) {
 916                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
 917 
 918                 if (sb) {
 919                         arch_spin_lock(&sb->lock);
 920                         kvmppc_xive_native_sync_sources(sb);
 921                         arch_spin_unlock(&sb->lock);
 922                 }
 923         }
 924 
 925         kvm_for_each_vcpu(i, vcpu, kvm) {
 926                 kvmppc_xive_native_vcpu_eq_sync(vcpu);
 927         }
 928         mutex_unlock(&xive->lock);
 929 
 930         return 0;
 931 }
 932 
 933 static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
 934                                        struct kvm_device_attr *attr)
 935 {
 936         struct kvmppc_xive *xive = dev->private;
 937 
 938         switch (attr->group) {
 939         case KVM_DEV_XIVE_GRP_CTRL:
 940                 switch (attr->attr) {
 941                 case KVM_DEV_XIVE_RESET:
 942                         return kvmppc_xive_reset(xive);
 943                 case KVM_DEV_XIVE_EQ_SYNC:
 944                         return kvmppc_xive_native_eq_sync(xive);
 945                 }
 946                 break;
 947         case KVM_DEV_XIVE_GRP_SOURCE:
 948                 return kvmppc_xive_native_set_source(xive, attr->attr,
 949                                                      attr->addr);
 950         case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
 951                 return kvmppc_xive_native_set_source_config(xive, attr->attr,
 952                                                             attr->addr);
 953         case KVM_DEV_XIVE_GRP_EQ_CONFIG:
 954                 return kvmppc_xive_native_set_queue_config(xive, attr->attr,
 955                                                            attr->addr);
 956         case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
 957                 return kvmppc_xive_native_sync_source(xive, attr->attr,
 958                                                       attr->addr);
 959         }
 960         return -ENXIO;
 961 }
 962 
 963 static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
 964                                        struct kvm_device_attr *attr)
 965 {
 966         struct kvmppc_xive *xive = dev->private;
 967 
 968         switch (attr->group) {
 969         case KVM_DEV_XIVE_GRP_EQ_CONFIG:
 970                 return kvmppc_xive_native_get_queue_config(xive, attr->attr,
 971                                                            attr->addr);
 972         }
 973         return -ENXIO;
 974 }
 975 
 976 static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
 977                                        struct kvm_device_attr *attr)
 978 {
 979         switch (attr->group) {
 980         case KVM_DEV_XIVE_GRP_CTRL:
 981                 switch (attr->attr) {
 982                 case KVM_DEV_XIVE_RESET:
 983                 case KVM_DEV_XIVE_EQ_SYNC:
 984                         return 0;
 985                 }
 986                 break;
 987         case KVM_DEV_XIVE_GRP_SOURCE:
 988         case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
 989         case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
 990                 if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
 991                     attr->attr < KVMPPC_XIVE_NR_IRQS)
 992                         return 0;
 993                 break;
 994         case KVM_DEV_XIVE_GRP_EQ_CONFIG:
 995                 return 0;
 996         }
 997         return -ENXIO;
 998 }
 999 
1000 /*
1001  * Called when device fd is closed.  kvm->lock is held.
1002  */
1003 static void kvmppc_xive_native_release(struct kvm_device *dev)
1004 {
1005         struct kvmppc_xive *xive = dev->private;
1006         struct kvm *kvm = xive->kvm;
1007         struct kvm_vcpu *vcpu;
1008         int i;
1009 
1010         pr_devel("Releasing xive native device\n");
1011 
1012         /*
1013          * Clear the KVM device file address_space which is used to
1014          * unmap the ESB pages when a device is passed-through.
1015          */
1016         mutex_lock(&xive->mapping_lock);
1017         xive->mapping = NULL;
1018         mutex_unlock(&xive->mapping_lock);
1019 
1020         /*
1021          * Since this is the device release function, we know that
1022          * userspace does not have any open fd or mmap referring to
1023          * the device.  Therefore there can not be any of the
1024          * device attribute set/get, mmap, or page fault functions
1025          * being executed concurrently, and similarly, the
1026          * connect_vcpu and set/clr_mapped functions also cannot
1027          * be running.
1028          */
1029 
1030         debugfs_remove(xive->dentry);
1031 
1032         /*
1033          * We should clean up the vCPU interrupt presenters first.
1034          */
1035         kvm_for_each_vcpu(i, vcpu, kvm) {
1036                 /*
1037                  * Take vcpu->mutex to ensure that no one_reg get/set ioctl
1038                  * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
1039                  * Holding the vcpu->mutex also means that the vcpu cannot
1040                  * be executing the KVM_RUN ioctl, and therefore it cannot
1041                  * be executing the XIVE push or pull code or accessing
1042                  * the XIVE MMIO regions.
1043                  */
1044                 mutex_lock(&vcpu->mutex);
1045                 kvmppc_xive_native_cleanup_vcpu(vcpu);
1046                 mutex_unlock(&vcpu->mutex);
1047         }
1048 
1049         /*
1050          * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
1051          * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
1052          * against xive code getting called during vcpu execution or
1053          * set/get one_reg operations.
1054          */
1055         kvm->arch.xive = NULL;
1056 
1057         for (i = 0; i <= xive->max_sbid; i++) {
1058                 if (xive->src_blocks[i])
1059                         kvmppc_xive_free_sources(xive->src_blocks[i]);
1060                 kfree(xive->src_blocks[i]);
1061                 xive->src_blocks[i] = NULL;
1062         }
1063 
1064         if (xive->vp_base != XIVE_INVALID_VP)
1065                 xive_native_free_vp_block(xive->vp_base);
1066 
1067         /*
1068          * A reference to the kvmppc_xive structure is kept under the
1069          * xive_devices struct of the machine for reuse. For now, it is
1070          * only freed when the VM is destroyed, until all the execution
1071          * paths are fixed.
1072          */
1073 
1074         kfree(dev);
1075 }
1076 
1077 /*
1078  * Create a XIVE device.  kvm->lock is held.
1079  */
1080 static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
1081 {
1082         struct kvmppc_xive *xive;
1083         struct kvm *kvm = dev->kvm;
1084         int ret = 0;
1085 
1086         pr_devel("Creating xive native device\n");
1087 
1088         if (kvm->arch.xive)
1089                 return -EEXIST;
1090 
1091         xive = kvmppc_xive_get_device(kvm, type);
1092         if (!xive)
1093                 return -ENOMEM;
1094 
1095         dev->private = xive;
1096         xive->dev = dev;
1097         xive->kvm = kvm;
1098         mutex_init(&xive->mapping_lock);
1099         mutex_init(&xive->lock);
1100 
1101         /*
1102          * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
1103          * a default. Getting the max number of CPUs the VM was
1104          * configured with would improve our usage of the XIVE VP space.
1105          */
1106         xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
1107         pr_devel("VP_Base=%x\n", xive->vp_base);
1108 
1109         if (xive->vp_base == XIVE_INVALID_VP)
1110                 ret = -ENXIO;
1111 
1112         xive->single_escalation = xive_native_has_single_escalation();
1113         xive->ops = &kvmppc_xive_native_ops;
1114 
1115         if (ret)
1116                 return ret;
1117 
1118         kvm->arch.xive = xive;
1119         return 0;
1120 }
1121 
1122 /*
1123  * Interrupt Pending Buffer (IPB) offset
1124  */
1125 #define TM_IPB_SHIFT 40
1126 #define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)
1127 
1128 int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
1129 {
1130         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1131         u64 opal_state;
1132         int rc;
1133 
1134         if (!kvmppc_xive_enabled(vcpu))
1135                 return -EPERM;
1136 
1137         if (!xc)
1138                 return -ENOENT;
1139 
1140         /* Thread context registers. We only care about IPB and CPPR */
1141         val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;
1142 
1143         /* Get the VP state from OPAL */
1144         rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
1145         if (rc)
1146                 return rc;
1147 
1148         /*
1149          * Capture the backup of the IPB register in the NVT structure
1150          * and merge it into our KVM VP state.
1151          */
1152         val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);
1153 
1154         pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
1155                  __func__,
1156                  vcpu->arch.xive_saved_state.nsr,
1157                  vcpu->arch.xive_saved_state.cppr,
1158                  vcpu->arch.xive_saved_state.ipb,
1159                  vcpu->arch.xive_saved_state.pipr,
1160                  vcpu->arch.xive_saved_state.w01,
1161                  (u32) vcpu->arch.xive_cam_word, opal_state);
1162 
1163         return 0;
1164 }
1165 
1166 int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
1167 {
1168         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1169         struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1170 
1171         pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
1172                  val->xive_timaval[0], val->xive_timaval[1]);
1173 
1174         if (!kvmppc_xive_enabled(vcpu))
1175                 return -EPERM;
1176 
1177         if (!xc || !xive)
1178                 return -ENOENT;
1179 
1180         /* We can't update the state of a "pushed" VCPU  */
1181         if (WARN_ON(vcpu->arch.xive_pushed))
1182                 return -EBUSY;
1183 
1184         /*
1185          * Restore the thread context registers. IPB and CPPR should
1186          * be the only ones that matter.
1187          */
1188         vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];
1189 
1190         /*
1191          * There is no need to restore the XIVE internal state (IPB
1192          * stored in the NVT) as the IPB register was merged in KVM VP
1193          * state when captured.
1194          */
1195         return 0;
1196 }
1197 
1198 bool kvmppc_xive_native_supported(void)
1199 {
1200         return xive_native_has_queue_state_support();
1201 }
1202 
1203 static int xive_native_debug_show(struct seq_file *m, void *private)
1204 {
1205         struct kvmppc_xive *xive = m->private;
1206         struct kvm *kvm = xive->kvm;
1207         struct kvm_vcpu *vcpu;
1208         unsigned int i;
1209 
1210         if (!kvm)
1211                 return 0;
1212 
1213         seq_puts(m, "=========\nVCPU state\n=========\n");
1214 
1215         kvm_for_each_vcpu(i, vcpu, kvm) {
1216                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1217 
1218                 if (!xc)
1219                         continue;
1220 
1221                 seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
1222                            xc->server_num,
1223                            vcpu->arch.xive_saved_state.nsr,
1224                            vcpu->arch.xive_saved_state.cppr,
1225                            vcpu->arch.xive_saved_state.ipb,
1226                            vcpu->arch.xive_saved_state.pipr,
1227                            vcpu->arch.xive_saved_state.w01,
1228                            (u32) vcpu->arch.xive_cam_word);
1229 
1230                 kvmppc_xive_debug_show_queues(m, vcpu);
1231         }
1232 
1233         return 0;
1234 }
1235 
1236 static int xive_native_debug_open(struct inode *inode, struct file *file)
1237 {
1238         return single_open(file, xive_native_debug_show, inode->i_private);
1239 }
1240 
1241 static const struct file_operations xive_native_debug_fops = {
1242         .open = xive_native_debug_open,
1243         .read = seq_read,
1244         .llseek = seq_lseek,
1245         .release = single_release,
1246 };
1247 
1248 static void xive_native_debugfs_init(struct kvmppc_xive *xive)
1249 {
1250         char *name;
1251 
1252         name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
1253         if (!name) {
1254                 pr_err("%s: no memory for name\n", __func__);
1255                 return;
1256         }
1257 
1258         xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
1259                                            xive, &xive_native_debug_fops);
1260 
1261         pr_debug("%s: created %s\n", __func__, name);
1262         kfree(name);
1263 }
1264 
1265 static void kvmppc_xive_native_init(struct kvm_device *dev)
1266 {
1267         struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
1268 
1269         /* Register some debug interfaces */
1270         xive_native_debugfs_init(xive);
1271 }
1272 
1273 struct kvm_device_ops kvm_xive_native_ops = {
1274         .name = "kvm-xive-native",
1275         .create = kvmppc_xive_native_create,
1276         .init = kvmppc_xive_native_init,
1277         .release = kvmppc_xive_native_release,
1278         .set_attr = kvmppc_xive_native_set_attr,
1279         .get_attr = kvmppc_xive_native_get_attr,
1280         .has_attr = kvmppc_xive_native_has_attr,
1281         .mmap = kvmppc_xive_native_mmap,
1282 };
1283 
1284 void kvmppc_xive_native_init_module(void)
1285 {
1286         ;
1287 }
1288 
1289 void kvmppc_xive_native_exit_module(void)
1290 {
1291         ;
1292 }
