root/virt/kvm/arm/pmu.c


DEFINITIONS

This source file includes the following definitions.
  1. kvm_pmu_idx_is_64bit
  2. kvm_pmc_to_vcpu
  3. kvm_pmu_pmc_is_chained
  4. kvm_pmu_idx_is_high_counter
  5. kvm_pmu_get_canonical_pmc
  6. kvm_pmu_idx_has_chain_evtype
  7. kvm_pmu_get_pair_counter_value
  8. kvm_pmu_get_counter_value
  9. kvm_pmu_set_counter_value
  10. kvm_pmu_release_perf_event
  11. kvm_pmu_stop_counter
  12. kvm_pmu_vcpu_init
  13. kvm_pmu_vcpu_reset
  14. kvm_pmu_vcpu_destroy
  15. kvm_pmu_valid_counter_mask
  16. kvm_pmu_enable_counter_mask
  17. kvm_pmu_disable_counter_mask
  18. kvm_pmu_overflow_status
  19. kvm_pmu_update_state
  20. kvm_pmu_should_notify_user
  21. kvm_pmu_update_run
  22. kvm_pmu_flush_hwstate
  23. kvm_pmu_sync_hwstate
  24. kvm_pmu_perf_overflow
  25. kvm_pmu_software_increment
  26. kvm_pmu_handle_pmcr
  27. kvm_pmu_counter_is_enabled
  28. kvm_pmu_create_perf_event
  29. kvm_pmu_update_pmc_chained
  30. kvm_pmu_set_counter_event_type
  31. kvm_arm_support_pmu_v3
  32. kvm_arm_pmu_v3_enable
  33. kvm_arm_pmu_v3_init
  34. pmu_irq_is_valid
  35. kvm_arm_pmu_v3_set_attr
  36. kvm_arm_pmu_v3_get_attr
  37. kvm_arm_pmu_v3_has_attr

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2015 Linaro Ltd.
   4  * Author: Shannon Zhao <shannon.zhao@linaro.org>
   5  */
   6 
   7 #include <linux/cpu.h>
   8 #include <linux/kvm.h>
   9 #include <linux/kvm_host.h>
  10 #include <linux/perf_event.h>
  11 #include <linux/perf/arm_pmu.h>
  12 #include <linux/uaccess.h>
  13 #include <asm/kvm_emulate.h>
  14 #include <kvm/arm_pmu.h>
  15 #include <kvm/arm_vgic.h>
  16 
  17 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
  18 
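/*
 * Bit 0 of perf_event_attr::config1; the arm64 PMU driver treats it as a
 * request for a 64-bit (chained) hardware event.
 */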
  19 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
  20 
  21 /**
  22  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
  23  * @vcpu: The vcpu pointer
  24  * @select_idx: The counter index
  25  */
  26 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
  27 {
  28         return (select_idx == ARMV8_PMU_CYCLE_IDX &&
  29                 __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
  30 }
  31 
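/*
 * Recover the vcpu owning a counter: 'pmc -= pmc->idx' rebases the pointer
 * to pmc[0] of the per-vcpu array, and the container_of() chain then walks
 * pmc[0] -> kvm_pmu -> kvm_vcpu_arch -> kvm_vcpu, avoiding the need for a
 * back-pointer in every counter.
 */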
  32 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
  33 {
  34         struct kvm_pmu *pmu;
  35         struct kvm_vcpu_arch *vcpu_arch;
  36 
  37         pmc -= pmc->idx;
  38         pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
  39         vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
  40         return container_of(vcpu_arch, struct kvm_vcpu, arch);
  41 }
  42 
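/*
 * Counters are tracked pair-wise: bit n of the 'chained' bitmap covers the
 * counter pair (2n, 2n + 1), so both halves of a pair map to the same bit
 * via idx >> 1.
 */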
  43 /**
  44  * kvm_pmu_pmc_is_chained - determine if the pmc is chained
  45  * @pmc: The PMU counter pointer
  46  */
  47 static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
  48 {
  49         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
  50 
  51         return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
  52 }
  53 
  54 /**
  55  * kvm_pmu_idx_is_high_counter - determine whether select_idx is the high counter of a pair
  56  * @select_idx: The counter index
  57  */
  58 static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
  59 {
  60         return select_idx & 0x1;
  61 }
  62 
  63 /**
  64  * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
  65  * @pmc: The PMU counter pointer
  66  *
  67  * When a pair of PMCs are chained together we use the low counter (canonical)
  68  * to hold the underlying perf event.
  69  */
  70 static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
  71 {
  72         if (kvm_pmu_pmc_is_chained(pmc) &&
  73             kvm_pmu_idx_is_high_counter(pmc->idx))
  74                 return pmc - 1;
  75 
  76         return pmc;
  77 }
  78 
  79 /**
  80  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
  81  * @vcpu: The vcpu pointer
  82  * @select_idx: The counter index
  83  */
  84 static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
  85 {
  86         u64 eventsel, reg;
  87 
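        /*
         * The architectural CHAIN event is programmed on the odd counter of
         * a pair and counts overflows of the even counter below it, so
         * inspect the odd index regardless of which half was passed in.
         */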
  88         select_idx |= 0x1;
  89 
  90         if (select_idx == ARMV8_PMU_CYCLE_IDX)
  91                 return false;
  92 
  93         reg = PMEVTYPER0_EL0 + select_idx;
  94         eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
  95 
  96         return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
  97 }
  98 
  99 /**
 100  * kvm_pmu_get_pair_counter_value - get PMU counter value
 101  * @vcpu: The vcpu pointer
 102  * @pmc: The PMU counter pointer
 103  */
 104 static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
 105                                           struct kvm_pmc *pmc)
 106 {
 107         u64 counter, counter_high, reg, enabled, running;
 108 
 109         if (kvm_pmu_pmc_is_chained(pmc)) {
 110                 pmc = kvm_pmu_get_canonical_pmc(pmc);
 111                 reg = PMEVCNTR0_EL0 + pmc->idx;
 112 
 113                 counter = __vcpu_sys_reg(vcpu, reg);
 114                 counter_high = __vcpu_sys_reg(vcpu, reg + 1);
 115 
 116                 counter = lower_32_bits(counter) | (counter_high << 32);
 117         } else {
 118                 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 119                       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
 120                 counter = __vcpu_sys_reg(vcpu, reg);
 121         }
 122 
 123         /*
 124          * The real counter value is the value of the counter register plus
 125          * whatever the backing perf event has counted so far.
 126          */
 127         if (pmc->perf_event)
 128                 counter += perf_event_read_value(pmc->perf_event, &enabled,
 129                                                  &running);
 130 
 131         return counter;
 132 }
 133 
 134 /**
 135  * kvm_pmu_get_counter_value - get PMU counter value
 136  * @vcpu: The vcpu pointer
 137  * @select_idx: The counter index
 138  */
 139 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 140 {
 141         u64 counter;
 142         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 143         struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 144 
 145         counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 146 
 147         if (kvm_pmu_pmc_is_chained(pmc) &&
 148             kvm_pmu_idx_is_high_counter(select_idx))
 149                 counter = upper_32_bits(counter);
 150         else if (select_idx != ARMV8_PMU_CYCLE_IDX)
 151                 counter = lower_32_bits(counter);
 152 
 153         return counter;
 154 }
 155 
 156 /**
 157  * kvm_pmu_set_counter_value - set PMU counter value
 158  * @vcpu: The vcpu pointer
 159  * @select_idx: The counter index
 160  * @val: The counter value
 161  */
 162 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 163 {
 164         u64 reg;
 165 
 166         reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 167               ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
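        /*
         * The shadow register only holds a base value; reads add the live
         * perf event count on top (see kvm_pmu_get_pair_counter_value()),
         * so adjust the base by the delta rather than storing val directly.
         */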
 168         __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 169 
 170         /* Recreate the perf event to reflect the updated sample_period */
 171         kvm_pmu_create_perf_event(vcpu, select_idx);
 172 }
 173 
 174 /**
 175  * kvm_pmu_release_perf_event - remove the perf event
 176  * @pmc: The PMU counter pointer
 177  */
 178 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
 179 {
 180         pmc = kvm_pmu_get_canonical_pmc(pmc);
 181         if (pmc->perf_event) {
 182                 perf_event_disable(pmc->perf_event);
 183                 perf_event_release_kernel(pmc->perf_event);
 184                 pmc->perf_event = NULL;
 185         }
 186 }
 187 
 188 /**
 189  * kvm_pmu_stop_counter - stop PMU counter
 190  * @pmc: The PMU counter pointer
 191  *
 192  * If this counter has been configured to monitor some event, release it here.
 193  */
 194 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 195 {
 196         u64 counter, reg, val;
 197 
 198         pmc = kvm_pmu_get_canonical_pmc(pmc);
 199         if (!pmc->perf_event)
 200                 return;
 201 
 202         counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 203 
 204         if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
 205                 reg = PMCCNTR_EL0;
 206                 val = counter;
 207         } else {
 208                 reg = PMEVCNTR0_EL0 + pmc->idx;
 209                 val = lower_32_bits(counter);
 210         }
 211 
 212         __vcpu_sys_reg(vcpu, reg) = val;
 213 
 214         if (kvm_pmu_pmc_is_chained(pmc))
 215                 __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
 216 
 217         kvm_pmu_release_perf_event(pmc);
 218 }
 219 
 220 /**
 221  * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 222  * @vcpu: The vcpu pointer
 223  *
 224  */
 225 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
 226 {
 227         int i;
 228         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 229 
 230         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 231                 pmu->pmc[i].idx = i;
 232 }
 233 
 234 /**
 235  * kvm_pmu_vcpu_reset - reset pmu state for cpu
 236  * @vcpu: The vcpu pointer
 237  *
 238  */
 239 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 240 {
 241         int i;
 242         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 243 
 244         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 245                 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 246 
 247         bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
 248 }
 249 
 250 /**
 251  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 252  * @vcpu: The vcpu pointer
 253  *
 254  */
 255 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 256 {
 257         int i;
 258         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 259 
 260         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 261                 kvm_pmu_release_perf_event(&pmu->pmc[i]);
 262 }
 263 
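/*
 * PMCR_EL0.N holds the number of implemented event counters; the valid mask
 * is bits [N-1:0] plus the dedicated cycle counter at bit 31
 * (ARMV8_PMU_CYCLE_IDX). For example, N == 6 yields 0x8000003f.
 */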
 264 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 265 {
 266         u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
 267 
 268         val &= ARMV8_PMU_PMCR_N_MASK;
 269         if (val == 0)
 270                 return BIT(ARMV8_PMU_CYCLE_IDX);
 271         else
 272                 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 273 }
 274 
 275 /**
 276  * kvm_pmu_enable_counter_mask - enable selected PMU counters
 277  * @vcpu: The vcpu pointer
 278  * @val: the value the guest writes to the PMCNTENSET register
 279  *
 280  * Call perf_event_enable to start counting the perf event
 281  */
 282 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 283 {
 284         int i;
 285         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 286         struct kvm_pmc *pmc;
 287 
 288         if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 289                 return;
 290 
 291         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
 292                 if (!(val & BIT(i)))
 293                         continue;
 294 
 295                 pmc = &pmu->pmc[i];
 296 
 297                 /*
 298                  * For high counters of chained events we must recreate the
 299                  * perf event with the long (64bit) attribute set.
 300                  */
 301                 if (kvm_pmu_pmc_is_chained(pmc) &&
 302                     kvm_pmu_idx_is_high_counter(i)) {
 303                         kvm_pmu_create_perf_event(vcpu, i);
 304                         continue;
 305                 }
 306 
 307                 /* At this point, pmc must be the canonical */
 308                 if (pmc->perf_event) {
 309                         perf_event_enable(pmc->perf_event);
 310                         if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
 311                                 kvm_debug("fail to enable perf event\n");
 312                 }
 313         }
 314 }
 315 
 316 /**
 317  * kvm_pmu_disable_counter_mask - disable selected PMU counters
 318  * @vcpu: The vcpu pointer
 319  * @val: the value the guest writes to the PMCNTENCLR register
 320  *
 321  * Call perf_event_disable to stop counting the perf event
 322  */
 323 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 324 {
 325         int i;
 326         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 327         struct kvm_pmc *pmc;
 328 
 329         if (!val)
 330                 return;
 331 
 332         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
 333                 if (!(val & BIT(i)))
 334                         continue;
 335 
 336                 pmc = &pmu->pmc[i];
 337 
 338                 /*
 339                  * For high counters of chained events we must recreate the
 340                  * perf event with the long (64bit) attribute unset.
 341                  */
 342                 if (kvm_pmu_pmc_is_chained(pmc) &&
 343                     kvm_pmu_idx_is_high_counter(i)) {
 344                         kvm_pmu_create_perf_event(vcpu, i);
 345                         continue;
 346                 }
 347 
 348                 /* At this point, pmc must be the canonical */
 349                 if (pmc->perf_event)
 350                         perf_event_disable(pmc->perf_event);
 351         }
 352 }
 353 
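/*
 * A counter contributes to the overflow interrupt line only if its overflow
 * flag is set in PMOVSSET_EL0, it is enabled in PMCNTENSET_EL0, its
 * interrupt is enabled in PMINTENSET_EL1 and it is actually implemented,
 * and only while PMCR_EL0.E is set.
 */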
 354 static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 355 {
 356         u64 reg = 0;
 357 
 358         if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 359                 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 360                 reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 361                 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 362                 reg &= kvm_pmu_valid_counter_mask(vcpu);
 363         }
 364 
 365         return reg;
 366 }
 367 
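/*
 * Recompute the overflow interrupt level and propagate it only when it
 * changes: with an in-kernel irqchip the new level is injected into the
 * vgic, otherwise it is left for userspace to observe through
 * kvm_pmu_should_notify_user() and kvm_pmu_update_run().
 */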
 368 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 369 {
 370         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 371         bool overflow;
 372 
 373         if (!kvm_arm_pmu_v3_ready(vcpu))
 374                 return;
 375 
 376         overflow = !!kvm_pmu_overflow_status(vcpu);
 377         if (pmu->irq_level == overflow)
 378                 return;
 379 
 380         pmu->irq_level = overflow;
 381 
 382         if (likely(irqchip_in_kernel(vcpu->kvm))) {
 383                 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 384                                               pmu->irq_num, overflow, pmu);
 385                 WARN_ON(ret);
 386         }
 387 }
 388 
 389 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
 390 {
 391         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 392         struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
 393         bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
 394 
 395         if (likely(irqchip_in_kernel(vcpu->kvm)))
 396                 return false;
 397 
 398         return pmu->irq_level != run_level;
 399 }
 400 
 401 /*
 402  * Reflect the PMU overflow interrupt output level into the kvm_run structure
 403  */
 404 void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
 405 {
 406         struct kvm_sync_regs *regs = &vcpu->run->s.regs;
 407 
 408         /* Populate the PMU overflow bit for user space */
 409         regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
 410         if (vcpu->arch.pmu.irq_level)
 411                 regs->device_irq_level |= KVM_ARM_DEV_PMU;
 412 }
 413 
 414 /**
 415  * kvm_pmu_flush_hwstate - flush pmu state to cpu
 416  * @vcpu: The vcpu pointer
 417  *
 418  * Check if the PMU has overflowed while we were running in the host, and inject
 419  * an interrupt if that was the case.
 420  */
 421 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
 422 {
 423         kvm_pmu_update_state(vcpu);
 424 }
 425 
 426 /**
 427  * kvm_pmu_sync_hwstate - sync pmu state from cpu
 428  * @vcpu: The vcpu pointer
 429  *
 430  * Check if the PMU has overflowed while we were running in the guest, and
 431  * inject an interrupt if that was the case.
 432  */
 433 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
 434 {
 435         kvm_pmu_update_state(vcpu);
 436 }
 437 
 438 /*
 439  * When the perf event overflows, set the overflow status and inform the vcpu.
 440  */
 441 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 442                                   struct perf_sample_data *data,
 443                                   struct pt_regs *regs)
 444 {
 445         struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 446         struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 447         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 448         int idx = pmc->idx;
 449         u64 period;
 450 
 451         cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
 452 
 453         /*
 454          * Reset the sample period to the architectural limit,
 455          * i.e. the point where the counter overflows.
 456          */
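        /*
         * perf counts upwards from zero, so the negated count (truncated to
         * the counter width below) is the number of increments left before
         * the emulated counter wraps again.
         */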
 457         period = -(local64_read(&perf_event->count));
 458 
 459         if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
 460                 period &= GENMASK(31, 0);
 461 
 462         local64_set(&perf_event->hw.period_left, 0);
 463         perf_event->attr.sample_period = period;
 464         perf_event->hw.sample_period = period;
 465 
 466         __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 467 
 468         if (kvm_pmu_overflow_status(vcpu)) {
 469                 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 470                 kvm_vcpu_kick(vcpu);
 471         }
 472 
 473         cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 474 }
 475 
 476 /**
 477  * kvm_pmu_software_increment - do software increment
 478  * @vcpu: The vcpu pointer
 479  * @val: the value the guest writes to the PMSWINC register
 480  */
 481 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 482 {
 483         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 484         int i;
 485 
 486         if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 487                 return;
 488 
 489         /* Weed out disabled counters */
 490         val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 491 
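        /*
         * Only the event counters can be programmed with SW_INCR; the cycle
         * counter is never eligible, hence the loop stops short of
         * ARMV8_PMU_CYCLE_IDX.
         */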
 492         for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
 493                 u64 type, reg;
 494 
 495                 if (!(val & BIT(i)))
 496                         continue;
 497 
 498                 /* PMSWINC only applies to ... SW_INC! */
 499                 type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
 500                 type &= ARMV8_PMU_EVTYPE_EVENT;
 501                 if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
 502                         continue;
 503 
 504                 /* increment this SW_INCR event counter */
 505                 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
 506                 reg = lower_32_bits(reg);
 507                 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 508 
 509                 if (reg) /* no overflow on the low part */
 510                         continue;
 511 
 512                 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
 513                         /* increment the high counter */
 514                         reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
 515                         reg = lower_32_bits(reg);
 516                         __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
 517                         if (!reg) /* mark overflow on the high counter */
 518                                 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
 519                 } else {
 520                         /* mark overflow on low counter */
 521                         __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
 522                 }
 523         }
 524 }
 525 
 526 /**
 527  * kvm_pmu_handle_pmcr - handle PMCR register
 528  * @vcpu: The vcpu pointer
 529  * @val: the value the guest writes to the PMCR register
 530  */
 531 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 532 {
 533         u64 mask;
 534         int i;
 535 
 536         mask = kvm_pmu_valid_counter_mask(vcpu);
 537         if (val & ARMV8_PMU_PMCR_E) {
 538                 kvm_pmu_enable_counter_mask(vcpu,
 539                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
 540         } else {
 541                 kvm_pmu_disable_counter_mask(vcpu, mask);
 542         }
 543 
 544         if (val & ARMV8_PMU_PMCR_C)
 545                 kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 546 
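        /*
         * PMCR_EL0.P resets only the event counters; the cycle counter is
         * reset separately via PMCR_EL0.C, handled above.
         */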
 547         if (val & ARMV8_PMU_PMCR_P) {
 548                 for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
 549                         kvm_pmu_set_counter_value(vcpu, i, 0);
 550         }
 551 }
 552 
 553 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 554 {
 555         return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
 556                (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
 557 }
 558 
 559 /**
 560  * kvm_pmu_create_perf_event - create a perf event for a counter
 561  * @vcpu: The vcpu pointer
 562  * @select_idx: The index of the selected counter
 563  */
 564 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 565 {
 566         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 567         struct kvm_pmc *pmc;
 568         struct perf_event *event;
 569         struct perf_event_attr attr;
 570         u64 eventsel, counter, reg, data;
 571 
 572         /*
 573          * For chained counters the event type and filtering attributes are
 574          * obtained from the low/even counter. We also use this counter to
 575          * determine if the event is enabled/disabled.
 576          */
 577         pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
 578 
 579         reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 580               ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
 581         data = __vcpu_sys_reg(vcpu, reg);
 582 
 583         kvm_pmu_stop_counter(vcpu, pmc);
 584         eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
 585 
 586         /* Software increment event doesn't need to be backed by a perf event */
 587         if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
 588             pmc->idx != ARMV8_PMU_CYCLE_IDX)
 589                 return;
 590 
 591         memset(&attr, 0, sizeof(struct perf_event_attr));
 592         attr.type = PERF_TYPE_RAW;
 593         attr.size = sizeof(attr);
 594         attr.pinned = 1;
 595         attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
 596         attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
 597         attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
 598         attr.exclude_hv = 1; /* Don't count EL2 events */
 599         attr.exclude_host = 1; /* Don't count host events */
 600         attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
 601                 ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
 602 
 603         counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 604 
 605         if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
 606                 /*
 607                  * The initial sample period (overflow count) of an event. For
 608                  * chained counters we only support overflow interrupts on the
 609                  * high counter.
 610                  */
 611                 attr.sample_period = (-counter) & GENMASK(63, 0);
 612                 if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
 613                         attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 614 
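                /*
                 * Register the overflow handler against the high counter
                 * (pmc + 1) so the overflow is accounted to the odd index;
                 * as noted above, only the high counter's overflow interrupt
                 * is emulated for a chained pair.
                 */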
 615                 event = perf_event_create_kernel_counter(&attr, -1, current,
 616                                                          kvm_pmu_perf_overflow,
 617                                                          pmc + 1);
 618         } else {
 619                 /* The initial sample period (overflow count) of an event. */
 620                 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
 621                         attr.sample_period = (-counter) & GENMASK(63, 0);
 622                 else
 623                         attr.sample_period = (-counter) & GENMASK(31, 0);
 624 
 625                 event = perf_event_create_kernel_counter(&attr, -1, current,
 626                                                  kvm_pmu_perf_overflow, pmc);
 627         }
 628 
 629         if (IS_ERR(event)) {
 630                 pr_err_once("kvm: pmu event creation failed %ld\n",
 631                             PTR_ERR(event));
 632                 return;
 633         }
 634 
 635         pmc->perf_event = event;
 636 }
 637 
 638 /**
 639  * kvm_pmu_update_pmc_chained - update chained bitmap
 640  * @vcpu: The vcpu pointer
 641  * @select_idx: The index of the selected counter
 642  *
 643  * Update the chained bitmap based on the event type written in the
 644  * typer register.
 645  */
 646 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 647 {
 648         struct kvm_pmu *pmu = &vcpu->arch.pmu;
 649         struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 650 
 651         if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
 652                 /*
 653                  * During promotion from !chained to chained we must ensure
 654                  * the adjacent counter is stopped and its event destroyed
 655                  */
 656                 if (!kvm_pmu_pmc_is_chained(pmc))
 657                         kvm_pmu_stop_counter(vcpu, pmc);
 658 
 659                 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 660         } else {
 661                 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 662         }
 663 }
 664 
 665 /**
 666  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 667  * @vcpu: The vcpu pointer
 668  * @data: The data guest writes to PMXEVTYPER_EL0
 669  * @select_idx: The index of the selected counter
 670  *
 671  * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 672  * event with the given hardware event number. Here we call the perf_event API
 673  * to emulate this action and create a kernel perf event for it.
 674  */
 675 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 676                                     u64 select_idx)
 677 {
 678         u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;
 679 
 680         reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 681               ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
 682 
 683         __vcpu_sys_reg(vcpu, reg) = event_type;
 684 
 685         kvm_pmu_update_pmc_chained(vcpu, select_idx);
 686         kvm_pmu_create_perf_event(vcpu, select_idx);
 687 }
 688 
 689 bool kvm_arm_support_pmu_v3(void)
 690 {
 691         /*
 692          * Check if HW_PERF_EVENTS is supported by checking the number of
 693          * hardware performance counters. This ensures the presence of a
 694          * physical PMU and that CONFIG_PERF_EVENTS is selected.
 695          */
 696         return (perf_num_counters() > 0);
 697 }
 698 
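/*
 * Final validation of the PMU configuration, typically reached once the
 * vcpu is about to run for the first time: check the interrupt setup
 * against the irqchip configuration and set the 'ready' flag that gates
 * kvm_pmu_update_state().
 */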
 699 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 700 {
 701         if (!vcpu->arch.pmu.created)
 702                 return 0;
 703 
 704         /*
 705          * A valid interrupt configuration for the PMU is either to have a
 706          * properly configured interrupt number and using an in-kernel
 707          * irqchip, or to not have an in-kernel GIC and not set an IRQ.
 708          */
 709         if (irqchip_in_kernel(vcpu->kvm)) {
 710                 int irq = vcpu->arch.pmu.irq_num;
 711                 if (!kvm_arm_pmu_irq_initialized(vcpu))
 712                         return -EINVAL;
 713 
 714                 /*
 715                  * If we are using an in-kernel vgic, at this point we know
 716                  * the vgic will be initialized, so we can check the PMU irq
 717                  * number against the dimensions of the vgic and make sure
 718                  * it's valid.
 719                  */
 720                 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
 721                         return -EINVAL;
 722         } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
 723                 return -EINVAL;
 724         }
 725 
 726         kvm_pmu_vcpu_reset(vcpu);
 727         vcpu->arch.pmu.ready = true;
 728 
 729         return 0;
 730 }
 731 
 732 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 733 {
 734         if (!kvm_arm_support_pmu_v3())
 735                 return -ENODEV;
 736 
 737         if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
 738                 return -ENXIO;
 739 
 740         if (vcpu->arch.pmu.created)
 741                 return -EBUSY;
 742 
 743         if (irqchip_in_kernel(vcpu->kvm)) {
 744                 int ret;
 745 
 746                 /*
 747                  * If using the PMU with an in-kernel virtual GIC
 748                  * implementation, we require the GIC to be already
 749                  * initialized when initializing the PMU.
 750                  */
 751                 if (!vgic_initialized(vcpu->kvm))
 752                         return -ENODEV;
 753 
 754                 if (!kvm_arm_pmu_irq_initialized(vcpu))
 755                         return -ENXIO;
 756 
 757                 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
 758                                          &vcpu->arch.pmu);
 759                 if (ret)
 760                         return ret;
 761         }
 762 
 763         vcpu->arch.pmu.created = true;
 764         return 0;
 765 }
 766 
 767 /*
 768  * For one VM the interrupt type must be the same for each vcpu.
 769  * As a PPI, the interrupt number is the same for all vcpus,
 770  * while as an SPI it must be a separate number per vcpu.
 771  */
 772 static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 773 {
 774         int i;
 775         struct kvm_vcpu *vcpu;
 776 
 777         kvm_for_each_vcpu(i, vcpu, kvm) {
 778                 if (!kvm_arm_pmu_irq_initialized(vcpu))
 779                         continue;
 780 
 781                 if (irq_is_ppi(irq)) {
 782                         if (vcpu->arch.pmu.irq_num != irq)
 783                                 return false;
 784                 } else {
 785                         if (vcpu->arch.pmu.irq_num == irq)
 786                                 return false;
 787                 }
 788         }
 789 
 790         return true;
 791 }
 792 
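/*
 * A sketch of the userspace side, assuming the standard vcpu device-attr
 * ioctls (names taken from the uapi headers, error handling omitted). With
 * an in-kernel irqchip the IRQ must be configured before INIT:
 *
 *      int irq = 23;                           // e.g. the PMU PPI
 *      struct kvm_device_attr attr = {
 *              .group  = KVM_ARM_VCPU_PMU_V3_CTRL,
 *              .attr   = KVM_ARM_VCPU_PMU_V3_IRQ,
 *              .addr   = (__u64)(unsigned long)&irq,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *      attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *      attr.addr = 0;
 *      ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */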
 793 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 794 {
 795         switch (attr->attr) {
 796         case KVM_ARM_VCPU_PMU_V3_IRQ: {
 797                 int __user *uaddr = (int __user *)(long)attr->addr;
 798                 int irq;
 799 
 800                 if (!irqchip_in_kernel(vcpu->kvm))
 801                         return -EINVAL;
 802 
 803                 if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
 804                         return -ENODEV;
 805 
 806                 if (get_user(irq, uaddr))
 807                         return -EFAULT;
 808 
 809                 /* The PMU overflow interrupt can be a PPI or a valid SPI. */
 810                 if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
 811                         return -EINVAL;
 812 
 813                 if (!pmu_irq_is_valid(vcpu->kvm, irq))
 814                         return -EINVAL;
 815 
 816                 if (kvm_arm_pmu_irq_initialized(vcpu))
 817                         return -EBUSY;
 818 
 819                 kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
 820                 vcpu->arch.pmu.irq_num = irq;
 821                 return 0;
 822         }
 823         case KVM_ARM_VCPU_PMU_V3_INIT:
 824                 return kvm_arm_pmu_v3_init(vcpu);
 825         }
 826 
 827         return -ENXIO;
 828 }
 829 
 830 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 831 {
 832         switch (attr->attr) {
 833         case KVM_ARM_VCPU_PMU_V3_IRQ: {
 834                 int __user *uaddr = (int __user *)(long)attr->addr;
 835                 int irq;
 836 
 837                 if (!irqchip_in_kernel(vcpu->kvm))
 838                         return -EINVAL;
 839 
 840                 if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
 841                         return -ENODEV;
 842 
 843                 if (!kvm_arm_pmu_irq_initialized(vcpu))
 844                         return -ENXIO;
 845 
 846                 irq = vcpu->arch.pmu.irq_num;
 847                 return put_user(irq, uaddr);
 848         }
 849         }
 850 
 851         return -ENXIO;
 852 }
 853 
 854 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 855 {
 856         switch (attr->attr) {
 857         case KVM_ARM_VCPU_PMU_V3_IRQ:
 858         case KVM_ARM_VCPU_PMU_V3_INIT:
 859                 if (kvm_arm_support_pmu_v3() &&
 860                     test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
 861                         return 0;
 862         }
 863 
 864         return -ENXIO;
 865 }
