/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS 32

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED 0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED 0xFFFF

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

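/*
 * Route an event to the right mapping function based on its perf type:
 * generic hardware events, generic cache events or raw, CPU-specific
 * event numbers.
 */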
static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

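/*
 * Fold the current hardware count into the event. prev_count is updated
 * with a cmpxchg loop so concurrent readers and the overflow interrupt can
 * race safely; the delta is masked to the counter width (max_period) before
 * it is added to the event count.
 */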
u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}

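/*
 * Check that every hardware event in the group, plus the event itself,
 * could be scheduled on the PMU at the same time, using a scratch
 * used_mask so that the real counter allocations are left untouched.
 */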
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static void
armpmu_disable_percpu_irq(void *data)
{
	unsigned int irq = *(unsigned int *)data;
	disable_percpu_irq(irq);
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int irq;
	unsigned int i, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (!irqs)
		return;

	irq = platform_get_irq(pmu_device, 0);
	if (irq <= 0)
		return;

	if (irq_is_percpu(irq)) {
		on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &cpu_hw_events);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (armpmu->irq_affinity)
				cpu = armpmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, armpmu);
		}
	}
}

static void
armpmu_enable_percpu_irq(void *data)
{
	unsigned int irq = *(unsigned int *)data;
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

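/*
 * Claim the PMU interrupt(s). A per-CPU (PPI) interrupt is requested once
 * and then enabled on every CPU; otherwise one interrupt is requested per
 * CPU and affined to it, honouring any interrupt-affinity mapping.
 */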
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err, irq;
	unsigned int i, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device) {
		pr_err("no PMU device registered\n");
		return -ENODEV;
	}

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (!irqs) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq <= 0) {
		pr_err("failed to get valid irq for PMU device\n");
		return -ENODEV;
	}

	if (irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, armpmu->handle_irq,
					 "arm-pmu", &cpu_hw_events);

		if (err) {
			pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq <= 0)
				continue;

			if (armpmu->irq_affinity)
				cpu = armpmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
					   irq, cpu);
				continue;
			}

			err = request_irq(irq, armpmu->handle_irq,
					  IRQF_NOBALANCING,
					  "arm-pmu", armpmu);
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				armpmu_release_hardware(armpmu);
				return err;
			}

			cpumask_set_cpu(cpu, &armpmu->active_irqs);
		}
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

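/*
 * Translate the generic event attributes into a hardware configuration:
 * map the event code, apply any mode-exclusion filters, pick a default
 * sample period of half the counter range for non-sampling events and
 * validate the group the event belongs to.
 */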
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}

static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
	};
}

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
};

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS 32
#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;
	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV8_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

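/*
 * The event counters are accessed indirectly: the counter index is written
 * to PMSELR_EL0 and the selected counter is then read or written through
 * PMXEVCNTR_EL0/PMXEVTYPER_EL0. The cycle counter has its own register,
 * PMCCNTR_EL0.
 */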
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}

static inline u32 armv8pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(int idx, u32 value)
{
	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();
	return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}

static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	cpuc = this_cpu_ptr(&cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv8pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;
	unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv8pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv8pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

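/* ARMv8 PMUv3 implementation of the arm_pmu operations. */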
static struct arm_pmu armv8pmu = {
	.handle_irq = armv8pmu_handle_irq,
	.enable = armv8pmu_enable_event,
	.disable = armv8pmu_disable_event,
	.read_counter = armv8pmu_read_counter,
	.write_counter = armv8pmu_write_counter,
	.get_event_idx = armv8pmu_get_event_idx,
	.start = armv8pmu_start,
	.stop = armv8pmu_stop,
	.reset = armv8pmu_reset,
	.max_period = (1LLU << 32) - 1,
};

static u32 __init armv8pmu_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
{
	armv8pmu.name = "arm/armv8-pmuv3";
	armv8pmu.map_event = armv8_pmuv3_map_event;
	armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
	armv8pmu.set_event_filter = armv8pmu_set_event_filter;
	return &armv8pmu;
}

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3"},
	{},
};

static int armpmu_device_probe(struct platform_device *pdev)
{
	int i, irq, *irqs;

	if (!cpu_pmu)
		return -ENODEV;

	/* Don't bother with PPIs; they're already affine */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0 && irq_is_percpu(irq))
		goto out;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	for (i = 0; i < pdev->num_resources; ++i) {
		struct device_node *dn;
		int cpu;

		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
				      i);
		if (!dn) {
			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
				of_node_full_name(pdev->dev.of_node), i);
			break;
		}

		for_each_possible_cpu(cpu)
			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
				break;

		of_node_put(dn);
		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			break;
		}

		irqs[i] = cpu;
	}

	if (i == pdev->num_resources)
		cpu_pmu->irq_affinity = irqs;
	else
		kfree(irqs);

out:
	cpu_pmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver = {
		.name = "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe = armpmu_device_probe,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}

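/*
 * Point each CPU's pmu_hw_events at its per-CPU event and used-counter
 * storage and initialise the per-CPU lock.
 */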
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
}

static int __init init_hw_perf_events(void)
{
	u64 dfr = read_cpuid(ID_AA64DFR0_EL1);

	switch ((dfr >> 8) & 0xf) {
	case 0x1:	/* PMUv3 */
		cpu_pmu = armv8_pmuv3_pmu_init();
		break;
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */
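/*
 * AArch64 frame record as referenced by the frame pointer: fp links to the
 * caller's record and lr holds the return address.
 */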
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		while (entry->nr < PERF_MAX_STACK_DEPTH &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, frame->pc);
	return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;

	walk_stackframe(&frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}