drivers/perf/arm_dsu_pmu.c


DEFINITIONS

This source file includes the following definitions:
  1. to_dsu_pmu
  2. dsu_pmu_sysfs_event_show
  3. dsu_pmu_sysfs_format_show
  4. dsu_pmu_cpumask_show
  5. dsu_pmu_event_attr_is_visible
  6. dsu_pmu_get_online_cpu_any_but
  7. dsu_pmu_counter_valid
  8. dsu_pmu_read_counter
  9. dsu_pmu_write_counter
  10. dsu_pmu_get_event_idx
  11. dsu_pmu_enable_counter
  12. dsu_pmu_disable_counter
  13. dsu_pmu_set_event
  14. dsu_pmu_event_update
  15. dsu_pmu_read
  16. dsu_pmu_get_reset_overflow
  17. dsu_pmu_set_event_period
  18. dsu_pmu_handle_irq
  19. dsu_pmu_start
  20. dsu_pmu_stop
  21. dsu_pmu_add
  22. dsu_pmu_del
  23. dsu_pmu_enable
  24. dsu_pmu_disable
  25. dsu_pmu_validate_event
  26. dsu_pmu_validate_group
  27. dsu_pmu_event_init
  28. dsu_pmu_alloc
  29. dsu_pmu_dt_get_cpus
  30. dsu_pmu_probe_pmu
  31. dsu_pmu_set_active_cpu
  32. dsu_pmu_init_pmu
  33. dsu_pmu_device_probe
  34. dsu_pmu_device_remove
  35. dsu_pmu_cpu_online
  36. dsu_pmu_cpu_teardown
  37. dsu_pmu_init
  38. dsu_pmu_exit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * ARM DynamIQ Shared Unit (DSU) PMU driver
   4  *
   5  * Copyright (C) ARM Limited, 2017.
   6  *
   7  * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
   8  */
   9 
  10 #define PMUNAME         "arm_dsu"
  11 #define DRVNAME         PMUNAME "_pmu"
  12 #define pr_fmt(fmt)     DRVNAME ": " fmt
  13 
  14 #include <linux/bitmap.h>
  15 #include <linux/bitops.h>
  16 #include <linux/bug.h>
  17 #include <linux/cpumask.h>
  18 #include <linux/device.h>
  19 #include <linux/interrupt.h>
  20 #include <linux/kernel.h>
  21 #include <linux/module.h>
  22 #include <linux/of_device.h>
  23 #include <linux/perf_event.h>
  24 #include <linux/platform_device.h>
  25 #include <linux/spinlock.h>
  26 #include <linux/smp.h>
  27 #include <linux/sysfs.h>
  28 #include <linux/types.h>
  29 
  30 #include <asm/arm_dsu_pmu.h>
  31 #include <asm/local64.h>
  32 
  33 /* PMU event codes */
  34 #define DSU_PMU_EVT_CYCLES              0x11
  35 #define DSU_PMU_EVT_CHAIN               0x1e
  36 
  37 #define DSU_PMU_MAX_COMMON_EVENTS       0x40
  38 
  39 #define DSU_PMU_MAX_HW_CNTRS            32
  40 #define DSU_PMU_HW_COUNTER_MASK         (DSU_PMU_MAX_HW_CNTRS - 1)
  41 
  42 #define CLUSTERPMCR_E                   BIT(0)
  43 #define CLUSTERPMCR_P                   BIT(1)
  44 #define CLUSTERPMCR_C                   BIT(2)
  45 #define CLUSTERPMCR_N_SHIFT             11
  46 #define CLUSTERPMCR_N_MASK              0x1f
  47 #define CLUSTERPMCR_IDCODE_SHIFT        16
  48 #define CLUSTERPMCR_IDCODE_MASK         0xff
  49 #define CLUSTERPMCR_IMP_SHIFT           24
  50 #define CLUSTERPMCR_IMP_MASK            0xff
  51 #define CLUSTERPMCR_RES_MASK            0x7e8
  52 #define CLUSTERPMCR_RES_VAL             0x40
  53 
  54 #define DSU_ACTIVE_CPU_MASK             0x0
  55 #define DSU_ASSOCIATED_CPU_MASK         0x1
  56 
  57 /*
  58  * We use the index of the counters as they appear in the counter
  59  * bit maps in the PMU registers (e.g. CLUSTERPMSELR),
  60  * i.e.:
  61  *      counter 0       - Bit 0
  62  *      counter 1       - Bit 1
  63  *      ...
  64  *      Cycle counter   - Bit 31
  65  */
  66 #define DSU_PMU_IDX_CYCLE_COUNTER       31
  67 
  68 /* All event counters are 32-bit, with a 64-bit cycle counter */
  69 #define DSU_PMU_COUNTER_WIDTH(idx)      \
  70         (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
  71 
  72 #define DSU_PMU_COUNTER_MASK(idx)       \
  73         GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
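/*
 * A minimal worked example of the two macros above (the values follow
 * directly from GENMASK_ULL):
 *   DSU_PMU_COUNTER_MASK(0)                         == 0x00000000ffffffffULL
 *   DSU_PMU_COUNTER_MASK(DSU_PMU_IDX_CYCLE_COUNTER) == 0xffffffffffffffffULL
 * i.e. event counters wrap at 32 bits, the cycle counter at 64 bits.
 */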
  74 
  75 #define DSU_EXT_ATTR(_name, _func, _config)             \
  76         (&((struct dev_ext_attribute[]) {                               \
  77                 {                                                       \
  78                         .attr = __ATTR(_name, 0444, _func, NULL),       \
  79                         .var = (void *)_config                          \
  80                 }                                                       \
  81         })[0].attr.attr)
  82 
  83 #define DSU_EVENT_ATTR(_name, _config)          \
  84         DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
  85 
  86 #define DSU_FORMAT_ATTR(_name, _config)         \
  87         DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
  88 
  89 #define DSU_CPUMASK_ATTR(_name, _config)        \
  90         DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
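/*
 * For example, DSU_EVENT_ATTR(cycles, 0x11) expands to a read-only (0444)
 * dev_ext_attribute named "cycles" whose ->var carries 0x11; the shared
 * show routine dsu_pmu_sysfs_event_show() then prints "event=0x11" when
 * the corresponding sysfs file is read.
 */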
  91 
  92 struct dsu_hw_events {
  93         DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
  94         struct perf_event       *events[DSU_PMU_MAX_HW_CNTRS];
  95 };
  96 
  97 /*
  98  * struct dsu_pmu       - DSU PMU descriptor
  99  *
 100  * @pmu_lock            : Protects accesses to DSU PMU register from normal vs
 101  *                        interrupt handler contexts.
 102  * @hw_events           : Holds the event counter state.
 103  * @associated_cpus     : CPUs attached to the DSU.
 104  * @active_cpu          : CPU to which the PMU is bound for accesses.
 105  * @cpuhp_node          : Node for CPU hotplug notifier link.
 106  * @num_counters        : Number of event counters implemented by the PMU,
 107  *                        excluding the cycle counter.
 108  * @irq                 : Interrupt line for counter overflow.
 109  * @cpmceid_bitmap      : Bitmap for the availability of architected common
 110  *                        events (event_code < 0x40).
 111  */
 112 struct dsu_pmu {
 113         struct pmu                      pmu;
 114         struct device                   *dev;
 115         raw_spinlock_t                  pmu_lock;
 116         struct dsu_hw_events            hw_events;
 117         cpumask_t                       associated_cpus;
 118         cpumask_t                       active_cpu;
 119         struct hlist_node               cpuhp_node;
 120         s8                              num_counters;
 121         int                             irq;
 122         DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
 123 };
 124 
 125 static unsigned long dsu_pmu_cpuhp_state;
 126 
 127 static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
 128 {
 129         return container_of(pmu, struct dsu_pmu, pmu);
 130 }
 131 
 132 static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
 133                                         struct device_attribute *attr,
 134                                         char *buf)
 135 {
 136         struct dev_ext_attribute *eattr = container_of(attr,
 137                                         struct dev_ext_attribute, attr);
 138         return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
 139                                          (unsigned long)eattr->var);
 140 }
 141 
 142 static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
 143                                          struct device_attribute *attr,
 144                                          char *buf)
 145 {
 146         struct dev_ext_attribute *eattr = container_of(attr,
 147                                         struct dev_ext_attribute, attr);
 148         return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
 149 }
 150 
 151 static ssize_t dsu_pmu_cpumask_show(struct device *dev,
 152                                     struct device_attribute *attr,
 153                                     char *buf)
 154 {
 155         struct pmu *pmu = dev_get_drvdata(dev);
 156         struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
 157         struct dev_ext_attribute *eattr = container_of(attr,
 158                                         struct dev_ext_attribute, attr);
 159         unsigned long mask_id = (unsigned long)eattr->var;
 160         const cpumask_t *cpumask;
 161 
 162         switch (mask_id) {
 163         case DSU_ACTIVE_CPU_MASK:
 164                 cpumask = &dsu_pmu->active_cpu;
 165                 break;
 166         case DSU_ASSOCIATED_CPU_MASK:
 167                 cpumask = &dsu_pmu->associated_cpus;
 168                 break;
 169         default:
 170                 return 0;
 171         }
 172         return cpumap_print_to_pagebuf(true, buf, cpumask);
 173 }
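/*
 * These attributes surface through the perf sysfs hierarchy; with the
 * naming scheme used in dsu_pmu_device_probe(), the first instance would
 * typically appear as
 *   /sys/bus/event_source/devices/arm_dsu_0/cpumask
 *   /sys/bus/event_source/devices/arm_dsu_0/associated_cpus
 */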
 174 
 175 static struct attribute *dsu_pmu_format_attrs[] = {
 176         DSU_FORMAT_ATTR(event, "config:0-31"),
 177         NULL,
 178 };
 179 
 180 static const struct attribute_group dsu_pmu_format_attr_group = {
 181         .name = "format",
 182         .attrs = dsu_pmu_format_attrs,
 183 };
 184 
 185 static struct attribute *dsu_pmu_event_attrs[] = {
 186         DSU_EVENT_ATTR(cycles, 0x11),
 187         DSU_EVENT_ATTR(bus_access, 0x19),
 188         DSU_EVENT_ATTR(memory_error, 0x1a),
 189         DSU_EVENT_ATTR(bus_cycles, 0x1d),
 190         DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
 191         DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
 192         DSU_EVENT_ATTR(l3d_cache, 0x2b),
 193         DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
 194         NULL,
 195 };
 196 
 197 static umode_t
 198 dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
 199                                 int unused)
 200 {
 201         struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
 202         struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
 203         struct dev_ext_attribute *eattr = container_of(attr,
 204                                         struct dev_ext_attribute, attr.attr);
 205         unsigned long evt = (unsigned long)eattr->var;
 206 
 207         return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
 208 }
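/*
 * Only architected common events (codes below DSU_PMU_MAX_COMMON_EVENTS)
 * that the hardware actually advertises are made visible: the bit test
 * above consults cpmceid_bitmap, which dsu_pmu_probe_pmu() fills from the
 * CLUSTERPMCEID registers.
 */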
 209 
 210 static const struct attribute_group dsu_pmu_events_attr_group = {
 211         .name = "events",
 212         .attrs = dsu_pmu_event_attrs,
 213         .is_visible = dsu_pmu_event_attr_is_visible,
 214 };
 215 
 216 static struct attribute *dsu_pmu_cpumask_attrs[] = {
 217         DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
 218         DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
 219         NULL,
 220 };
 221 
 222 static const struct attribute_group dsu_pmu_cpumask_attr_group = {
 223         .attrs = dsu_pmu_cpumask_attrs,
 224 };
 225 
 226 static const struct attribute_group *dsu_pmu_attr_groups[] = {
 227         &dsu_pmu_cpumask_attr_group,
 228         &dsu_pmu_events_attr_group,
 229         &dsu_pmu_format_attr_group,
 230         NULL,
 231 };
 232 
 233 static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
 234 {
 235         struct cpumask online_supported;
 236 
 237         cpumask_and(&online_supported,
 238                          &dsu_pmu->associated_cpus, cpu_online_mask);
 239         return cpumask_any_but(&online_supported, cpu);
 240 }
 241 
 242 static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
 243 {
 244         return (idx < dsu_pmu->num_counters) ||
 245                (idx == DSU_PMU_IDX_CYCLE_COUNTER);
 246 }
 247 
 248 static inline u64 dsu_pmu_read_counter(struct perf_event *event)
 249 {
 250         u64 val;
 251         unsigned long flags;
 252         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 253         int idx = event->hw.idx;
 254 
 255         if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
 256                                  &dsu_pmu->associated_cpus)))
 257                 return 0;
 258 
 259         if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
 260                 dev_err(event->pmu->dev,
  261                         "Trying to read invalid counter %d\n", idx);
 262                 return 0;
 263         }
 264 
 265         raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
 266         if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
 267                 val = __dsu_pmu_read_pmccntr();
 268         else
 269                 val = __dsu_pmu_read_counter(idx);
 270         raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
 271 
 272         return val;
 273 }
 274 
 275 static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
 276 {
 277         unsigned long flags;
 278         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 279         int idx = event->hw.idx;
 280 
 281         if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
 282                          &dsu_pmu->associated_cpus)))
 283                 return;
 284 
 285         if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
 286                 dev_err(event->pmu->dev,
 287                         "writing to invalid counter %d\n", idx);
 288                 return;
 289         }
 290 
 291         raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
 292         if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
 293                 __dsu_pmu_write_pmccntr(val);
 294         else
 295                 __dsu_pmu_write_counter(idx, val);
 296         raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
 297 }
 298 
 299 static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
 300                                  struct perf_event *event)
 301 {
 302         int idx;
 303         unsigned long evtype = event->attr.config;
 304         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 305         unsigned long *used_mask = hw_events->used_mask;
 306 
 307         if (evtype == DSU_PMU_EVT_CYCLES) {
 308                 if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
 309                         return -EAGAIN;
 310                 return DSU_PMU_IDX_CYCLE_COUNTER;
 311         }
 312 
 313         idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
 314         if (idx >= dsu_pmu->num_counters)
 315                 return -EAGAIN;
 316         set_bit(idx, hw_events->used_mask);
 317         return idx;
 318 }
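/*
 * Counter allocation policy, as implemented above: a CYCLES event always
 * claims the dedicated cycle counter (bit 31 of used_mask); every other
 * event takes the first free event counter in [0, num_counters).
 * Returning -EAGAIN tells the perf core the event cannot be scheduled
 * right now.
 */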
 319 
 320 static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
 321 {
 322         __dsu_pmu_counter_interrupt_enable(idx);
 323         __dsu_pmu_enable_counter(idx);
 324 }
 325 
 326 static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
 327 {
 328         __dsu_pmu_disable_counter(idx);
 329         __dsu_pmu_counter_interrupt_disable(idx);
 330 }
 331 
 332 static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
 333                                         struct perf_event *event)
 334 {
 335         int idx = event->hw.idx;
 336         unsigned long flags;
 337 
 338         if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
 339                 dev_err(event->pmu->dev,
 340                         "Trying to set invalid counter %d\n", idx);
 341                 return;
 342         }
 343 
 344         raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
 345         __dsu_pmu_set_event(idx, event->hw.config_base);
 346         raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
 347 }
 348 
 349 static void dsu_pmu_event_update(struct perf_event *event)
 350 {
 351         struct hw_perf_event *hwc = &event->hw;
 352         u64 delta, prev_count, new_count;
 353 
 354         do {
 355                 /* We may also be called from the irq handler */
 356                 prev_count = local64_read(&hwc->prev_count);
 357                 new_count = dsu_pmu_read_counter(event);
 358         } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
 359                         prev_count);
 360         delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
 361         local64_add(delta, &event->count);
 362 }
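/*
 * The cmpxchg loop makes the update safe against a racing update from the
 * overflow IRQ handler. Masking the delta handles 32-bit counter wrap,
 * e.g. prev_count = 0xfffffff0 and new_count = 0x10 give
 * delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20.
 */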
 363 
 364 static void dsu_pmu_read(struct perf_event *event)
 365 {
 366         dsu_pmu_event_update(event);
 367 }
 368 
 369 static inline u32 dsu_pmu_get_reset_overflow(void)
 370 {
 371         return __dsu_pmu_get_reset_overflow();
 372 }
 373 
 374 /**
 375  * dsu_pmu_set_event_period: Set the period for the counter.
 376  *
  377  * All DSU PMU event counters, except the cycle counter, are 32-bit
 378  * counters. To handle cases of extreme interrupt latency, we program
 379  * the counter with half of the max count for the counters.
 380  */
 381 static void dsu_pmu_set_event_period(struct perf_event *event)
 382 {
 383         int idx = event->hw.idx;
 384         u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
 385 
 386         local64_set(&event->hw.prev_count, val);
 387         dsu_pmu_write_counter(event, val);
 388 }
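/*
 * Starting from half of the maximum count (e.g. 0x7fffffff for a 32-bit
 * counter) leaves roughly 2^31 increments of headroom before the next
 * overflow, so even a very late interrupt cannot lose a full wrap of the
 * counter.
 */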
 389 
 390 static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
 391 {
 392         int i;
 393         bool handled = false;
 394         struct dsu_pmu *dsu_pmu = dev;
 395         struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
 396         unsigned long overflow;
 397 
 398         overflow = dsu_pmu_get_reset_overflow();
 399         if (!overflow)
 400                 return IRQ_NONE;
 401 
 402         for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
 403                 struct perf_event *event = hw_events->events[i];
 404 
 405                 if (!event)
 406                         continue;
 407                 dsu_pmu_event_update(event);
 408                 dsu_pmu_set_event_period(event);
 409                 handled = true;
 410         }
 411 
 412         return IRQ_RETVAL(handled);
 413 }
 414 
 415 static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
 416 {
 417         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 418 
 419         /* We always reprogram the counter */
 420         if (pmu_flags & PERF_EF_RELOAD)
 421                 WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
 422         dsu_pmu_set_event_period(event);
 423         if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
 424                 dsu_pmu_set_event(dsu_pmu, event);
 425         event->hw.state = 0;
 426         dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
 427 }
 428 
 429 static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
 430 {
 431         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 432 
 433         if (event->hw.state & PERF_HES_STOPPED)
 434                 return;
 435         dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
 436         dsu_pmu_event_update(event);
 437         event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 438 }
 439 
 440 static int dsu_pmu_add(struct perf_event *event, int flags)
 441 {
 442         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 443         struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
 444         struct hw_perf_event *hwc = &event->hw;
 445         int idx;
 446 
 447         if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
 448                                            &dsu_pmu->associated_cpus)))
 449                 return -ENOENT;
 450 
 451         idx = dsu_pmu_get_event_idx(hw_events, event);
 452         if (idx < 0)
 453                 return idx;
 454 
 455         hwc->idx = idx;
 456         hw_events->events[idx] = event;
 457         hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 458 
 459         if (flags & PERF_EF_START)
 460                 dsu_pmu_start(event, PERF_EF_RELOAD);
 461 
 462         perf_event_update_userpage(event);
 463         return 0;
 464 }
 465 
 466 static void dsu_pmu_del(struct perf_event *event, int flags)
 467 {
 468         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 469         struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
 470         struct hw_perf_event *hwc = &event->hw;
 471         int idx = hwc->idx;
 472 
 473         dsu_pmu_stop(event, PERF_EF_UPDATE);
 474         hw_events->events[idx] = NULL;
 475         clear_bit(idx, hw_events->used_mask);
 476         perf_event_update_userpage(event);
 477 }
 478 
 479 static void dsu_pmu_enable(struct pmu *pmu)
 480 {
 481         u32 pmcr;
 482         unsigned long flags;
 483         struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
 484 
 485         /* If no counters are added, skip enabling the PMU */
 486         if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
 487                 return;
 488 
 489         raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
 490         pmcr = __dsu_pmu_read_pmcr();
 491         pmcr |= CLUSTERPMCR_E;
 492         __dsu_pmu_write_pmcr(pmcr);
 493         raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
 494 }
 495 
 496 static void dsu_pmu_disable(struct pmu *pmu)
 497 {
 498         u32 pmcr;
 499         unsigned long flags;
 500         struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
 501 
 502         raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
 503         pmcr = __dsu_pmu_read_pmcr();
 504         pmcr &= ~CLUSTERPMCR_E;
 505         __dsu_pmu_write_pmcr(pmcr);
 506         raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
 507 }
 508 
 509 static bool dsu_pmu_validate_event(struct pmu *pmu,
 510                                   struct dsu_hw_events *hw_events,
 511                                   struct perf_event *event)
 512 {
 513         if (is_software_event(event))
 514                 return true;
 515         /* Reject groups spanning multiple HW PMUs. */
 516         if (event->pmu != pmu)
 517                 return false;
 518         return dsu_pmu_get_event_idx(hw_events, event) >= 0;
 519 }
 520 
 521 /*
 522  * Make sure the group of events can be scheduled at once
 523  * on the PMU.
 524  */
 525 static bool dsu_pmu_validate_group(struct perf_event *event)
 526 {
 527         struct perf_event *sibling, *leader = event->group_leader;
 528         struct dsu_hw_events fake_hw;
 529 
 530         if (event->group_leader == event)
 531                 return true;
 532 
 533         memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
 534         if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
 535                 return false;
 536         for_each_sibling_event(sibling, leader) {
 537                 if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
 538                         return false;
 539         }
 540         return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
 541 }
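/*
 * The validation above is a dry run of counter allocation on a throwaway
 * used_mask: if the leader, every hardware sibling and the new event can
 * all grab a counter starting from an empty mask, the whole group can be
 * scheduled on the real PMU at once.
 */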
 542 
 543 static int dsu_pmu_event_init(struct perf_event *event)
 544 {
 545         struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
 546 
 547         if (event->attr.type != event->pmu->type)
 548                 return -ENOENT;
 549 
 550         /* We don't support sampling */
 551         if (is_sampling_event(event)) {
 552                 dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
 553                 return -EOPNOTSUPP;
 554         }
 555 
 556         /* We cannot support task bound events */
 557         if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
 558                 dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
 559                 return -EINVAL;
 560         }
 561 
 562         if (has_branch_stack(event)) {
 563                 dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
 564                 return -EINVAL;
 565         }
 566 
 567         if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
 568                 dev_dbg(dsu_pmu->pmu.dev,
 569                          "Requested cpu is not associated with the DSU\n");
 570                 return -EINVAL;
 571         }
 572         /*
 573          * Choose the current active CPU to read the events. We don't want
 574          * to migrate the event contexts, irq handling etc to the requested
 575          * CPU. As long as the requested CPU is within the same DSU, we
 576          * are fine.
 577          */
 578         event->cpu = cpumask_first(&dsu_pmu->active_cpu);
 579         if (event->cpu >= nr_cpu_ids)
 580                 return -EINVAL;
 581         if (!dsu_pmu_validate_group(event))
 582                 return -EINVAL;
 583 
 584         event->hw.config_base = event->attr.config;
 585         return 0;
 586 }
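/*
 * Userspace usage sketch (assuming the first instance registered as
 * "arm_dsu_0"; the exact name depends on probe order):
 *
 *   # count cluster cycles and L3 refills system-wide for one second
 *   perf stat -a -e arm_dsu_0/cycles/,arm_dsu_0/l3d_cache_refill/ sleep 1
 *
 * Per the checks above, events must be CPU-bound (no per-task mode),
 * non-sampling, and opened on a CPU associated with the DSU.
 */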
 587 
 588 static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
 589 {
 590         struct dsu_pmu *dsu_pmu;
 591 
 592         dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
 593         if (!dsu_pmu)
 594                 return ERR_PTR(-ENOMEM);
 595 
 596         raw_spin_lock_init(&dsu_pmu->pmu_lock);
 597         /*
 598          * Initialise the number of counters to -1, until we probe
 599          * the real number on a connected CPU.
 600          */
 601         dsu_pmu->num_counters = -1;
 602         return dsu_pmu;
 603 }
 604 
 605 /**
 606  * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
 607  */
 608 static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
 609 {
 610         int i = 0, n, cpu;
 611         struct device_node *cpu_node;
 612 
 613         n = of_count_phandle_with_args(dev, "cpus", NULL);
 614         if (n <= 0)
 615                 return -ENODEV;
 616         for (; i < n; i++) {
 617                 cpu_node = of_parse_phandle(dev, "cpus", i);
 618                 if (!cpu_node)
 619                         break;
 620                 cpu = of_cpu_node_to_id(cpu_node);
 621                 of_node_put(cpu_node);
 622                 /*
 623                  * We have to ignore the failures here and continue scanning
  624                  * the list to handle cases where nr_cpus may be capped
 625                  * in the running kernel.
 626                  */
 627                 if (cpu < 0)
 628                         continue;
 629                 cpumask_set_cpu(cpu, mask);
 630         }
 631         return 0;
 632 }
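/*
 * A sketch of the devicetree node this parser expects (interrupt number
 * and phandle names are illustrative only):
 *
 *   dsu-pmu {
 *           compatible = "arm,dsu-pmu";
 *           interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
 *           cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
 *   };
 *
 * Each "cpus" phandle is translated to a logical CPU id; entries for CPUs
 * not present in the running kernel are silently skipped.
 */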
 633 
 634 /*
 635  * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
 636  */
 637 static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
 638 {
 639         u64 num_counters;
 640         u32 cpmceid[2];
 641 
 642         num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
 643                                                 CLUSTERPMCR_N_MASK;
 644         /* We can only support up to 31 independent counters */
 645         if (WARN_ON(num_counters > 31))
 646                 num_counters = 31;
 647         dsu_pmu->num_counters = num_counters;
 648         if (!dsu_pmu->num_counters)
 649                 return;
 650         cpmceid[0] = __dsu_pmu_read_pmceid(0);
 651         cpmceid[1] = __dsu_pmu_read_pmceid(1);
 652         bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
 653                           DSU_PMU_MAX_COMMON_EVENTS);
 654 }
 655 
 656 static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
 657 {
 658         cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
 659         if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
 660                 pr_warn("Failed to set irq affinity to %d\n", cpu);
 661 }
 662 
 663 /*
 664  * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
 665  * we haven't done it already.
 666  */
 667 static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
 668 {
 669         if (dsu_pmu->num_counters == -1)
 670                 dsu_pmu_probe_pmu(dsu_pmu);
 671         /* Reset the interrupt overflow mask */
 672         dsu_pmu_get_reset_overflow();
 673 }
 674 
 675 static int dsu_pmu_device_probe(struct platform_device *pdev)
 676 {
 677         int irq, rc;
 678         struct dsu_pmu *dsu_pmu;
 679         char *name;
 680         static atomic_t pmu_idx = ATOMIC_INIT(-1);
 681 
 682         dsu_pmu = dsu_pmu_alloc(pdev);
 683         if (IS_ERR(dsu_pmu))
 684                 return PTR_ERR(dsu_pmu);
 685 
 686         rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
 687         if (rc) {
 688                 dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
 689                 return rc;
 690         }
 691 
 692         irq = platform_get_irq(pdev, 0);
 693         if (irq < 0) {
 694                 dev_warn(&pdev->dev, "Failed to find IRQ\n");
 695                 return -EINVAL;
 696         }
 697 
 698         name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
 699                                 PMUNAME, atomic_inc_return(&pmu_idx));
 700         if (!name)
 701                 return -ENOMEM;
 702         rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
 703                               IRQF_NOBALANCING, name, dsu_pmu);
 704         if (rc) {
 705                 dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
 706                 return rc;
 707         }
 708 
 709         dsu_pmu->irq = irq;
 710         platform_set_drvdata(pdev, dsu_pmu);
 711         rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
 712                                                 &dsu_pmu->cpuhp_node);
 713         if (rc)
 714                 return rc;
 715 
 716         dsu_pmu->pmu = (struct pmu) {
 717                 .task_ctx_nr    = perf_invalid_context,
 718                 .module         = THIS_MODULE,
 719                 .pmu_enable     = dsu_pmu_enable,
 720                 .pmu_disable    = dsu_pmu_disable,
 721                 .event_init     = dsu_pmu_event_init,
 722                 .add            = dsu_pmu_add,
 723                 .del            = dsu_pmu_del,
 724                 .start          = dsu_pmu_start,
 725                 .stop           = dsu_pmu_stop,
 726                 .read           = dsu_pmu_read,
 727 
 728                 .attr_groups    = dsu_pmu_attr_groups,
 729                 .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
 730         };
 731 
 732         rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
 733         if (rc) {
 734                 cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
 735                                                  &dsu_pmu->cpuhp_node);
 736                 irq_set_affinity_hint(dsu_pmu->irq, NULL);
 737         }
 738 
 739         return rc;
 740 }
 741 
 742 static int dsu_pmu_device_remove(struct platform_device *pdev)
 743 {
 744         struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
 745 
 746         perf_pmu_unregister(&dsu_pmu->pmu);
 747         cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
 748         irq_set_affinity_hint(dsu_pmu->irq, NULL);
 749 
 750         return 0;
 751 }
 752 
 753 static const struct of_device_id dsu_pmu_of_match[] = {
 754         { .compatible = "arm,dsu-pmu", },
 755         {},
 756 };
 757 
 758 static struct platform_driver dsu_pmu_driver = {
 759         .driver = {
 760                 .name   = DRVNAME,
 761                 .of_match_table = of_match_ptr(dsu_pmu_of_match),
 762         },
 763         .probe = dsu_pmu_device_probe,
 764         .remove = dsu_pmu_device_remove,
 765 };
 766 
 767 static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
 768 {
 769         struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
 770                                                    cpuhp_node);
 771 
 772         if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
 773                 return 0;
 774 
 775         /* If the PMU is already managed, there is nothing to do */
 776         if (!cpumask_empty(&dsu_pmu->active_cpu))
 777                 return 0;
 778 
 779         dsu_pmu_init_pmu(dsu_pmu);
 780         dsu_pmu_set_active_cpu(cpu, dsu_pmu);
 781 
 782         return 0;
 783 }
 784 
 785 static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
 786 {
 787         int dst;
 788         struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
 789                                                    cpuhp_node);
 790 
 791         if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
 792                 return 0;
 793 
 794         dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
 795         /* If there are no active CPUs in the DSU, leave IRQ disabled */
 796         if (dst >= nr_cpu_ids) {
 797                 irq_set_affinity_hint(dsu_pmu->irq, NULL);
 798                 return 0;
 799         }
 800 
 801         perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
 802         dsu_pmu_set_active_cpu(dst, dsu_pmu);
 803 
 804         return 0;
 805 }
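/*
 * Hotplug summary: dsu_pmu_cpu_online() elects the first associated CPU
 * to come up as the active CPU; dsu_pmu_cpu_teardown() migrates the perf
 * context and IRQ affinity to another online CPU of the same cluster, or
 * parks the IRQ affinity if no such CPU remains.
 */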
 806 
 807 static int __init dsu_pmu_init(void)
 808 {
 809         int ret;
 810 
 811         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 812                                         DRVNAME,
 813                                         dsu_pmu_cpu_online,
 814                                         dsu_pmu_cpu_teardown);
 815         if (ret < 0)
 816                 return ret;
 817         dsu_pmu_cpuhp_state = ret;
 818         return platform_driver_register(&dsu_pmu_driver);
 819 }
 820 
 821 static void __exit dsu_pmu_exit(void)
 822 {
 823         platform_driver_unregister(&dsu_pmu_driver);
 824         cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
 825 }
 826 
 827 module_init(dsu_pmu_init);
 828 module_exit(dsu_pmu_exit);
 829 
 830 MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
 831 MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
 832 MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
 833 MODULE_LICENSE("GPL v2");
