root/arch/x86/events/amd/uncore.c


DEFINITIONS

This source file includes the following definitions.
  1. is_nb_event
  2. is_llc_event
  3. event_to_amd_uncore
  4. amd_uncore_read
  5. amd_uncore_start
  6. amd_uncore_stop
  7. amd_uncore_add
  8. amd_uncore_del
  9. amd_uncore_event_init
  10. amd_uncore_attr_show_cpumask
  11. amd_uncore_alloc
  12. amd_uncore_cpu_up_prepare
  13. amd_uncore_find_online_sibling
  14. amd_uncore_cpu_starting
  15. uncore_clean_online
  16. uncore_online
  17. amd_uncore_cpu_online
  18. uncore_down_prepare
  19. amd_uncore_cpu_down_prepare
  20. uncore_dead
  21. amd_uncore_cpu_dead
  22. amd_uncore_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB         4
#define NUM_COUNTERS_L2         4
#define NUM_COUNTERS_L3         6
#define MAX_COUNTERS            6

#define RDPMC_BASE_NB           6
#define RDPMC_BASE_LLC          10

#define COUNTER_SHIFT           16

#undef pr_fmt
#define pr_fmt(fmt)     "amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

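/*
 * A single amd_uncore instance is shared by all cores attached to the same
 * northbridge / last-level cache: 'cpu' is the core that currently owns the
 * counters and 'refcnt' is the number of online cores sharing the instance.
 */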
struct amd_uncore {
        int id;
        int refcnt;
        int cpu;
        int num_counters;
        int rdpmc_base;
        u32 msr_base;
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
        struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
        return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
        return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
        else if (is_llc_event(event) && amd_uncore_llc)
                return *per_cpu_ptr(amd_uncore_llc, event->cpu);

        return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;
        s64 delta;

        /*
         * since we do not enable counter overflow interrupts,
         * we do not have to worry about prev_count changing on us
         */

        prev = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new);
        local64_set(&hwc->prev_count, new);
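        /*
         * Shifting up by COUNTER_SHIFT and back down masks off the unused
         * upper 16 bits of the raw RDPMC value, so the 48-bit delta is
         * computed correctly even if the counter has wrapped.
         */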
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

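        /* On PERF_EF_RELOAD, re-seed the hardware counter with the saved count. */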
        if (flags & PERF_EF_RELOAD)
                wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

        hwc->state = 0;
        wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
        perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                amd_uncore_read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        /* are we already assigned? */
        if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
                goto out;

        for (i = 0; i < uncore->num_counters; i++) {
                if (uncore->events[i] == event) {
                        hwc->idx = i;
                        goto out;
                }
        }

        /* if not, take the first available counter */
        hwc->idx = -1;
        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
                        hwc->idx = i;
                        break;
                }
        }

out:
        if (hwc->idx == -1)
                return -EBUSY;

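        /*
         * Control and count MSRs are interleaved: the control register sits
         * at msr_base + 2*idx and its counter at msr_base + 2*idx + 1.
         */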
        hwc->config_base = uncore->msr_base + (2 * hwc->idx);
        hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
        hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                amd_uncore_start(event, PERF_EF_RELOAD);

        return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        amd_uncore_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], event, NULL) == event)
                        break;
        }

        hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
        struct amd_uncore *uncore;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * NB and last-level cache counters (MSRs) are shared across all cores
         * that share the same NB / last-level cache. On family 16h and below,
         * interrupts can be directed to a single target core; however, event
         * counts generated by processes running on other cores cannot be
         * masked out. So we do not support sampling and per-thread events
         * (hence PERF_PMU_CAP_NO_INTERRUPT), and we do not enable counter
         * overflow interrupts.
         */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;

        if (event->cpu < 0)
                return -EINVAL;

        /*
         * SliceMask and ThreadMask need to be set for certain L3 events in
         * Family 17h. For other events, the two fields do not affect the count.
         */
        if (l3_mask && is_llc_event(event)) {
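                /*
                 * On family 17h an L3 complex serves up to four cores with
                 * two SMT threads each; select the ThreadMask bit for this
                 * hardware thread and count across all slices.
                 */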
                int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);

                if (smp_num_siblings > 1)
                        thread += cpu_data(event->cpu).apicid & 1;

                hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
                                AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
        }

        uncore = event_to_amd_uncore(event);
        if (!uncore)
                return -ENODEV;

        /*
         * Since requests can come in on any of the shared cores, remap the
         * event to a single common cpu.
         */
        event->cpu = uncore->cpu;

        return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        cpumask_t *active_mask;
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
        else if (pmu->type == amd_llc_pmu.type)
                active_mask = &amd_llc_active_mask;
        else
                return 0;

        return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)                                \
static ssize_t                                                               \
_dev##_show##_name(struct device *dev,                                       \
                struct device_attribute *attr,                               \
                char *page)                                                  \
{                                                                            \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                          \
        return sprintf(page, _format "\n");                                  \
}                                                                            \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)                                                 \
static struct attribute *amd_uncore_format_attr_##_name[] = {                \
        &format_attr_event_##_name.attr,                                     \
        &format_attr_umask.attr,                                             \
        NULL,                                                                \
};                                                                           \
static struct attribute_group amd_uncore_format_group_##_name = {            \
        .name = "format",                                                    \
        .attrs = amd_uncore_format_attr_##_name,                             \
};                                                                           \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
        &amd_uncore_attr_group,                                              \
        &amd_uncore_format_group_##_name,                                    \
        NULL,                                                                \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
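/*
 * Once the PMUs are registered, the format attributes above make them usable
 * through the regular perf tool syntax, for example (illustrative placeholder
 * event/umask values; family 17h+ PMU names shown, older families expose
 * amd_nb/amd_l2 instead):
 *
 *   perf stat -a -e amd_df/event=0x00,umask=0x00/ \
 *                -e amd_l3/event=0x00,umask=0x00/ sleep 1
 */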

static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
                        cpu_to_node(cpu));
}

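/*
 * Hotplug "prepare" callback: allocate this cpu's uncore structures. The id
 * is filled in later by the STARTING callback, which may then merge the
 * allocation with an online sibling's.
 */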
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_llc;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
                uncore_nb->num_counters = num_counters_nb;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
                uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_llc) {
                uncore_llc = amd_uncore_alloc(cpu);
                if (!uncore_llc)
                        goto fail;
                uncore_llc->cpu = cpu;
                uncore_llc->num_counters = num_counters_llc;
                uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
                uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_llc->active_mask = &amd_llc_active_mask;
                uncore_llc->pmu = &amd_llc_pmu;
                uncore_llc->id = -1;
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
        }

        return 0;

fail:
        if (amd_uncore_nb)
                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
        kfree(uncore_nb);
        return -ENOMEM;
}

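/*
 * If another online cpu already owns an uncore with the same id, share that
 * instance (bumping its refcnt) and queue our own allocation on
 * uncore_unused_list to be freed later by uncore_clean_online().
 */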
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
                               struct amd_uncore * __percpu *uncores)
{
        unsigned int cpu;
        struct amd_uncore *that;

        for_each_online_cpu(cpu) {
                that = *per_cpu_ptr(uncores, cpu);

                if (!that)
                        continue;

                if (this == that)
                        continue;

                if (this->id == that->id) {
                        hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
        }

        this->refcnt++;
        return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
        unsigned int eax, ebx, ecx, edx;
        struct amd_uncore *uncore;

        if (amd_uncore_nb) {
                uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
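                /* CPUID Fn8000_001E: ECX[7:0] is this cpu's node ID. */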
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                uncore->id = ecx & 0xff;

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }

        if (amd_uncore_llc) {
                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
                uncore->id = per_cpu(cpu_llc_id, cpu);

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
        }

        return 0;
}

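/*
 * Free the per-cpu allocations that lost out to an already-online sibling in
 * amd_uncore_cpu_starting().
 */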
static void uncore_clean_online(void)
{
        struct amd_uncore *uncore;
        struct hlist_node *n;

        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
                hlist_del(&uncore->node);
                kfree(uncore);
        }
}

static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        uncore_clean_online();

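        /* Only the owning cpu is advertised in the sysfs cpumask attribute. */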
        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_online(cpu, amd_uncore_llc);

        return 0;
}

static void uncore_down_prepare(unsigned int cpu,
                                struct amd_uncore * __percpu *uncores)
{
        unsigned int i;
        struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

        if (this->cpu != cpu)
                return;

        /*
         * This cpu owns the counters and is going down; migrate ownership
         * to an online sibling that shares the uncore, if possible.
         */
        for_each_online_cpu(i) {
                struct amd_uncore *that = *per_cpu_ptr(uncores, i);

                if (cpu == i)
                        continue;

                if (this == that) {
                        perf_pmu_migrate_context(this->pmu, cpu, i);
                        cpumask_clear_cpu(cpu, that->active_mask);
                        cpumask_set_cpu(i, that->active_mask);
                        that->cpu = i;
                        break;
                }
        }
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_down_prepare(cpu, amd_uncore_llc);

        return 0;
}

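/*
 * Teardown counterpart of amd_uncore_cpu_up_prepare(): drop this cpu's
 * reference and free the uncore once the last sharer is gone.
 */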
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        if (cpu == uncore->cpu)
                cpumask_clear_cpu(cpu, uncore->active_mask);

        if (!--uncore->refcnt)
                kfree(uncore);
        *per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_dead(cpu, amd_uncore_llc);

        return 0;
}

static int __init amd_uncore_init(void)
{
        int ret = -ENODEV;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;

        if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
                /*
                 * For F17h or F18h, the Northbridge counters are repurposed
                 * as Data Fabric counters, and L3 counters are supported as
                 * well. The PMUs are exported based on family as either
                 * L2 or L3 and either NB or DF.
                 */
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L3;
                amd_nb_pmu.name           = "amd_df";
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
                l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
                amd_nb_pmu.name           = "amd_nb";
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
                l3_mask                   = false;
        }

        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
        amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto fail_nb;
                }
                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret)
                        goto fail_nb;

                pr_info("%s NB counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
                amd_uncore_llc = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_llc) {
                        ret = -ENOMEM;
                        goto fail_llc;
                }
                ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
                if (ret)
                        goto fail_llc;

                pr_info("%s LLC counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "perf/x86/amd/uncore:prepare",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
                goto fail_llc;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "perf/x86/amd/uncore:starting",
                              amd_uncore_cpu_starting, NULL))
                goto fail_prep;
        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
                              "perf/x86/amd/uncore:online",
                              amd_uncore_cpu_online,
                              amd_uncore_cpu_down_prepare))
                goto fail_start;
        return 0;

fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_llc)
                free_percpu(amd_uncore_llc);
fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);

        return ret;
}
device_initcall(amd_uncore_init);
