root/arch/x86/events/amd/core.c


DEFINITIONS

This source file includes the following definitions:
  1. amd_pmu_event_map
  2. amd_pmu_addr_offset
  3. amd_get_event_code
  4. amd_is_pair_event_code
  5. amd_core_hw_config
  6. amd_is_nb_event
  7. amd_has_nb
  8. amd_pmu_hw_config
  9. __amd_put_nb_event_constraints
  10. __amd_get_nb_event_constraints
  11. amd_alloc_nb
  12. amd_pmu_cpu_prepare
  13. amd_pmu_cpu_starting
  14. amd_pmu_cpu_dead
  15. amd_pmu_wait_on_overflow
  16. amd_pmu_disable_all
  17. amd_pmu_disable_event
  18. amd_pmu_handle_irq
  19. amd_get_event_constraints
  20. amd_put_event_constraints
  21. amd_get_event_constraints_f15h
  22. amd_get_event_constraints_f17h
  23. amd_event_sysfs_show
  24. amd_core_pmu_init
  25. amd_pmu_init
  26. amd_pmu_enable_virt
  27. amd_pmu_disable_virt

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 #include <linux/perf_event.h>
   3 #include <linux/export.h>
   4 #include <linux/types.h>
   5 #include <linux/init.h>
   6 #include <linux/slab.h>
   7 #include <linux/delay.h>
   8 #include <linux/jiffies.h>
   9 #include <asm/apicdef.h>
  10 #include <asm/nmi.h>
  11 
  12 #include "../perf_event.h"
  13 
  14 static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
  15 static unsigned long perf_nmi_window;
  16 
  17 static __initconst const u64 amd_hw_cache_event_ids
  18                                 [PERF_COUNT_HW_CACHE_MAX]
  19                                 [PERF_COUNT_HW_CACHE_OP_MAX]
  20                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  21 {
  22  [ C(L1D) ] = {
  23         [ C(OP_READ) ] = {
  24                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
  25                 [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
  26         },
  27         [ C(OP_WRITE) ] = {
  28                 [ C(RESULT_ACCESS) ] = 0,
  29                 [ C(RESULT_MISS)   ] = 0,
  30         },
  31         [ C(OP_PREFETCH) ] = {
  32                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
  33                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
  34         },
  35  },
  36  [ C(L1I ) ] = {
  37         [ C(OP_READ) ] = {
  38                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
  39                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
  40         },
  41         [ C(OP_WRITE) ] = {
  42                 [ C(RESULT_ACCESS) ] = -1,
  43                 [ C(RESULT_MISS)   ] = -1,
  44         },
  45         [ C(OP_PREFETCH) ] = {
  46                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
  47                 [ C(RESULT_MISS)   ] = 0,
  48         },
  49  },
  50  [ C(LL  ) ] = {
  51         [ C(OP_READ) ] = {
  52                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
  53                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
  54         },
  55         [ C(OP_WRITE) ] = {
  56                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
  57                 [ C(RESULT_MISS)   ] = 0,
  58         },
  59         [ C(OP_PREFETCH) ] = {
  60                 [ C(RESULT_ACCESS) ] = 0,
  61                 [ C(RESULT_MISS)   ] = 0,
  62         },
  63  },
  64  [ C(DTLB) ] = {
  65         [ C(OP_READ) ] = {
  66                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
  67                 [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
  68         },
  69         [ C(OP_WRITE) ] = {
  70                 [ C(RESULT_ACCESS) ] = 0,
  71                 [ C(RESULT_MISS)   ] = 0,
  72         },
  73         [ C(OP_PREFETCH) ] = {
  74                 [ C(RESULT_ACCESS) ] = 0,
  75                 [ C(RESULT_MISS)   ] = 0,
  76         },
  77  },
  78  [ C(ITLB) ] = {
  79         [ C(OP_READ) ] = {
  80                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
  81                 [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
  82         },
  83         [ C(OP_WRITE) ] = {
  84                 [ C(RESULT_ACCESS) ] = -1,
  85                 [ C(RESULT_MISS)   ] = -1,
  86         },
  87         [ C(OP_PREFETCH) ] = {
  88                 [ C(RESULT_ACCESS) ] = -1,
  89                 [ C(RESULT_MISS)   ] = -1,
  90         },
  91  },
  92  [ C(BPU ) ] = {
  93         [ C(OP_READ) ] = {
  94                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
  95                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
  96         },
  97         [ C(OP_WRITE) ] = {
  98                 [ C(RESULT_ACCESS) ] = -1,
  99                 [ C(RESULT_MISS)   ] = -1,
 100         },
 101         [ C(OP_PREFETCH) ] = {
 102                 [ C(RESULT_ACCESS) ] = -1,
 103                 [ C(RESULT_MISS)   ] = -1,
 104         },
 105  },
 106  [ C(NODE) ] = {
 107         [ C(OP_READ) ] = {
 108                 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
 109                 [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
 110         },
 111         [ C(OP_WRITE) ] = {
 112                 [ C(RESULT_ACCESS) ] = -1,
 113                 [ C(RESULT_MISS)   ] = -1,
 114         },
 115         [ C(OP_PREFETCH) ] = {
 116                 [ C(RESULT_ACCESS) ] = -1,
 117                 [ C(RESULT_MISS)   ] = -1,
 118         },
 119  },
 120 };
 121 
 122 static __initconst const u64 amd_hw_cache_event_ids_f17h
 123                                 [PERF_COUNT_HW_CACHE_MAX]
 124                                 [PERF_COUNT_HW_CACHE_OP_MAX]
 125                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 126 [C(L1D)] = {
 127         [C(OP_READ)] = {
 128                 [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
 129                 [C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
 130         },
 131         [C(OP_WRITE)] = {
 132                 [C(RESULT_ACCESS)] = 0,
 133                 [C(RESULT_MISS)]   = 0,
 134         },
 135         [C(OP_PREFETCH)] = {
 136                 [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
 137                 [C(RESULT_MISS)]   = 0,
 138         },
 139 },
 140 [C(L1I)] = {
 141         [C(OP_READ)] = {
 142                 [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
 143                 [C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
 144         },
 145         [C(OP_WRITE)] = {
 146                 [C(RESULT_ACCESS)] = -1,
 147                 [C(RESULT_MISS)]   = -1,
 148         },
 149         [C(OP_PREFETCH)] = {
 150                 [C(RESULT_ACCESS)] = 0,
 151                 [C(RESULT_MISS)]   = 0,
 152         },
 153 },
 154 [C(LL)] = {
 155         [C(OP_READ)] = {
 156                 [C(RESULT_ACCESS)] = 0,
 157                 [C(RESULT_MISS)]   = 0,
 158         },
 159         [C(OP_WRITE)] = {
 160                 [C(RESULT_ACCESS)] = 0,
 161                 [C(RESULT_MISS)]   = 0,
 162         },
 163         [C(OP_PREFETCH)] = {
 164                 [C(RESULT_ACCESS)] = 0,
 165                 [C(RESULT_MISS)]   = 0,
 166         },
 167 },
 168 [C(DTLB)] = {
 169         [C(OP_READ)] = {
 170                 [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
 171                 [C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
 172         },
 173         [C(OP_WRITE)] = {
 174                 [C(RESULT_ACCESS)] = 0,
 175                 [C(RESULT_MISS)]   = 0,
 176         },
 177         [C(OP_PREFETCH)] = {
 178                 [C(RESULT_ACCESS)] = 0,
 179                 [C(RESULT_MISS)]   = 0,
 180         },
 181 },
 182 [C(ITLB)] = {
 183         [C(OP_READ)] = {
 184                 [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
 185                 [C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
 186         },
 187         [C(OP_WRITE)] = {
 188                 [C(RESULT_ACCESS)] = -1,
 189                 [C(RESULT_MISS)]   = -1,
 190         },
 191         [C(OP_PREFETCH)] = {
 192                 [C(RESULT_ACCESS)] = -1,
 193                 [C(RESULT_MISS)]   = -1,
 194         },
 195 },
 196 [C(BPU)] = {
 197         [C(OP_READ)] = {
 198                 [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
 199                 [C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
 200         },
 201         [C(OP_WRITE)] = {
 202                 [C(RESULT_ACCESS)] = -1,
 203                 [C(RESULT_MISS)]   = -1,
 204         },
 205         [C(OP_PREFETCH)] = {
 206                 [C(RESULT_ACCESS)] = -1,
 207                 [C(RESULT_MISS)]   = -1,
 208         },
 209 },
 210 [C(NODE)] = {
 211         [C(OP_READ)] = {
 212                 [C(RESULT_ACCESS)] = 0,
 213                 [C(RESULT_MISS)]   = 0,
 214         },
 215         [C(OP_WRITE)] = {
 216                 [C(RESULT_ACCESS)] = -1,
 217                 [C(RESULT_MISS)]   = -1,
 218         },
 219         [C(OP_PREFETCH)] = {
 220                 [C(RESULT_ACCESS)] = -1,
 221                 [C(RESULT_MISS)]   = -1,
 222         },
 223 },
 224 };
 225 
 226 /*
 227  * AMD Performance Monitor K7 and later, up to and including Family 16h:
 228  */
 229 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 230 {
 231         [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
 232         [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
 233         [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
 234         [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
 235         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
 236         [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
 237         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
 238         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
 239 };
 240 
 241 /*
 242  * AMD Performance Monitor Family 17h and later:
 243  */
 244 static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
 245 {
 246         [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
 247         [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
 248         [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
 249         [PERF_COUNT_HW_CACHE_MISSES]            = 0x0964,
 250         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
 251         [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
 252         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
 253         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
 254 };
 255 
 256 static u64 amd_pmu_event_map(int hw_event)
 257 {
 258         if (boot_cpu_data.x86 >= 0x17)
 259                 return amd_f17h_perfmon_event_map[hw_event];
 260 
 261         return amd_perfmon_event_map[hw_event];
 262 }
 263 
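/*
 * Illustrative sketch (standalone, not taken from this file): how the
 * generic "cache-misses" event resolves to a raw event code depending on
 * the CPU family, using the two tables above. The family value is passed
 * in by the caller here instead of being read from boot_cpu_data.
 */
static inline unsigned long long example_cache_misses_code(unsigned int family)
{
	/* Family 17h and later use 0x0964; older families use 0x077e. */
	return (family >= 0x17) ? 0x0964 : 0x077e;
}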
 264 /*
 265  * Previously calculated offsets
 266  */
 267 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
 268 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
 269 
 270 /*
 271  * Legacy CPUs:
 272  *   4 counters starting at 0xc0010000 each offset by 1
 273  *
 274  * CPUs with core performance counter extensions:
 275  *   6 counters starting at 0xc0010200 each offset by 2
 276  */
 277 static inline int amd_pmu_addr_offset(int index, bool eventsel)
 278 {
 279         int offset;
 280 
 281         if (!index)
 282                 return index;
 283 
 284         if (eventsel)
 285                 offset = event_offsets[index];
 286         else
 287                 offset = count_offsets[index];
 288 
 289         if (offset)
 290                 return offset;
 291 
 292         if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 293                 offset = index;
 294         else
 295                 offset = index << 1;
 296 
 297         if (eventsel)
 298                 event_offsets[index] = offset;
 299         else
 300                 count_offsets[index] = offset;
 301 
 302         return offset;
 303 }
 304 
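/*
 * Illustrative sketch (standalone, not taken from this file): the effective
 * event-select MSR for a counter is the base MSR plus the offset computed
 * above. The bases follow the comment before amd_pmu_addr_offset():
 * 0xc0010000 (MSR_K7_EVNTSEL0) for legacy counters and 0xc0010200
 * (MSR_F15H_PERF_CTL) for CPUs with the core performance counter
 * extensions, where control and count registers are interleaved.
 */
static inline unsigned int example_eventsel_msr(int index, int has_perfctr_core)
{
	if (!has_perfctr_core)
		return 0xc0010000 + index;	/* EVNTSEL0 + index     */
	return 0xc0010200 + (index << 1);	/* PERF_CTL + 2 * index */
}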
 305 /*
 306  * AMD64 events are detected based on their event codes.
 307  */
 308 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
 309 {
 310         return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
 311 }
 312 
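/*
 * Illustrative sketch (standalone, not taken from this file): AMD's 12-bit
 * event select is split in the event-select register, with bits [7:0] held
 * in config[7:0] and bits [11:8] in config[35:32] (see the "event" format
 * attribute below, config:0-7,32-35). A config of 0x1000000d6 therefore
 * yields event code 0x1D6, one of the Family 15h EX events listed further
 * down.
 */
static inline unsigned int example_event_code(unsigned long long config)
{
	return ((config >> 24) & 0x0f00) | (config & 0x00ff);
}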
 313 static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
 314 {
 315         if (!(x86_pmu.flags & PMU_FL_PAIR))
 316                 return false;
 317 
 318         switch (amd_get_event_code(hwc)) {
 319         case 0x003:     return true;    /* Retired SSE/AVX FLOPs */
 320         default:        return false;
 321         }
 322 }
 323 
 324 static int amd_core_hw_config(struct perf_event *event)
 325 {
 326         if (event->attr.exclude_host && event->attr.exclude_guest)
 327                 /*
 328                  * When HO == GO == 1 the hardware treats that as GO == HO == 0
 329                  * and will count in both modes. We don't want to count in that
 330                  * case so we emulate no-counting by setting US = OS = 0.
 331                  */
 332                 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
 333                                       ARCH_PERFMON_EVENTSEL_OS);
 334         else if (event->attr.exclude_host)
 335                 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
 336         else if (event->attr.exclude_guest)
 337                 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
 338 
 339         return 0;
 340 }
 341 
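/*
 * Summary of the guest/host filtering applied above (HO = Host-Only bit,
 * GO = Guest-Only bit in the event select):
 *
 *   exclude_host  exclude_guest   effect
 *   0             0               count in host and guest (HO = GO = 0)
 *   1             0               GO = 1, count in guest only
 *   0             1               HO = 1, count in host only
 *   1             1               USR = OS = 0, event counts nothing
 */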
 342 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 343 {
 344         return (hwc->config & 0xe0) == 0xe0;
 345 }
 346 
 347 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 348 {
 349         struct amd_nb *nb = cpuc->amd_nb;
 350 
 351         return nb && nb->nb_id != -1;
 352 }
 353 
 354 static int amd_pmu_hw_config(struct perf_event *event)
 355 {
 356         int ret;
 357 
 358         /* pass precise event sampling to ibs: */
 359         if (event->attr.precise_ip && get_ibs_caps())
 360                 return -ENOENT;
 361 
 362         if (has_branch_stack(event))
 363                 return -EOPNOTSUPP;
 364 
 365         ret = x86_pmu_hw_config(event);
 366         if (ret)
 367                 return ret;
 368 
 369         if (event->attr.type == PERF_TYPE_RAW)
 370                 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
 371 
 372         return amd_core_hw_config(event);
 373 }
 374 
 375 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 376                                            struct perf_event *event)
 377 {
 378         struct amd_nb *nb = cpuc->amd_nb;
 379         int i;
 380 
 381         /*
 382          * need to scan whole list because event may not have
 383          * been assigned during scheduling
 384          *
 385          * no race condition possible because event can only
 386          * be removed on one CPU at a time AND PMU is disabled
 387          * when we come here
 388          */
 389         for (i = 0; i < x86_pmu.num_counters; i++) {
 390                 if (cmpxchg(nb->owners + i, event, NULL) == event)
 391                         break;
 392         }
 393 }
 394 
 395  /*
 396   * AMD64 NorthBridge events need special treatment because
 397   * counter access needs to be synchronized across all cores
 398   * of a package. Refer to BKDG section 3.12
 399   *
 400   * NB events are events measuring L3 cache and HyperTransport
 401   * traffic. They are identified by an event code >= 0xe00.
 402   * They measure events on the NorthBridge, which is shared
 403   * by all cores on a package. NB events are counted on a
 404   * shared set of counters. When a NB event is programmed
 405   * in a counter, the data actually comes from a shared
 406   * counter. Thus, access to those counters needs to be
 407   * synchronized.
 408   *
 409   * We implement the synchronization such that no two cores
 410   * can be measuring NB events using the same counters. Thus,
 411   * we maintain a per-NB allocation table. The available slot
 412   * is propagated using the event_constraint structure.
 413   *
 414   * We provide only one choice for each NB event based on
 415   * the fact that only NB events have restrictions. Consequently,
 416   * if a counter is available, there is a guarantee the NB event
 417   * will be assigned to it. If no slot is available, an empty
 418   * constraint is returned and scheduling will eventually fail
 419   * for this event.
 420   *
 421   * Note that all cores attached to the same NB compete for the same
 422   * counters to host NB events; this is why we use atomic ops. Some
 423   * multi-chip CPUs may have more than one NB.
 424   *
 425   * Given that resources are allocated (cmpxchg), they must be
 426   * eventually freed for others to use. This is accomplished by
 427   * calling __amd_put_nb_event_constraints()
 428   *
 429   * Non-NB events are not impacted by this restriction.
 430   */
 431 static struct event_constraint *
 432 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
 433                                struct event_constraint *c)
 434 {
 435         struct hw_perf_event *hwc = &event->hw;
 436         struct amd_nb *nb = cpuc->amd_nb;
 437         struct perf_event *old;
 438         int idx, new = -1;
 439 
 440         if (!c)
 441                 c = &unconstrained;
 442 
 443         if (cpuc->is_fake)
 444                 return c;
 445 
 446         /*
 447          * detect if already present, if so reuse
 448          *
 449          * cannot merge with actual allocation
 450          * because of possible holes
 451          *
 452          * event can already be present yet not assigned (in hwc->idx)
 453          * because of successive calls to x86_schedule_events() from
 454          * hw_perf_group_sched_in() without hw_perf_enable()
 455          */
 456         for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
 457                 if (new == -1 || hwc->idx == idx)
 458                         /* assign free slot, prefer hwc->idx */
 459                         old = cmpxchg(nb->owners + idx, NULL, event);
 460                 else if (nb->owners[idx] == event)
 461                         /* event already present */
 462                         old = event;
 463                 else
 464                         continue;
 465 
 466                 if (old && old != event)
 467                         continue;
 468 
 469                 /* reassign to this slot */
 470                 if (new != -1)
 471                         cmpxchg(nb->owners + new, event, NULL);
 472                 new = idx;
 473 
 474                 /* already present, reuse */
 475                 if (old == event)
 476                         break;
 477         }
 478 
 479         if (new == -1)
 480                 return &emptyconstraint;
 481 
 482         return &nb->event_constraints[new];
 483 }
 484 
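/*
 * Illustrative sketch (standalone, not taken from this file) of the slot
 * claiming done above, simplified: it uses C11 atomics in place of the
 * kernel's cmpxchg() and drops the "prefer hwc->idx" logic. Each entry of
 * the shared owners[] table is either NULL (free) or the owning event; a
 * successful compare-and-swap from NULL claims the slot for this core.
 */
#include <stdatomic.h>
#include <stddef.h>

#define EXAMPLE_NUM_COUNTERS 4

static _Atomic(void *) example_owners[EXAMPLE_NUM_COUNTERS];

static int example_claim_slot(void *event)
{
	int idx;

	for (idx = 0; idx < EXAMPLE_NUM_COUNTERS; idx++) {
		void *expected = NULL;

		/* Only one core can win the swap for a given slot. */
		if (atomic_compare_exchange_strong(&example_owners[idx],
						   &expected, event))
			return idx;
		/* The slot may already be ours from an earlier pass. */
		if (expected == event)
			return idx;
	}
	return -1;	/* no free slot: the empty constraint case above */
}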
 485 static struct amd_nb *amd_alloc_nb(int cpu)
 486 {
 487         struct amd_nb *nb;
 488         int i;
 489 
 490         nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
 491         if (!nb)
 492                 return NULL;
 493 
 494         nb->nb_id = -1;
 495 
 496         /*
 497          * initialize all possible NB constraints
 498          */
 499         for (i = 0; i < x86_pmu.num_counters; i++) {
 500                 __set_bit(i, nb->event_constraints[i].idxmsk);
 501                 nb->event_constraints[i].weight = 1;
 502         }
 503         return nb;
 504 }
 505 
 506 static int amd_pmu_cpu_prepare(int cpu)
 507 {
 508         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 509 
 510         WARN_ON_ONCE(cpuc->amd_nb);
 511 
 512         if (!x86_pmu.amd_nb_constraints)
 513                 return 0;
 514 
 515         cpuc->amd_nb = amd_alloc_nb(cpu);
 516         if (!cpuc->amd_nb)
 517                 return -ENOMEM;
 518 
 519         return 0;
 520 }
 521 
 522 static void amd_pmu_cpu_starting(int cpu)
 523 {
 524         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 525         void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
 526         struct amd_nb *nb;
 527         int i, nb_id;
 528 
 529         cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 530 
 531         if (!x86_pmu.amd_nb_constraints)
 532                 return;
 533 
 534         nb_id = amd_get_nb_id(cpu);
 535         WARN_ON_ONCE(nb_id == BAD_APICID);
 536 
 537         for_each_online_cpu(i) {
 538                 nb = per_cpu(cpu_hw_events, i).amd_nb;
 539                 if (WARN_ON_ONCE(!nb))
 540                         continue;
 541 
 542                 if (nb->nb_id == nb_id) {
 543                         *onln = cpuc->amd_nb;
 544                         cpuc->amd_nb = nb;
 545                         break;
 546                 }
 547         }
 548 
 549         cpuc->amd_nb->nb_id = nb_id;
 550         cpuc->amd_nb->refcnt++;
 551 }
 552 
 553 static void amd_pmu_cpu_dead(int cpu)
 554 {
 555         struct cpu_hw_events *cpuhw;
 556 
 557         if (!x86_pmu.amd_nb_constraints)
 558                 return;
 559 
 560         cpuhw = &per_cpu(cpu_hw_events, cpu);
 561 
 562         if (cpuhw->amd_nb) {
 563                 struct amd_nb *nb = cpuhw->amd_nb;
 564 
 565                 if (nb->nb_id == -1 || --nb->refcnt == 0)
 566                         kfree(nb);
 567 
 568                 cpuhw->amd_nb = NULL;
 569         }
 570 }
 571 
 572 /*
 573  * When a PMC counter overflows, an NMI is used to process the event and
 574  * reset the counter. NMI latency can result in the counter being updated
 575  * before the NMI can run, which can result in what appear to be spurious
 576  * NMIs. This function is intended to wait for the NMI to run and reset
 577  * the counter to avoid possible unhandled NMI messages.
 578  */
 579 #define OVERFLOW_WAIT_COUNT     50
 580 
 581 static void amd_pmu_wait_on_overflow(int idx)
 582 {
 583         unsigned int i;
 584         u64 counter;
 585 
 586         /*
 587          * Wait for the counter to be reset if it has overflowed. This loop
 588          * should exit very, very quickly, but just in case, don't wait
 589          * forever...
 590          */
 591         for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
 592                 rdmsrl(x86_pmu_event_addr(idx), counter);
 593                 if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
 594                         break;
 595 
 596                 /* Might be in IRQ context, so can't sleep */
 597                 udelay(1);
 598         }
 599 }
 600 
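/*
 * Illustrative sketch (standalone, not taken from this file): with 48-bit
 * counters (cntval_bits = 48 in the PMU description below), perf programs
 * an active counter with a negative value, so bit 47 is normally set. On
 * overflow the counter wraps to a small value with bit 47 clear; once the
 * NMI handler has reloaded it, bit 47 is set again, which is what the loop
 * above polls for.
 */
static inline int example_counter_was_reset(unsigned long long counter,
					    unsigned int cntval_bits)
{
	return !!(counter & (1ULL << (cntval_bits - 1)));
}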
 601 static void amd_pmu_disable_all(void)
 602 {
 603         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 604         int idx;
 605 
 606         x86_pmu_disable_all();
 607 
 608         /*
 609          * This shouldn't be called from NMI context, but add a safeguard here
 610          * to return, since if we're in NMI context we can't wait for an NMI
 611          * to reset an overflowed counter value.
 612          */
 613         if (in_nmi())
 614                 return;
 615 
 616         /*
 617          * Check each counter for overflow and wait for it to be reset by the
 618          * NMI if it has overflowed. This relies on the fact that all active
 619          * counters are always enabled when this function is called and
 620          * ARCH_PERFMON_EVENTSEL_INT is always set.
 621          */
 622         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 623                 if (!test_bit(idx, cpuc->active_mask))
 624                         continue;
 625 
 626                 amd_pmu_wait_on_overflow(idx);
 627         }
 628 }
 629 
 630 static void amd_pmu_disable_event(struct perf_event *event)
 631 {
 632         x86_pmu_disable_event(event);
 633 
 634         /*
 635          * This can be called from NMI context (via x86_pmu_stop). The counter
 636          * may have overflowed, but either way, we'll never see it get reset
 637          * by the NMI if we're already in the NMI. And the NMI latency support
 638          * below will take care of any pending NMI that might have been
 639          * generated by the overflow.
 640          */
 641         if (in_nmi())
 642                 return;
 643 
 644         amd_pmu_wait_on_overflow(event->hw.idx);
 645 }
 646 
 647 /*
 648  * Because of NMI latency, if multiple PMC counters are active or other sources
 649  * of NMIs are received, the perf NMI handler can handle one or more overflowed
 650  * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 651  * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 652  * back-to-back NMI support won't be active. This PMC handler needs to take into
 653  * account that this can occur; otherwise this could result in unknown NMI
 654  * messages being issued. Examples of this are PMC overflow while in the NMI
 655  * handler when multiple PMCs are active or PMC overflow while handling some
 656  * other source of an NMI.
 657  *
 658  * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 659  * received during this window will be claimed. This prevents extending the
 660  * window past when it is possible that latent NMIs should be received. The
 661  * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 662  * handled a counter. When an un-handled NMI is received, it will be claimed
 663  * only if arriving within that window.
 664  */
 665 static int amd_pmu_handle_irq(struct pt_regs *regs)
 666 {
 667         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 668         int active, handled;
 669 
 670         /*
 671          * Obtain the active count before calling x86_pmu_handle_irq() since
 672          * it is possible that x86_pmu_handle_irq() may make a counter
 673          * inactive (through x86_pmu_stop).
 674          */
 675         active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
 676 
 677         /* Process any counter overflows */
 678         handled = x86_pmu_handle_irq(regs);
 679 
 680         /*
 681          * If a counter was handled, record a timestamp such that un-handled
 682          * NMIs will be claimed if arriving within that window.
 683          */
 684         if (handled) {
 685                 this_cpu_write(perf_nmi_tstamp,
 686                                jiffies + perf_nmi_window);
 687 
 688                 return handled;
 689         }
 690 
 691         if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
 692                 return NMI_DONE;
 693 
 694         return NMI_HANDLED;
 695 }
 696 
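/*
 * Illustrative sketch (standalone, not taken from this file): the claim
 * decision made above, with jiffies and the per-CPU timestamp passed in as
 * plain values. An unhandled NMI is claimed only while "now" has not yet
 * passed the window end recorded the last time a counter overflow was
 * handled; the wrap-safe signed comparison mirrors !time_after().
 */
static inline int example_claim_unhandled_nmi(unsigned long now,
					      unsigned long window_end)
{
	return (long)(now - window_end) <= 0;	/* 1 -> NMI_HANDLED */
}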
 697 static struct event_constraint *
 698 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 699                           struct perf_event *event)
 700 {
 701         /*
 702          * if not NB event or no NB, then no constraints
 703          */
 704         if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
 705                 return &unconstrained;
 706 
 707         return __amd_get_nb_event_constraints(cpuc, event, NULL);
 708 }
 709 
 710 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 711                                       struct perf_event *event)
 712 {
 713         if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
 714                 __amd_put_nb_event_constraints(cpuc, event);
 715 }
 716 
 717 PMU_FORMAT_ATTR(event,  "config:0-7,32-35");
 718 PMU_FORMAT_ATTR(umask,  "config:8-15"   );
 719 PMU_FORMAT_ATTR(edge,   "config:18"     );
 720 PMU_FORMAT_ATTR(inv,    "config:23"     );
 721 PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
 722 
 723 static struct attribute *amd_format_attr[] = {
 724         &format_attr_event.attr,
 725         &format_attr_umask.attr,
 726         &format_attr_edge.attr,
 727         &format_attr_inv.attr,
 728         &format_attr_cmask.attr,
 729         NULL,
 730 };
 731 
 732 /* AMD Family 15h */
 733 
 734 #define AMD_EVENT_TYPE_MASK     0x000000F0ULL
 735 
 736 #define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
 737 #define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
 738 #define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
 739 #define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
 740 #define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
 741 #define AMD_EVENT_EX_LS         0x000000C0ULL
 742 #define AMD_EVENT_DE            0x000000D0ULL
 743 #define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL
 744 
 745 /*
 746  * AMD family 15h event code/PMC mappings:
 747  *
 748  * type = event_code & 0x0F0:
 749  *
 750  * 0x000        FP      PERF_CTL[5:3]
 751  * 0x010        FP      PERF_CTL[5:3]
 752  * 0x020        LS      PERF_CTL[5:0]
 753  * 0x030        LS      PERF_CTL[5:0]
 754  * 0x040        DC      PERF_CTL[5:0]
 755  * 0x050        DC      PERF_CTL[5:0]
 756  * 0x060        CU      PERF_CTL[2:0]
 757  * 0x070        CU      PERF_CTL[2:0]
 758  * 0x080        IC/DE   PERF_CTL[2:0]
 759  * 0x090        IC/DE   PERF_CTL[2:0]
 760  * 0x0A0        ---
 761  * 0x0B0        ---
 762  * 0x0C0        EX/LS   PERF_CTL[5:0]
 763  * 0x0D0        DE      PERF_CTL[2:0]
 764  * 0x0E0        NB      NB_PERF_CTL[3:0]
 765  * 0x0F0        NB      NB_PERF_CTL[3:0]
 766  *
 767  * Exceptions:
 768  *
 769  * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 770  * 0x003        FP      PERF_CTL[3]
 771  * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 772  * 0x00B        FP      PERF_CTL[3]
 773  * 0x00D        FP      PERF_CTL[3]
 774  * 0x023        DE      PERF_CTL[2:0]
 775  * 0x02D        LS      PERF_CTL[3]
 776  * 0x02E        LS      PERF_CTL[3,0]
 777  * 0x031        LS      PERF_CTL[2:0] (**)
 778  * 0x043        CU      PERF_CTL[2:0]
 779  * 0x045        CU      PERF_CTL[2:0]
 780  * 0x046        CU      PERF_CTL[2:0]
 781  * 0x054        CU      PERF_CTL[2:0]
 782  * 0x055        CU      PERF_CTL[2:0]
 783  * 0x08F        IC      PERF_CTL[0]
 784  * 0x187        DE      PERF_CTL[0]
 785  * 0x188        DE      PERF_CTL[0]
 786  * 0x0DB        EX      PERF_CTL[5:0]
 787  * 0x0DC        LS      PERF_CTL[5:0]
 788  * 0x0DD        LS      PERF_CTL[5:0]
 789  * 0x0DE        LS      PERF_CTL[5:0]
 790  * 0x0DF        LS      PERF_CTL[5:0]
 791  * 0x1C0        EX      PERF_CTL[5:3]
 792  * 0x1D6        EX      PERF_CTL[5:0]
 793  * 0x1D8        EX      PERF_CTL[5:0]
 794  *
 795  * (*)  depending on the umask all FPU counters may be used
 796  * (**) only one unitmask enabled at a time
 797  */
 798 
 799 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
 800 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
 801 static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
 802 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 803 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 804 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 805 
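/*
 * The index masks above encode which counters an event may be scheduled on:
 * 0x01 = PERF_CTL[0], 0x07 = PERF_CTL[2:0], 0x08 = PERF_CTL[3],
 * 0x09 = PERF_CTL[3,0], 0x3F = PERF_CTL[5:0] and 0x38 = PERF_CTL[5:3],
 * matching the Family 15h mapping table above.
 */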
 806 static struct event_constraint *
 807 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
 808                                struct perf_event *event)
 809 {
 810         struct hw_perf_event *hwc = &event->hw;
 811         unsigned int event_code = amd_get_event_code(hwc);
 812 
 813         switch (event_code & AMD_EVENT_TYPE_MASK) {
 814         case AMD_EVENT_FP:
 815                 switch (event_code) {
 816                 case 0x000:
 817                         if (!(hwc->config & 0x0000F000ULL))
 818                                 break;
 819                         if (!(hwc->config & 0x00000F00ULL))
 820                                 break;
 821                         return &amd_f15_PMC3;
 822                 case 0x004:
 823                         if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
 824                                 break;
 825                         return &amd_f15_PMC3;
 826                 case 0x003:
 827                 case 0x00B:
 828                 case 0x00D:
 829                         return &amd_f15_PMC3;
 830                 }
 831                 return &amd_f15_PMC53;
 832         case AMD_EVENT_LS:
 833         case AMD_EVENT_DC:
 834         case AMD_EVENT_EX_LS:
 835                 switch (event_code) {
 836                 case 0x023:
 837                 case 0x043:
 838                 case 0x045:
 839                 case 0x046:
 840                 case 0x054:
 841                 case 0x055:
 842                         return &amd_f15_PMC20;
 843                 case 0x02D:
 844                         return &amd_f15_PMC3;
 845                 case 0x02E:
 846                         return &amd_f15_PMC30;
 847                 case 0x031:
 848                         if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
 849                                 return &amd_f15_PMC20;
 850                         return &emptyconstraint;
 851                 case 0x1C0:
 852                         return &amd_f15_PMC53;
 853                 default:
 854                         return &amd_f15_PMC50;
 855                 }
 856         case AMD_EVENT_CU:
 857         case AMD_EVENT_IC_DE:
 858         case AMD_EVENT_DE:
 859                 switch (event_code) {
 860                 case 0x08F:
 861                 case 0x187:
 862                 case 0x188:
 863                         return &amd_f15_PMC0;
 864                 case 0x0DB ... 0x0DF:
 865                 case 0x1D6:
 866                 case 0x1D8:
 867                         return &amd_f15_PMC50;
 868                 default:
 869                         return &amd_f15_PMC20;
 870                 }
 871         case AMD_EVENT_NB:
 872                 /* moved to uncore.c */
 873                 return &emptyconstraint;
 874         default:
 875                 return &emptyconstraint;
 876         }
 877 }
 878 
 879 static struct event_constraint pair_constraint;
 880 
 881 static struct event_constraint *
 882 amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
 883                                struct perf_event *event)
 884 {
 885         struct hw_perf_event *hwc = &event->hw;
 886 
 887         if (amd_is_pair_event_code(hwc))
 888                 return &pair_constraint;
 889 
 890         return &unconstrained;
 891 }
 892 
 893 static ssize_t amd_event_sysfs_show(char *page, u64 config)
 894 {
 895         u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
 896                     (config & AMD64_EVENTSEL_EVENT) >> 24;
 897 
 898         return x86_event_sysfs_show(page, config, event);
 899 }
 900 
 901 static __initconst const struct x86_pmu amd_pmu = {
 902         .name                   = "AMD",
 903         .handle_irq             = amd_pmu_handle_irq,
 904         .disable_all            = amd_pmu_disable_all,
 905         .enable_all             = x86_pmu_enable_all,
 906         .enable                 = x86_pmu_enable_event,
 907         .disable                = amd_pmu_disable_event,
 908         .hw_config              = amd_pmu_hw_config,
 909         .schedule_events        = x86_schedule_events,
 910         .eventsel               = MSR_K7_EVNTSEL0,
 911         .perfctr                = MSR_K7_PERFCTR0,
 912         .addr_offset            = amd_pmu_addr_offset,
 913         .event_map              = amd_pmu_event_map,
 914         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
 915         .num_counters           = AMD64_NUM_COUNTERS,
 916         .cntval_bits            = 48,
 917         .cntval_mask            = (1ULL << 48) - 1,
 918         .apic                   = 1,
 919         /* use highest bit to detect overflow */
 920         .max_period             = (1ULL << 47) - 1,
 921         .get_event_constraints  = amd_get_event_constraints,
 922         .put_event_constraints  = amd_put_event_constraints,
 923 
 924         .format_attrs           = amd_format_attr,
 925         .events_sysfs_show      = amd_event_sysfs_show,
 926 
 927         .cpu_prepare            = amd_pmu_cpu_prepare,
 928         .cpu_starting           = amd_pmu_cpu_starting,
 929         .cpu_dead               = amd_pmu_cpu_dead,
 930 
 931         .amd_nb_constraints     = 1,
 932 };
 933 
 934 static int __init amd_core_pmu_init(void)
 935 {
 936         u64 even_ctr_mask = 0ULL;
 937         int i;
 938 
 939         if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 940                 return 0;
 941 
 942         /* Avoid calculating the value each time in the NMI handler */
 943         perf_nmi_window = msecs_to_jiffies(100);
 944 
 945         /*
 946          * If core performance counter extensions exist, we must use
 947          * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
 948          * amd_pmu_addr_offset().
 949          */
 950         x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
 951         x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
 952         x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
 953         /*
 954          * AMD Core perfctr has separate MSRs for the NB events; see
 955          * the amd/uncore.c driver.
 956          */
 957         x86_pmu.amd_nb_constraints = 0;
 958 
 959         if (boot_cpu_data.x86 == 0x15) {
 960                 pr_cont("Fam15h ");
 961                 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
 962         }
 963         if (boot_cpu_data.x86 >= 0x17) {
 964                 pr_cont("Fam17h+ ");
 965                 /*
 966                  * Family 17h and compatibles have constraints for Large
 967                  * Increment per Cycle events: they may only be assigned an
 968                  * even numbered counter that has a consecutive adjacent odd
 969                  * numbered counter following it.
 970                  */
 971                 for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
 972                         even_ctr_mask |= 1 << i;
 973 
 974                 pair_constraint = (struct event_constraint)
 975                                     __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
 976                                     x86_pmu.num_counters / 2, 0,
 977                                     PERF_X86_EVENT_PAIR);
 978 
 979                 x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
 980                 x86_pmu.flags |= PMU_FL_PAIR;
 981         }
 982 
 983         pr_cont("core perfctr, ");
 984         return 0;
 985 }
 986 
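/*
 * Illustrative sketch (standalone, not taken from this file): the
 * even-counter mask built by the loop above for the Large Increment per
 * Cycle pair constraint. With the six core counters configured earlier in
 * this function, i takes the values 0, 2 and 4, giving a mask of 0x15
 * (0b010101) and a constraint weight of num_counters / 2 = 3.
 */
static inline unsigned long long example_even_ctr_mask(int num_counters)
{
	unsigned long long mask = 0;
	int i;

	for (i = 0; i < num_counters - 1; i += 2)
		mask |= 1ULL << i;

	return mask;	/* 0x15 when num_counters == 6 */
}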
 987 __init int amd_pmu_init(void)
 988 {
 989         int ret;
 990 
 991         /* Performance-monitoring supported from K7 and later: */
 992         if (boot_cpu_data.x86 < 6)
 993                 return -ENODEV;
 994 
 995         x86_pmu = amd_pmu;
 996 
 997         ret = amd_core_pmu_init();
 998         if (ret)
 999                 return ret;
1000 
1001         if (num_possible_cpus() == 1) {
1002                 /*
1003                  * No point in allocating data structures to serialize
1004                  * against other CPUs when there is only one CPU.
1005                  */
1006                 x86_pmu.amd_nb_constraints = 0;
1007         }
1008 
1009         if (boot_cpu_data.x86 >= 0x17)
1010                 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
1011         else
1012                 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
1013 
1014         return 0;
1015 }
1016 
1017 void amd_pmu_enable_virt(void)
1018 {
1019         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1020 
1021         cpuc->perf_ctr_virt_mask = 0;
1022 
1023         /* Reload all events */
1024         amd_pmu_disable_all();
1025         x86_pmu_enable_all(0);
1026 }
1027 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
1028 
1029 void amd_pmu_disable_virt(void)
1030 {
1031         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1032 
1033         /*
1034          * We only mask out the Host-only bit so that host-only counting works
1035          * when SVM is disabled. If someone sets up a guest-only counter when
1036          * SVM is disabled, the Guest-only bit still gets set and the counter
1037          * will not count anything.
1038          */
1039         cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
1040 
1041         /* Reload all events */
1042         amd_pmu_disable_all();
1043         x86_pmu_enable_all(0);
1044 }
1045 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
