Lines matching refs: cpuhw

Identifier cross-reference for cpuhw, the per-CPU struct cpu_hw_events pointer used throughout the powerpc perf core (arch/powerpc/perf/core-book3s.c). Each entry shows the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" marks whether cpuhw is a parameter or a local variable at that reference.

120 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)  in ebb_switch_in()  argument
122 return cpuhw->mmcr[0]; in ebb_switch_in()
128 static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} in power_pmu_bhrb_read() argument
351 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_bhrb_enable() local
357 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { in power_pmu_bhrb_enable()
359 cpuhw->bhrb_context = event->ctx; in power_pmu_bhrb_enable()
361 cpuhw->bhrb_users++; in power_pmu_bhrb_enable()
367 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_bhrb_disable() local
372 WARN_ON_ONCE(!cpuhw->bhrb_users); in power_pmu_bhrb_disable()
373 cpuhw->bhrb_users--; in power_pmu_bhrb_disable()
376 if (!cpuhw->disabled && !cpuhw->bhrb_users) { in power_pmu_bhrb_disable()
382 cpuhw->bhrb_context = NULL; in power_pmu_bhrb_disable()
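power_pmu_bhrb_enable()/power_pmu_bhrb_disable() above keep the Branch History Rolling Buffer alive with a per-CPU reference count: bhrb_users tracks how many events want branch data, bhrb_context is invalidated when a different task context starts using the buffer (line 357), and it is cleared once the last user goes away while the PMU is live (lines 376-382). A minimal standalone model of that refcount-plus-context pattern follows; the struct and function names are invented for illustration, not the kernel API.

/* Standalone model of the bhrb_users refcount pattern (names invented,
 * not the kernel API): a shared facility stays claimed while any user
 * holds a reference, and cached per-context state is invalidated when
 * a different context takes over. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct hw_state {
	int users;          /* like cpuhw->bhrb_users */
	void *context;      /* like cpuhw->bhrb_context */
	int disabled;
};

static void facility_enable(struct hw_state *hw, void *ctx)
{
	if (ctx && hw->context != ctx)
		hw->context = ctx;   /* new task context: old data is stale */
	hw->users++;
}

static void facility_disable(struct hw_state *hw)
{
	assert(hw->users > 0);       /* WARN_ON_ONCE in the kernel */
	hw->users--;
	if (!hw->disabled && !hw->users)
		hw->context = NULL;  /* last user gone: drop the context */
}

int main(void)
{
	struct hw_state hw = { 0 };
	int task_a, task_b;

	facility_enable(&hw, &task_a);
	facility_enable(&hw, &task_b);   /* context switches to task_b */
	facility_disable(&hw);
	facility_disable(&hw);           /* users hits 0, context cleared */
	printf("users=%d context=%p\n", hw.users, hw.context);
	return 0;
}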
425 static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) in power_pmu_bhrb_read() argument
469 cpuhw->bhrb_entries[u_index].to = addr; in power_pmu_bhrb_read()
470 cpuhw->bhrb_entries[u_index].mispred = pred; in power_pmu_bhrb_read()
471 cpuhw->bhrb_entries[u_index].predicted = ~pred; in power_pmu_bhrb_read()
482 cpuhw->bhrb_entries[u_index].from = addr; in power_pmu_bhrb_read()
486 cpuhw->bhrb_entries[u_index].from = addr; in power_pmu_bhrb_read()
487 cpuhw->bhrb_entries[u_index].to = in power_pmu_bhrb_read()
489 cpuhw->bhrb_entries[u_index].mispred = pred; in power_pmu_bhrb_read()
490 cpuhw->bhrb_entries[u_index].predicted = ~pred; in power_pmu_bhrb_read()
496 cpuhw->bhrb_stack.nr = u_index; in power_pmu_bhrb_read()
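power_pmu_bhrb_read() walks the hardware buffer and fills cpuhw->bhrb_entries[] with the from/to addresses and prediction flags of each branch. The assignments at lines 471 and 490, predicted = ~pred, look suspicious but work because the destination is a one-bit bitfield: the bitwise NOT is truncated to a single bit, which is the logical inverse of a 0/1 flag. A small demonstration, using an invented struct as a stand-in for the real perf branch-entry flags:

/* Why "predicted = ~pred" works: the flags are 1-bit bitfields, so
 * assigning ~pred keeps only the low bit, i.e. !pred for 0/1 inputs. */
#include <stdint.h>
#include <stdio.h>

struct branch_flags {
	uint64_t mispred:1;
	uint64_t predicted:1;
};

int main(void)
{
	for (uint64_t pred = 0; pred <= 1; pred++) {
		struct branch_flags f;

		f.mispred = pred;
		f.predicted = ~pred;   /* truncated to 1 bit: !pred */
		printf("pred=%llu -> mispred=%llu predicted=%llu\n",
		       (unsigned long long)pred,
		       (unsigned long long)f.mispred,
		       (unsigned long long)f.predicted);
	}
	return 0;
}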
564 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) in ebb_switch_in() argument
566 unsigned long mmcr0 = cpuhw->mmcr[0]; in ebb_switch_in()
600 mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2); in ebb_switch_in()
828 static int power_check_constraints(struct cpu_hw_events *cpuhw, in power_check_constraints() argument
847 cpuhw->alternatives[i]); in power_check_constraints()
848 event_id[i] = cpuhw->alternatives[i][0]; in power_check_constraints()
850 if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], in power_check_constraints()
851 &cpuhw->avalues[i][0])) in power_check_constraints()
856 nv = (value | cpuhw->avalues[i][0]) + in power_check_constraints()
857 (value & cpuhw->avalues[i][0] & addf); in power_check_constraints()
859 (((nv + tadd) ^ cpuhw->avalues[i][0]) & in power_check_constraints()
860 cpuhw->amasks[i][0]) != 0) in power_check_constraints()
863 mask |= cpuhw->amasks[i][0]; in power_check_constraints()
874 cpuhw->alternatives[i]); in power_check_constraints()
876 ppmu->get_constraint(cpuhw->alternatives[i][j], in power_check_constraints()
877 &cpuhw->amasks[i][j], in power_check_constraints()
878 &cpuhw->avalues[i][j]); in power_check_constraints()
897 nv = (value | cpuhw->avalues[i][j]) + in power_check_constraints()
898 (value & cpuhw->avalues[i][j] & addf); in power_check_constraints()
900 (((nv + tadd) ^ cpuhw->avalues[i][j]) in power_check_constraints()
901 & cpuhw->amasks[i][j]) == 0) in power_check_constraints()
923 mask |= cpuhw->amasks[i][j]; in power_check_constraints()
931 event_id[i] = cpuhw->alternatives[i][choice[i]]; in power_check_constraints()
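power_check_constraints() schedules events with pure integer arithmetic. Each event's constraints are packed into one word as an (amask, avalue) pair: "select" fields must agree across all events, while "add" fields count how many events need a shared resource. The accumulator nv = (value | avalue) + (value & avalue & addf) (lines 856-857) ORs in the new bits and uses the AND term to ripple a carry when a count bit was already set; the test (((nv + tadd) ^ avalue) & amask) != 0 (lines 859-860) then fires when an add field, pushed by the PMU-specific test_adder, carries into an error bit covered by the event's mask. A self-contained model with one invented layout, a 3-bit add field at bits 0-2, an error bit at bit 3, and a limit of four events (the real layouts are defined per PMU driver):

/* Model of the power_check_constraints() arithmetic with an invented
 * one-field layout. Each event contributes avalue = 1 and checks the
 * error bit via amask = 0x8; ADDF marks the low bit of the add field
 * and TADD is chosen so count + TADD carries into bit 3 exactly when
 * the count exceeds 4 (count + 3 >= 8). */
#include <stdio.h>

#define ADDF 0x1UL
#define TADD 0x3UL

int main(void)
{
	unsigned long value = 0, mask = 0;
	const unsigned long avalue = 0x1UL, amask = 0x8UL;

	for (int i = 1; i <= 6; i++) {
		/* OR in the new bits; the AND term re-adds any bit that
		 * was already set so the carry ripples up the field. */
		unsigned long nv = (value | avalue) + (value & avalue & ADDF);

		if ((((nv + TADD) ^ avalue) & amask) != 0) {
			printf("event %d rejected: add field would exceed 4\n", i);
			break;
		}
		value = nv;
		mask |= amask;
		printf("event %d accepted, count=%lu\n", i, value & 0x7UL);
	}
	printf("final mask=%#lx\n", mask);
	return 0;
}

Running this accepts events 1-4 and rejects the fifth, which is the whole scheduling decision done without any per-resource loops.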
1065 static void freeze_limited_counters(struct cpu_hw_events *cpuhw, in freeze_limited_counters() argument
1072 for (i = 0; i < cpuhw->n_limited; ++i) { in freeze_limited_counters()
1073 event = cpuhw->limited_counter[i]; in freeze_limited_counters()
1085 static void thaw_limited_counters(struct cpu_hw_events *cpuhw, in thaw_limited_counters() argument
1092 for (i = 0; i < cpuhw->n_limited; ++i) { in thaw_limited_counters()
1093 event = cpuhw->limited_counter[i]; in thaw_limited_counters()
1094 event->hw.idx = cpuhw->limited_hwidx[i]; in thaw_limited_counters()
1114 static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) in write_mmcr0() argument
1118 if (!cpuhw->n_limited) { in write_mmcr0()
1137 freeze_limited_counters(cpuhw, pmc5, pmc6); in write_mmcr0()
1139 thaw_limited_counters(cpuhw, pmc5, pmc6); in write_mmcr0()
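Limited counters (PMC5/PMC6 on several chips) do not respect the freeze conditions, so write_mmcr0() cannot simply stop them: it reads them right at each freeze/unfreeze boundary, and freeze_limited_counters()/thaw_limited_counters() credit only the deltas accumulated while the PMU was nominally running. A standalone model of that snapshot-and-delta bookkeeping (names invented, not kernel code):

/* Hardware counters that ignore the global freeze are sampled by
 * software at every freeze/unfreeze boundary; only deltas taken while
 * "unfrozen" are credited to the event. */
#include <stdint.h>
#include <stdio.h>

struct limited_event {
	uint64_t prev;   /* snapshot at last thaw (like hw.prev_count) */
	uint64_t count;  /* accumulated event count */
};

/* PMU is being frozen: credit everything counted since the thaw. */
static void freeze_limited(struct limited_event *ev, uint64_t pmc_now)
{
	ev->count += pmc_now - ev->prev;
}

/* PMU is being unfrozen: remember the baseline for the next delta. */
static void thaw_limited(struct limited_event *ev, uint64_t pmc_now)
{
	ev->prev = pmc_now;
}

int main(void)
{
	struct limited_event ev = { 0 };
	uint64_t pmc = 100;          /* free-running hardware counter */

	thaw_limited(&ev, pmc);      /* PMU enabled at pmc=100 */
	pmc += 50;                   /* counts while enabled */
	freeze_limited(&ev, pmc);    /* PMU frozen: credit 50 */
	pmc += 999;                  /* keeps running, but not credited */
	thaw_limited(&ev, pmc);      /* re-enable at new baseline */
	pmc += 25;
	freeze_limited(&ev, pmc);    /* credit 25 more */

	printf("count=%llu (expect 75)\n", (unsigned long long)ev.count);
	return 0;
}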
1155 struct cpu_hw_events *cpuhw; in power_pmu_disable() local
1161 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_disable()
1163 if (!cpuhw->disabled) { in power_pmu_disable()
1167 if (!cpuhw->pmcs_enabled) { in power_pmu_disable()
1169 cpuhw->pmcs_enabled = 1; in power_pmu_disable()
1185 write_mmcr0(cpuhw, val); in power_pmu_disable()
1191 if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { in power_pmu_disable()
1193 cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); in power_pmu_disable()
1197 cpuhw->disabled = 1; in power_pmu_disable()
1198 cpuhw->n_added = 0; in power_pmu_disable()
1214 struct cpu_hw_events *cpuhw; in power_pmu_enable() local
1228 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_enable()
1229 if (!cpuhw->disabled) in power_pmu_enable()
1232 if (cpuhw->n_events == 0) { in power_pmu_enable()
1237 cpuhw->disabled = 0; in power_pmu_enable()
1244 ebb = is_ebb_event(cpuhw->event[0]); in power_pmu_enable()
1252 if (!cpuhw->n_added) { in power_pmu_enable()
1253 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); in power_pmu_enable()
1254 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); in power_pmu_enable()
1261 memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); in power_pmu_enable()
1263 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, in power_pmu_enable()
1264 cpuhw->mmcr, cpuhw->event)) { in power_pmu_enable()
1276 event = cpuhw->event[0]; in power_pmu_enable()
1278 cpuhw->mmcr[0] |= MMCR0_FCP; in power_pmu_enable()
1280 cpuhw->mmcr[0] |= freeze_events_kernel; in power_pmu_enable()
1282 cpuhw->mmcr[0] |= MMCR0_FCHV; in power_pmu_enable()
1291 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); in power_pmu_enable()
1292 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); in power_pmu_enable()
1293 mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) in power_pmu_enable()
1296 mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); in power_pmu_enable()
1302 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_enable()
1303 event = cpuhw->event[i]; in power_pmu_enable()
1314 cpuhw->n_limited = n_lim = 0; in power_pmu_enable()
1315 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_enable()
1316 event = cpuhw->event[i]; in power_pmu_enable()
1321 cpuhw->limited_counter[n_lim] = event; in power_pmu_enable()
1322 cpuhw->limited_hwidx[n_lim] = idx; in power_pmu_enable()
1346 cpuhw->n_limited = n_lim; in power_pmu_enable()
1347 cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; in power_pmu_enable()
1352 mmcr0 = ebb_switch_in(ebb, cpuhw); in power_pmu_enable()
1355 if (cpuhw->bhrb_users) in power_pmu_enable()
1356 ppmu->config_bhrb(cpuhw->bhrb_filter); in power_pmu_enable()
1358 write_mmcr0(cpuhw, mmcr0); in power_pmu_enable()
1363 if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { in power_pmu_enable()
1365 mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); in power_pmu_enable()
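The enable path is order-sensitive: if nothing was added since the last disable (!cpuhw->n_added, line 1252), the saved MMCRA/MMCR1 values are simply restored; otherwise compute_mmcr() rebuilds them, the exclude bits (MMCR0_FCP, MMCR0_FCHV, freeze_events_kernel) are folded in, and the registers are written with the PMU still frozen, with overflow interrupts and sampling only enabled once every PMC holds its initial value. A sketch of that ordering with a stubbed mtspr(); the SPR names follow the listing above, but the bit values here are illustrative placeholders:

/* Configure event-select registers while the PMU stays frozen; lift
 * the freeze last. A userspace sketch, not kernel code. */
#include <stdio.h>

#define MMCR0_FC            0x80000000UL  /* freeze counters (illustrative) */
#define MMCR0_PMXE          0x04000000UL  /* overflow interrupt enable */
#define MMCRA_SAMPLE_ENABLE 0x1UL         /* placeholder bit */

static void mtspr(const char *spr, unsigned long val)   /* stub */
{
	printf("mtspr(%s, %#lx)\n", spr, val);
}

int main(void)
{
	/* mmcr[0..3] mirror the cpuhw->mmcr[] slots: MMCR0/MMCR1/MMCRA/MMCR2 */
	unsigned long mmcr[4] = { MMCR0_PMXE, 0x1234, MMCRA_SAMPLE_ENABLE, 0 };

	/* 1. Program MMCRA with sampling held off, then MMCR1. */
	mtspr("MMCRA", mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr("MMCR1", mmcr[1]);

	/* 2. Keep MMCR0 frozen, interrupt enables masked, while the
	 *    PMCs are initialized; write MMCR2 alongside. */
	mtspr("MMCR0", (mmcr[0] & ~MMCR0_PMXE) | MMCR0_FC);
	mtspr("MMCR2", mmcr[3]);
	/* ... write initial PMC values here ... */

	/* 3. Lift the freeze in one final MMCR0 write. */
	mtspr("MMCR0", mmcr[0] & ~MMCR0_FC);

	/* 4. Only now turn sampling back on if it was requested. */
	if (mmcr[2] & MMCRA_SAMPLE_ENABLE)
		mtspr("MMCRA", mmcr[2]);
	return 0;
}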
1408 struct cpu_hw_events *cpuhw; in power_pmu_add() local
1420 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_add()
1421 n0 = cpuhw->n_events; in power_pmu_add()
1424 cpuhw->event[n0] = event; in power_pmu_add()
1425 cpuhw->events[n0] = event->hw.config; in power_pmu_add()
1426 cpuhw->flags[n0] = event->hw.event_base; in power_pmu_add()
1444 if (cpuhw->txn_flags & PERF_PMU_TXN_ADD) in power_pmu_add()
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) in power_pmu_add()
1449 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) in power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; in power_pmu_add()
1456 ++cpuhw->n_events; in power_pmu_add()
1457 ++cpuhw->n_added; in power_pmu_add()
1463 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( in power_pmu_add()
1477 struct cpu_hw_events *cpuhw; in power_pmu_del() local
1486 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_del()
1487 for (i = 0; i < cpuhw->n_events; ++i) { in power_pmu_del()
1488 if (event == cpuhw->event[i]) { in power_pmu_del()
1489 while (++i < cpuhw->n_events) { in power_pmu_del()
1490 cpuhw->event[i-1] = cpuhw->event[i]; in power_pmu_del()
1491 cpuhw->events[i-1] = cpuhw->events[i]; in power_pmu_del()
1492 cpuhw->flags[i-1] = cpuhw->flags[i]; in power_pmu_del()
1494 --cpuhw->n_events; in power_pmu_del()
1495 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); in power_pmu_del()
1504 for (i = 0; i < cpuhw->n_limited; ++i) in power_pmu_del()
1505 if (event == cpuhw->limited_counter[i]) in power_pmu_del()
1507 if (i < cpuhw->n_limited) { in power_pmu_del()
1508 while (++i < cpuhw->n_limited) { in power_pmu_del()
1509 cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; in power_pmu_del()
1510 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; in power_pmu_del()
1512 --cpuhw->n_limited; in power_pmu_del()
1514 if (cpuhw->n_events == 0) { in power_pmu_del()
1516 cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); in power_pmu_del()
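power_pmu_del() removes an event by compacting three parallel arrays (event, events, flags) in place, shifting everything past the matched slot down one position, and repeats the idiom for the limited-counter arrays; once n_events reaches zero it also drops MMCR0_PMXE | MMCR0_FCECE so an idle PMU raises no interrupts. The compaction idiom in isolation, with invented names:

/* Remove one entry from parallel arrays by shifting the tail down. */
#include <stdio.h>

static int remove_event(int *event, unsigned long *config, int n, int victim)
{
	for (int i = 0; i < n; i++) {
		if (event[i] != victim)
			continue;
		while (++i < n) {
			event[i - 1] = event[i];
			config[i - 1] = config[i];
		}
		return n - 1;   /* one fewer event */
	}
	return n;               /* not found */
}

int main(void)
{
	int event[] = { 10, 20, 30 };
	unsigned long config[] = { 0xa, 0xb, 0xc };
	int n = remove_event(event, config, 3, 20);

	for (int i = 0; i < n; i++)
		printf("event=%d config=%#lx\n", event[i], config[i]);
	return 0;
}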
1596 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_start_txn() local
1598 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ in power_pmu_start_txn()
1600 cpuhw->txn_flags = txn_flags; in power_pmu_start_txn()
1605 cpuhw->n_txn_start = cpuhw->n_events; in power_pmu_start_txn()
1615 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_cancel_txn() local
1618 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ in power_pmu_cancel_txn()
1620 txn_flags = cpuhw->txn_flags; in power_pmu_cancel_txn()
1621 cpuhw->txn_flags = 0; in power_pmu_cancel_txn()
1635 struct cpu_hw_events *cpuhw; in power_pmu_commit_txn() local
1641 cpuhw = this_cpu_ptr(&cpu_hw_events); in power_pmu_commit_txn()
1642 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ in power_pmu_commit_txn()
1644 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { in power_pmu_commit_txn()
1645 cpuhw->txn_flags = 0; in power_pmu_commit_txn()
1649 n = cpuhw->n_events; in power_pmu_commit_txn()
1650 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) in power_pmu_commit_txn()
1652 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); in power_pmu_commit_txn()
1656 for (i = cpuhw->n_txn_start; i < n; ++i) in power_pmu_commit_txn()
1657 cpuhw->event[i]->hw.config = cpuhw->events[i]; in power_pmu_commit_txn()
1659 cpuhw->txn_flags = 0; in power_pmu_commit_txn()
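The start_txn/cancel_txn/commit_txn trio implements perf's group-scheduling transaction: power_pmu_start_txn() records n_txn_start as a rollback point, power_pmu_add() skips the per-event constraint check while a PERF_PMU_TXN_ADD transaction is open (line 1444), and power_pmu_commit_txn() validates the whole set in one pass, writing back the possibly rewritten configs for every event added since the transaction began. A compact model of that deferred validation; names are invented, and the rollback is simplified (in the kernel a failed commit is unwound by the perf core calling cancel_txn and deleting the events individually):

/* Adds are cheap while a transaction is open; the constraint check
 * runs once at commit time over the whole candidate set. */
#include <stdio.h>

#define TXN_ADD 0x1

struct sched_state {
	int n_events;
	int n_txn_start;
	unsigned txn_flags;
	int events[8];
};

static void start_txn(struct sched_state *s, unsigned flags)
{
	s->txn_flags = flags;
	if (flags & TXN_ADD)
		s->n_txn_start = s->n_events;   /* rollback point */
}

static int add_event(struct sched_state *s, int ev)
{
	s->events[s->n_events++] = ev;
	if (s->txn_flags & TXN_ADD)
		return 0;                       /* validate at commit */
	return 0;                               /* else: check here */
}

static int commit_txn(struct sched_state *s)
{
	if (s->n_events > 4)                    /* stand-in constraint check */
		return -1;
	s->txn_flags = 0;
	return 0;
}

static void cancel_txn(struct sched_state *s)
{
	s->n_events = s->n_txn_start;           /* simplified rollback */
	s->txn_flags = 0;
}

int main(void)
{
	struct sched_state s = { 0 };

	start_txn(&s, TXN_ADD);
	for (int ev = 1; ev <= 3; ev++)
		add_event(&s, ev);
	if (commit_txn(&s) != 0)
		cancel_txn(&s);
	printf("n_events=%d txn_flags=%u\n", s.n_events, s.txn_flags);
	return 0;
}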
1773 struct cpu_hw_events *cpuhw; in power_pmu_event_init() local
1866 cpuhw = &get_cpu_var(cpu_hw_events); in power_pmu_event_init()
1867 err = power_check_constraints(cpuhw, events, cflags, n + 1); in power_pmu_event_init()
1870 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( in power_pmu_event_init()
1873 if (cpuhw->bhrb_filter == -1) { in power_pmu_event_init()
2006 struct cpu_hw_events *cpuhw; in record_and_restart() local
2007 cpuhw = this_cpu_ptr(&cpu_hw_events); in record_and_restart()
2008 power_pmu_bhrb_read(cpuhw); in record_and_restart()
2009 data.br_stack = &cpuhw->bhrb_stack; in record_and_restart()
2080 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); in perf_event_interrupt() local
2086 if (cpuhw->n_limited) in perf_event_interrupt()
2087 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), in perf_event_interrupt()
2116 for (j = 0; j < cpuhw->n_events; ++j) { in perf_event_interrupt()
2117 event = cpuhw->event[j]; in perf_event_interrupt()
2130 for (i = 0; i < cpuhw->n_events; ++i) { in perf_event_interrupt()
2131 event = cpuhw->event[i]; in perf_event_interrupt()
2153 write_mmcr0(cpuhw, cpuhw->mmcr[0]); in perf_event_interrupt()
2163 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); in power_pmu_setup() local
2167 memset(cpuhw, 0, sizeof(*cpuhw)); in power_pmu_setup()
2168 cpuhw->mmcr[0] = MMCR0_FC; in power_pmu_setup()