Searched refs:cpuhw (Results 1 - 9 of 9) sorted by relevance

/linux-4.4.14/arch/powerpc/perf/
core-book3s.c
120 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) ebb_switch_in() argument
122 return cpuhw->mmcr[0]; ebb_switch_in()
128 static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} power_pmu_bhrb_read() argument
351 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_enable() local
357 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { power_pmu_bhrb_enable()
359 cpuhw->bhrb_context = event->ctx; power_pmu_bhrb_enable()
361 cpuhw->bhrb_users++; power_pmu_bhrb_enable()
367 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_disable() local
372 WARN_ON_ONCE(!cpuhw->bhrb_users); power_pmu_bhrb_disable()
373 cpuhw->bhrb_users--; power_pmu_bhrb_disable()
376 if (!cpuhw->disabled && !cpuhw->bhrb_users) { power_pmu_bhrb_disable()
382 cpuhw->bhrb_context = NULL; power_pmu_bhrb_disable()
425 static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) power_pmu_bhrb_read() argument
469 cpuhw->bhrb_entries[u_index].to = addr; power_pmu_bhrb_read()
470 cpuhw->bhrb_entries[u_index].mispred = pred; power_pmu_bhrb_read()
471 cpuhw->bhrb_entries[u_index].predicted = ~pred; power_pmu_bhrb_read()
482 cpuhw->bhrb_entries[u_index].from = addr; power_pmu_bhrb_read()
486 cpuhw->bhrb_entries[u_index].from = addr; power_pmu_bhrb_read()
487 cpuhw->bhrb_entries[u_index].to = power_pmu_bhrb_read()
489 cpuhw->bhrb_entries[u_index].mispred = pred; power_pmu_bhrb_read()
490 cpuhw->bhrb_entries[u_index].predicted = ~pred; power_pmu_bhrb_read()
496 cpuhw->bhrb_stack.nr = u_index; power_pmu_bhrb_read()
564 static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) ebb_switch_in() argument
566 unsigned long mmcr0 = cpuhw->mmcr[0]; ebb_switch_in()
600 mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2); ebb_switch_in()
828 static int power_check_constraints(struct cpu_hw_events *cpuhw, power_check_constraints() argument
847 cpuhw->alternatives[i]); power_check_constraints()
848 event_id[i] = cpuhw->alternatives[i][0]; power_check_constraints()
850 if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], power_check_constraints()
851 &cpuhw->avalues[i][0])) power_check_constraints()
856 nv = (value | cpuhw->avalues[i][0]) + power_check_constraints()
857 (value & cpuhw->avalues[i][0] & addf); power_check_constraints()
859 (((nv + tadd) ^ cpuhw->avalues[i][0]) & power_check_constraints()
860 cpuhw->amasks[i][0]) != 0) power_check_constraints()
863 mask |= cpuhw->amasks[i][0]; power_check_constraints()
874 cpuhw->alternatives[i]); power_check_constraints()
876 ppmu->get_constraint(cpuhw->alternatives[i][j], power_check_constraints()
877 &cpuhw->amasks[i][j], power_check_constraints()
878 &cpuhw->avalues[i][j]); power_check_constraints()
897 nv = (value | cpuhw->avalues[i][j]) + power_check_constraints()
898 (value & cpuhw->avalues[i][j] & addf); power_check_constraints()
900 (((nv + tadd) ^ cpuhw->avalues[i][j]) power_check_constraints()
901 & cpuhw->amasks[i][j]) == 0) power_check_constraints()
923 mask |= cpuhw->amasks[i][j]; power_check_constraints()
931 event_id[i] = cpuhw->alternatives[i][choice[i]]; power_check_constraints()
1065 static void freeze_limited_counters(struct cpu_hw_events *cpuhw, freeze_limited_counters() argument
1072 for (i = 0; i < cpuhw->n_limited; ++i) { freeze_limited_counters()
1073 event = cpuhw->limited_counter[i]; freeze_limited_counters()
1085 static void thaw_limited_counters(struct cpu_hw_events *cpuhw, thaw_limited_counters() argument
1092 for (i = 0; i < cpuhw->n_limited; ++i) { thaw_limited_counters()
1093 event = cpuhw->limited_counter[i]; thaw_limited_counters()
1094 event->hw.idx = cpuhw->limited_hwidx[i]; thaw_limited_counters()
1114 static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) write_mmcr0() argument
1118 if (!cpuhw->n_limited) { write_mmcr0()
1137 freeze_limited_counters(cpuhw, pmc5, pmc6); write_mmcr0()
1139 thaw_limited_counters(cpuhw, pmc5, pmc6); write_mmcr0()
1155 struct cpu_hw_events *cpuhw; power_pmu_disable() local
1161 cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_disable()
1163 if (!cpuhw->disabled) { power_pmu_disable()
1167 if (!cpuhw->pmcs_enabled) { power_pmu_disable()
1169 cpuhw->pmcs_enabled = 1; power_pmu_disable()
1185 write_mmcr0(cpuhw, val); power_pmu_disable()
1191 if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { power_pmu_disable()
1193 cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); power_pmu_disable()
1197 cpuhw->disabled = 1; power_pmu_disable()
1198 cpuhw->n_added = 0; power_pmu_disable()
1214 struct cpu_hw_events *cpuhw; power_pmu_enable() local
1228 cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_enable()
1229 if (!cpuhw->disabled) power_pmu_enable()
1232 if (cpuhw->n_events == 0) { power_pmu_enable()
1237 cpuhw->disabled = 0; power_pmu_enable()
1244 ebb = is_ebb_event(cpuhw->event[0]); power_pmu_enable()
1252 if (!cpuhw->n_added) { power_pmu_enable()
1253 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); power_pmu_enable()
1254 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); power_pmu_enable()
1261 memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); power_pmu_enable()
1263 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, power_pmu_enable()
1264 cpuhw->mmcr, cpuhw->event)) { power_pmu_enable()
1276 event = cpuhw->event[0]; power_pmu_enable()
1278 cpuhw->mmcr[0] |= MMCR0_FCP; power_pmu_enable()
1280 cpuhw->mmcr[0] |= freeze_events_kernel; power_pmu_enable()
1282 cpuhw->mmcr[0] |= MMCR0_FCHV; power_pmu_enable()
1291 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); power_pmu_enable()
1292 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); power_pmu_enable()
1293 mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) power_pmu_enable()
1296 mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); power_pmu_enable()
1302 for (i = 0; i < cpuhw->n_events; ++i) { power_pmu_enable()
1303 event = cpuhw->event[i]; power_pmu_enable()
1314 cpuhw->n_limited = n_lim = 0; power_pmu_enable()
1315 for (i = 0; i < cpuhw->n_events; ++i) { power_pmu_enable()
1316 event = cpuhw->event[i]; power_pmu_enable()
1321 cpuhw->limited_counter[n_lim] = event; power_pmu_enable()
1322 cpuhw->limited_hwidx[n_lim] = idx; power_pmu_enable()
1346 cpuhw->n_limited = n_lim; power_pmu_enable()
1347 cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; power_pmu_enable()
1352 mmcr0 = ebb_switch_in(ebb, cpuhw); power_pmu_enable()
1355 if (cpuhw->bhrb_users) power_pmu_enable()
1356 ppmu->config_bhrb(cpuhw->bhrb_filter); power_pmu_enable()
1358 write_mmcr0(cpuhw, mmcr0); power_pmu_enable()
1363 if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { power_pmu_enable()
1365 mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); power_pmu_enable()
1408 struct cpu_hw_events *cpuhw; power_pmu_add() local
1420 cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_add()
1421 n0 = cpuhw->n_events; power_pmu_add()
1424 cpuhw->event[n0] = event; power_pmu_add()
1425 cpuhw->events[n0] = event->hw.config; power_pmu_add()
1426 cpuhw->flags[n0] = event->hw.event_base; power_pmu_add()
1444 if (cpuhw->txn_flags & PERF_PMU_TXN_ADD) power_pmu_add()
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) power_pmu_add()
1449 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) power_pmu_add()
1451 event->hw.config = cpuhw->events[n0]; power_pmu_add()
1456 ++cpuhw->n_events; power_pmu_add()
1457 ++cpuhw->n_added; power_pmu_add()
1463 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( power_pmu_add()
1477 struct cpu_hw_events *cpuhw; power_pmu_del() local
1486 cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_del()
1487 for (i = 0; i < cpuhw->n_events; ++i) { power_pmu_del()
1488 if (event == cpuhw->event[i]) { power_pmu_del()
1489 while (++i < cpuhw->n_events) { power_pmu_del()
1490 cpuhw->event[i-1] = cpuhw->event[i]; power_pmu_del()
1491 cpuhw->events[i-1] = cpuhw->events[i]; power_pmu_del()
1492 cpuhw->flags[i-1] = cpuhw->flags[i]; power_pmu_del()
1494 --cpuhw->n_events; power_pmu_del()
1495 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); power_pmu_del()
1504 for (i = 0; i < cpuhw->n_limited; ++i) power_pmu_del()
1505 if (event == cpuhw->limited_counter[i]) power_pmu_del()
1507 if (i < cpuhw->n_limited) { power_pmu_del()
1508 while (++i < cpuhw->n_limited) { power_pmu_del()
1509 cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; power_pmu_del()
1510 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; power_pmu_del()
1512 --cpuhw->n_limited; power_pmu_del()
1514 if (cpuhw->n_events == 0) { power_pmu_del()
1516 cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); power_pmu_del()
1596 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_start_txn() local
1598 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ power_pmu_start_txn()
1600 cpuhw->txn_flags = txn_flags; power_pmu_start_txn()
1605 cpuhw->n_txn_start = cpuhw->n_events; power_pmu_start_txn()
1615 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_cancel_txn() local
1618 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ power_pmu_cancel_txn()
1620 txn_flags = cpuhw->txn_flags; power_pmu_cancel_txn()
1621 cpuhw->txn_flags = 0; power_pmu_cancel_txn()
1635 struct cpu_hw_events *cpuhw; power_pmu_commit_txn() local
1641 cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_commit_txn()
1642 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ power_pmu_commit_txn()
1644 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { power_pmu_commit_txn()
1645 cpuhw->txn_flags = 0; power_pmu_commit_txn()
1649 n = cpuhw->n_events; power_pmu_commit_txn()
1650 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) power_pmu_commit_txn()
1652 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); power_pmu_commit_txn()
1656 for (i = cpuhw->n_txn_start; i < n; ++i) power_pmu_commit_txn()
1657 cpuhw->event[i]->hw.config = cpuhw->events[i]; power_pmu_commit_txn()
1659 cpuhw->txn_flags = 0; power_pmu_commit_txn()
1773 struct cpu_hw_events *cpuhw; power_pmu_event_init() local
1866 cpuhw = &get_cpu_var(cpu_hw_events); power_pmu_event_init()
1867 err = power_check_constraints(cpuhw, events, cflags, n + 1); power_pmu_event_init()
1870 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( power_pmu_event_init()
1873 if (cpuhw->bhrb_filter == -1) { power_pmu_event_init()
2006 struct cpu_hw_events *cpuhw; record_and_restart() local
2007 cpuhw = this_cpu_ptr(&cpu_hw_events); record_and_restart()
2008 power_pmu_bhrb_read(cpuhw); record_and_restart()
2009 data.br_stack = &cpuhw->bhrb_stack; record_and_restart()
2080 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); perf_event_interrupt() local
2086 if (cpuhw->n_limited) perf_event_interrupt()
2087 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), perf_event_interrupt()
2116 for (j = 0; j < cpuhw->n_events; ++j) { perf_event_interrupt()
2117 event = cpuhw->event[j]; perf_event_interrupt()
2130 for (i = 0; i < cpuhw->n_events; ++i) { perf_event_interrupt()
2131 event = cpuhw->event[i]; perf_event_interrupt()
2153 write_mmcr0(cpuhw, cpuhw->mmcr[0]); perf_event_interrupt()
2163 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); power_pmu_setup() local
2167 memset(cpuhw, 0, sizeof(*cpuhw)); power_pmu_setup()
2168 cpuhw->mmcr[0] = MMCR0_FC; power_pmu_setup()
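
A note on the pattern these core-book3s.c hits trace: perf brackets every change to the per-CPU cpu_hw_events between power_pmu_disable() and power_pmu_enable(); disable zeroes n_added, ->add() bumps it, and enable takes a cheap unfreeze path when n_added is still zero. A minimal, self-contained sketch of that bracket (plain userspace C with illustrative names, not the kernel's per-CPU machinery):

    #include <stdio.h>

    struct cpu_hw_events {
        int disabled;   /* PMU frozen on this CPU */
        int n_events;   /* events currently scheduled */
        int n_added;    /* events added since the last disable */
    };

    static struct cpu_hw_events cpu_hw_events;  /* one CPU, for illustration */

    static void pmu_disable(struct cpu_hw_events *cpuhw)
    {
        if (!cpuhw->disabled) {
            /* the real code freezes the counters via MMCR0 here */
            cpuhw->disabled = 1;
            cpuhw->n_added = 0;
        }
    }

    static void pmu_add(struct cpu_hw_events *cpuhw)
    {
        /* caller holds the PMU disabled, as perf does around ->add() */
        ++cpuhw->n_events;
        ++cpuhw->n_added;
    }

    static void pmu_enable(struct cpu_hw_events *cpuhw)
    {
        if (!cpuhw->disabled)
            return;
        cpuhw->disabled = 0;
        if (!cpuhw->n_added)
            return;     /* fast path: nothing changed, just unfreeze */
        /* slow path: recompute MMCR* for the whole event set */
    }

    int main(void)
    {
        pmu_disable(&cpu_hw_events);
        pmu_add(&cpu_hw_events);
        pmu_enable(&cpu_hw_events);
        printf("n_events=%d disabled=%d\n",
               cpu_hw_events.n_events, cpu_hw_events.disabled);
        return 0;
    }
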
core-fsl-emb.c
209 struct cpu_hw_events *cpuhw; fsl_emb_pmu_disable() local
213 cpuhw = this_cpu_ptr(&cpu_hw_events); fsl_emb_pmu_disable()
215 if (!cpuhw->disabled) { fsl_emb_pmu_disable()
216 cpuhw->disabled = 1; fsl_emb_pmu_disable()
221 if (!cpuhw->pmcs_enabled) { fsl_emb_pmu_disable()
223 cpuhw->pmcs_enabled = 1; fsl_emb_pmu_disable()
248 struct cpu_hw_events *cpuhw; fsl_emb_pmu_enable() local
252 cpuhw = this_cpu_ptr(&cpu_hw_events); fsl_emb_pmu_enable()
253 if (!cpuhw->disabled) fsl_emb_pmu_enable()
256 cpuhw->disabled = 0; fsl_emb_pmu_enable()
257 ppc_set_pmu_inuse(cpuhw->n_events != 0); fsl_emb_pmu_enable()
259 if (cpuhw->n_events > 0) { fsl_emb_pmu_enable()
295 struct cpu_hw_events *cpuhw; fsl_emb_pmu_add() local
302 cpuhw = &get_cpu_var(cpu_hw_events); fsl_emb_pmu_add()
312 if (cpuhw->event[i]) fsl_emb_pmu_add()
322 cpuhw->event[i] = event; fsl_emb_pmu_add()
323 ++cpuhw->n_events; fsl_emb_pmu_add()
356 struct cpu_hw_events *cpuhw; fsl_emb_pmu_del() local
365 cpuhw = &get_cpu_var(cpu_hw_events); fsl_emb_pmu_del()
367 WARN_ON(event != cpuhw->event[event->hw.idx]); fsl_emb_pmu_del()
373 cpuhw->event[i] = NULL; fsl_emb_pmu_del()
384 cpuhw->n_events--; fsl_emb_pmu_del()
662 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); perf_event_interrupt() local
675 event = cpuhw->event[i]; perf_event_interrupt()
706 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); hw_perf_event_setup() local
708 memset(cpuhw, 0, sizeof(*cpuhw)); hw_perf_event_setup()
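
Worth contrasting with core-book3s.c above, which keeps event[] dense and shifts entries down on delete (its lines 1489-1495): core-fsl-emb.c indexes event[] directly by hardware counter, scanning for a NULL slot in ->add() and just NULLing it in ->del(). A sketch of that slot discipline (types and counter count are illustrative):

    #include <stddef.h>

    #define N_COUNTERS 4                /* illustrative counter count */

    struct perf_event;                  /* opaque here */

    struct cpu_hw_events {
        struct perf_event *event[N_COUNTERS];
        int n_events;
    };

    /* claim a free hardware counter; returns its index, or -1 if all busy */
    static int counter_claim(struct cpu_hw_events *cpuhw, struct perf_event *ev)
    {
        int i;

        for (i = 0; i < N_COUNTERS; i++) {
            if (cpuhw->event[i])
                continue;               /* slot in use, keep scanning */
            cpuhw->event[i] = ev;
            cpuhw->n_events++;
            return i;
        }
        return -1;
    }

    /* release a counter: just NULL the slot, no compaction needed */
    static void counter_release(struct cpu_hw_events *cpuhw, int i)
    {
        cpuhw->event[i] = NULL;
        cpuhw->n_events--;
    }
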
/linux-4.4.14/arch/s390/kernel/
perf_cpum_cf.c
127 struct cpu_hw_events *cpuhw; validate_ctr_version() local
130 cpuhw = &get_cpu_var(cpu_hw_events); validate_ctr_version()
136 if (cpuhw->info.cfvn < 1) validate_ctr_version()
141 if (cpuhw->info.csvn < 1) validate_ctr_version()
143 if ((cpuhw->info.csvn == 1 && hwc->config > 159) || validate_ctr_version()
144 (cpuhw->info.csvn == 2 && hwc->config > 175) || validate_ctr_version()
145 (cpuhw->info.csvn > 2 && hwc->config > 255)) validate_ctr_version()
156 struct cpu_hw_events *cpuhw; validate_ctr_auth() local
160 cpuhw = &get_cpu_var(cpu_hw_events); validate_ctr_auth()
168 if (!(ctrs_state & cpuhw->info.auth_ctl)) validate_ctr_auth()
182 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_enable() local
185 if (cpuhw->flags & PMU_F_ENABLED) cpumf_pmu_enable()
188 err = lcctl(cpuhw->state); cpumf_pmu_enable()
195 cpuhw->flags |= PMU_F_ENABLED; cpumf_pmu_enable()
205 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_disable() local
209 if (!(cpuhw->flags & PMU_F_ENABLED)) cpumf_pmu_disable()
212 inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); cpumf_pmu_disable()
220 cpuhw->flags &= ~PMU_F_ENABLED; cpumf_pmu_disable()
233 struct cpu_hw_events *cpuhw; cpumf_measurement_alert() local
239 cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_measurement_alert()
243 if (!(cpuhw->flags & PMU_F_RESERVED)) cpumf_measurement_alert()
248 qctri(&cpuhw->info); cpumf_measurement_alert()
259 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); setup_pmc_cpu() local
263 memset(&cpuhw->info, 0, sizeof(cpuhw->info)); setup_pmc_cpu()
264 qctri(&cpuhw->info); setup_pmc_cpu()
265 cpuhw->flags |= PMU_F_RESERVED; setup_pmc_cpu()
269 cpuhw->flags &= ~PMU_F_RESERVED; setup_pmc_cpu()
484 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_start() local
499 ctr_set_enable(&cpuhw->state, hwc->config_base); cpumf_pmu_start()
500 ctr_set_start(&cpuhw->state, hwc->config_base); cpumf_pmu_start()
510 atomic_inc(&cpuhw->ctr_set[hwc->config_base]); cpumf_pmu_start()
515 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_stop() local
523 if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base])) cpumf_pmu_stop()
524 ctr_set_stop(&cpuhw->state, hwc->config_base); cpumf_pmu_stop()
536 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_add() local
543 if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD)) cpumf_pmu_add()
547 ctr_set_enable(&cpuhw->state, event->hw.config_base); cpumf_pmu_add()
560 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_del() local
572 if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base])) cpumf_pmu_del()
573 ctr_set_disable(&cpuhw->state, event->hw.config_base); cpumf_pmu_del()
588 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_start_txn() local
590 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ cpumf_pmu_start_txn()
592 cpuhw->txn_flags = txn_flags; cpumf_pmu_start_txn()
597 cpuhw->tx_state = cpuhw->state; cpumf_pmu_start_txn()
608 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_cancel_txn() local
610 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ cpumf_pmu_cancel_txn()
612 txn_flags = cpuhw->txn_flags; cpumf_pmu_cancel_txn()
613 cpuhw->txn_flags = 0; cpumf_pmu_cancel_txn()
617 WARN_ON(cpuhw->tx_state != cpuhw->state); cpumf_pmu_cancel_txn()
629 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); cpumf_pmu_commit_txn() local
632 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ cpumf_pmu_commit_txn()
634 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { cpumf_pmu_commit_txn()
635 cpuhw->txn_flags = 0; cpumf_pmu_commit_txn()
640 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); cpumf_pmu_commit_txn()
642 if ((state & cpuhw->info.auth_ctl) != state) cpumf_pmu_commit_txn()
645 cpuhw->txn_flags = 0; cpumf_pmu_commit_txn()
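
The start/cancel/commit hits above are perf's group-scheduling transaction protocol: start_txn() records txn_flags and, for PERF_PMU_TXN_ADD, snapshots the counter-set state; cancel_txn() expects the intervening ->del() calls to have unwound every ->add(); commit_txn() authorizes the accumulated set in one go. A sketch under those assumptions (flag value and error code are illustrative):

    #include <assert.h>

    #define PERF_PMU_TXN_ADD 0x1        /* illustrative value */

    struct cpu_hw_events {
        unsigned int txn_flags;
        unsigned long state;            /* counter-set control bits */
        unsigned long tx_state;         /* snapshot taken at txn start */
        unsigned long auth_ctl;         /* counter sets we may use */
    };

    static void pmu_start_txn(struct cpu_hw_events *cpuhw, unsigned int flags)
    {
        assert(!cpuhw->txn_flags);      /* txn already in flight */
        cpuhw->txn_flags = flags;
        if (flags & PERF_PMU_TXN_ADD)
            cpuhw->tx_state = cpuhw->state; /* snapshot for the sanity check */
    }

    static void pmu_cancel_txn(struct cpu_hw_events *cpuhw)
    {
        unsigned int flags = cpuhw->txn_flags;

        assert(flags);                  /* no txn in flight */
        cpuhw->txn_flags = 0;
        if (flags & PERF_PMU_TXN_ADD)
            /* ->del() must already have unwound every ->add() */
            assert(cpuhw->tx_state == cpuhw->state);
    }

    static int pmu_commit_txn(struct cpu_hw_events *cpuhw)
    {
        assert(cpuhw->txn_flags);
        if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
            cpuhw->txn_flags = 0;       /* nothing to validate */
            return 0;
        }
        cpuhw->txn_flags = 0;
        /* authorize the whole set at once; the kernel additionally masks
           off the enable bits before this check (simplified here) */
        return (cpuhw->state & cpuhw->auth_ctl) == cpuhw->state ? 0 : -1;
    }
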
perf_cpum_sf.c
108 static int sf_buffer_available(struct cpu_hw_sf *cpuhw) sf_buffer_available() argument
110 return !!cpuhw->sfb.sdbt; sf_buffer_available()
360 static void deallocate_buffers(struct cpu_hw_sf *cpuhw) deallocate_buffers() argument
362 if (cpuhw->sfb.sdbt) deallocate_buffers()
363 free_sampling_buffer(&cpuhw->sfb); deallocate_buffers()
366 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) allocate_buffers() argument
388 sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) + allocate_buffers()
395 sfr->bsdes = cpuhw->qsi.bsdes; allocate_buffers()
396 sfr->dsdes = cpuhw->qsi.dsdes; allocate_buffers()
424 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); allocate_buffers()
439 if (sf_buffer_available(cpuhw)) allocate_buffers()
444 " sample_size=%lu cpuhw=%p\n", allocate_buffers()
446 sample_size, cpuhw); allocate_buffers()
448 return alloc_sampling_buffer(&cpuhw->sfb, allocate_buffers()
449 sfb_pending_allocs(&cpuhw->sfb, hwc)); allocate_buffers()
481 static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, sfb_account_overflows() argument
496 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, sfb_account_overflows()
497 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); sfb_account_overflows()
500 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); sfb_account_overflows()
580 "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); setup_pmc_cpu()
591 "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); setup_pmc_cpu()
670 struct cpu_hw_sf *cpuhw; __hw_perf_event_init() local
698 * Later, cpuhw indicates whether to allocate sampling buffers for a __hw_perf_event_init()
699 * particular CPU (cpuhw!=NULL) or each online CPU (cpuhw==NULL). __hw_perf_event_init()
702 cpuhw = NULL; __hw_perf_event_init()
709 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); __hw_perf_event_init()
710 si = cpuhw->qsi; __hw_perf_event_init()
780 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling __hw_perf_event_init()
783 if (cpuhw) __hw_perf_event_init()
785 err = allocate_buffers(cpuhw, hwc); __hw_perf_event_init()
791 cpuhw = &per_cpu(cpu_hw_sf, cpu); for_each_online_cpu()
792 err = allocate_buffers(cpuhw, hwc); for_each_online_cpu()
852 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_enable() local
856 if (cpuhw->flags & PMU_F_ENABLED) cpumsf_pmu_enable()
859 if (cpuhw->flags & PMU_F_ERR_MASK) cpumsf_pmu_enable()
873 if (cpuhw->event) { cpumsf_pmu_enable()
874 hwc = &cpuhw->event->hw; cpumsf_pmu_enable()
876 sfb_account_overflows(cpuhw, hwc); cpumsf_pmu_enable()
877 if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) cpumsf_pmu_enable()
878 extend_sampling_buffer(&cpuhw->sfb, hwc); cpumsf_pmu_enable()
882 cpuhw->flags |= PMU_F_ENABLED; cpumsf_pmu_enable()
885 err = lsctl(&cpuhw->lsctl); cpumsf_pmu_enable()
887 cpuhw->flags &= ~PMU_F_ENABLED; cpumsf_pmu_enable()
894 "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs, cpumsf_pmu_enable()
895 cpuhw->lsctl.ed, cpuhw->lsctl.cd, cpumsf_pmu_enable()
896 (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear); cpumsf_pmu_enable()
901 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_disable() local
906 if (!(cpuhw->flags & PMU_F_ENABLED)) cpumsf_pmu_disable()
909 if (cpuhw->flags & PMU_F_ERR_MASK) cpumsf_pmu_disable()
913 inactive = cpuhw->lsctl; cpumsf_pmu_disable()
932 cpuhw->lsctl.tear = si.tear; cpumsf_pmu_disable()
933 cpuhw->lsctl.dear = si.dear; cpumsf_pmu_disable()
939 cpuhw->flags &= ~PMU_F_ENABLED; cpumsf_pmu_disable()
1308 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_start() local
1318 cpuhw->lsctl.cs = 1; cpumsf_pmu_start()
1320 cpuhw->lsctl.cd = 1; cpumsf_pmu_start()
1329 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_stop() local
1335 cpuhw->lsctl.cs = 0; cpumsf_pmu_stop()
1336 cpuhw->lsctl.cd = 0; cpumsf_pmu_stop()
1348 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_add() local
1351 if (cpuhw->flags & PMU_F_IN_USE) cpumsf_pmu_add()
1354 if (!cpuhw->sfb.sdbt) cpumsf_pmu_add()
1367 cpuhw->lsctl.s = 0; cpumsf_pmu_add()
1368 cpuhw->lsctl.h = 1; cpumsf_pmu_add()
1369 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; cpumsf_pmu_add()
1370 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; cpumsf_pmu_add()
1371 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); cpumsf_pmu_add()
1372 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); cpumsf_pmu_add()
1376 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { cpumsf_pmu_add()
1380 cpuhw->lsctl.es = 1; cpumsf_pmu_add()
1382 cpuhw->lsctl.ed = 1; cpumsf_pmu_add()
1385 cpuhw->event = event; cpumsf_pmu_add()
1386 cpuhw->flags |= PMU_F_IN_USE; cpumsf_pmu_add()
1398 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumsf_pmu_del() local
1403 cpuhw->lsctl.es = 0; cpumsf_pmu_del()
1404 cpuhw->lsctl.ed = 0; cpumsf_pmu_del()
1405 cpuhw->flags &= ~PMU_F_IN_USE; cpumsf_pmu_del()
1406 cpuhw->event = NULL; cpumsf_pmu_del()
1460 struct cpu_hw_sf *cpuhw; cpumf_measurement_alert() local
1465 cpuhw = this_cpu_ptr(&cpu_hw_sf); cpumf_measurement_alert()
1469 if (!(cpuhw->flags & PMU_F_RESERVED)) cpumf_measurement_alert()
1477 if (cpuhw->flags & PMU_F_IN_USE) cpumf_measurement_alert()
1478 hw_perf_event_update(cpuhw->event, 0); cpumf_measurement_alert()
1480 WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); cpumf_measurement_alert()
1489 qsi(&cpuhw->qsi); cpumf_measurement_alert()
1494 cpuhw->flags |= PMU_F_ERR_LSDA; cpumf_measurement_alert()
1502 cpuhw->flags |= PMU_F_ERR_IBE; cpumf_measurement_alert()
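
The __hw_perf_event_init() hits show how cpuhw doubles as a mode flag: for a CPU-bound event (event->cpu >= 0) it points at that CPU's cpu_hw_sf and one sampling buffer is allocated; for a task-bound event it stays NULL and buffers go to every online CPU. A sketch of that branch, modeling per_cpu() and the online-CPU walk with a plain array:

    #include <stddef.h>

    #define NR_CPUS 4                   /* illustrative */

    struct hw_perf_event { long sample_rate; };
    struct cpu_hw_sf { int has_buffer; };

    static struct cpu_hw_sf cpu_hw_sf[NR_CPUS];    /* models per_cpu() */

    static int allocate_buffers(struct cpu_hw_sf *cpuhw,
                                struct hw_perf_event *hwc)
    {
        (void)hwc;
        cpuhw->has_buffer = 1;  /* real code sizes and fills an SDB table */
        return 0;
    }

    static int event_init(int event_cpu, struct hw_perf_event *hwc)
    {
        struct cpu_hw_sf *cpuhw = NULL;
        int cpu, err = 0;

        if (event_cpu >= 0)
            cpuhw = &cpu_hw_sf[event_cpu];  /* CPU-bound: one buffer */

        if (cpuhw)
            return allocate_buffers(cpuhw, hwc);

        for (cpu = 0; cpu < NR_CPUS; cpu++) {   /* task-bound: every CPU */
            err = allocate_buffers(&cpu_hw_sf[cpu], hwc);
            if (err)
                break;
        }
        return err;
    }
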
/linux-4.4.14/arch/x86/kernel/cpu/
perf_event_amd.c
415 struct cpu_hw_events *cpuhw; amd_pmu_cpu_dead() local
420 cpuhw = &per_cpu(cpu_hw_events, cpu); amd_pmu_cpu_dead()
422 if (cpuhw->amd_nb) { amd_pmu_cpu_dead()
423 struct amd_nb *nb = cpuhw->amd_nb; amd_pmu_cpu_dead()
428 cpuhw->amd_nb = NULL; amd_pmu_cpu_dead()
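
These amd_pmu_cpu_dead() hits are the tail of a shared-structure teardown: CPUs on one node share a single amd_nb, so the struct may only be freed by the last CPU to go down. The refcount test in the sketch below follows the surrounding kernel code rather than anything visible in the hits, so treat it as an assumption:

    #include <stdlib.h>

    struct amd_nb {
        int nb_id;                      /* -1 means never shared */
        int refcnt;
    };

    struct cpu_hw_events {
        struct amd_nb *amd_nb;
    };

    static void amd_cpu_dead(struct cpu_hw_events *cpuhw)
    {
        if (cpuhw->amd_nb) {
            struct amd_nb *nb = cpuhw->amd_nb;

            /* free only when unshared or when the last user drops it */
            if (nb->nb_id == -1 || --nb->refcnt == 0)
                free(nb);               /* kfree() in the kernel */
            cpuhw->amd_nb = NULL;
        }
    }
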
/linux-4.4.14/arch/sh/kernel/
perf_event.c
357 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); sh_pmu_setup() local
359 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); sh_pmu_setup()
/linux-4.4.14/arch/blackfin/kernel/
perf_event.c
458 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); bfin_pmu_setup() local
460 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); bfin_pmu_setup()
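
sh_pmu_setup() and bfin_pmu_setup() above are the same hotplug hook: zero the incoming CPU's cpu_hw_events so it starts with no scheduled events. A trivial sketch, modeling per_cpu() with an array:

    #include <string.h>

    #define NR_CPUS 4                   /* illustrative */

    struct cpu_hw_events { int n_events; };

    static struct cpu_hw_events cpu_hw_events[NR_CPUS];    /* models per_cpu() */

    static void pmu_setup(int cpu)
    {
        struct cpu_hw_events *cpuhw = &cpu_hw_events[cpu];

        memset(cpuhw, 0, sizeof(*cpuhw));   /* fresh state for a new CPU */
    }
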
/linux-4.4.14/arch/sparc/kernel/
perf_event.c
1499 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); sparc_pmu_start_txn() local
1501 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ sparc_pmu_start_txn()
1503 cpuhw->txn_flags = txn_flags; sparc_pmu_start_txn()
1517 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); sparc_pmu_cancel_txn() local
1520 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ sparc_pmu_cancel_txn()
1522 txn_flags = cpuhw->txn_flags; sparc_pmu_cancel_txn()
1523 cpuhw->txn_flags = 0; sparc_pmu_cancel_txn()
/linux-4.4.14/arch/metag/kernel/perf/
perf_event.c
754 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); metag_pmu_counter_overflow() local
755 struct perf_event *event = cpuhw->events[idx]; metag_pmu_counter_overflow()

Completed in 330 milliseconds