Lines Matching refs:event
87 struct perf_event *event; /* Scheduled perf event */ member
625 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
628 if (RAWSAMPLE_REG(&event->hw)) in hw_perf_event_destroy()
629 kfree((void *) RAWSAMPLE_REG(&event->hw)); in hw_perf_event_destroy()
668 static int __hw_perf_event_init(struct perf_event *event) in __hw_perf_event_init() argument
672 struct perf_event_attr *attr = &event->attr; in __hw_perf_event_init()
673 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
687 event->destroy = hw_perf_event_destroy; in __hw_perf_event_init()
703 if (event->cpu == -1) in __hw_perf_event_init()
709 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); in __hw_perf_event_init()
801 static int cpumsf_pmu_event_init(struct perf_event *event) in cpumsf_pmu_event_init() argument
806 if (has_branch_stack(event)) in cpumsf_pmu_event_init()
809 switch (event->attr.type) { in cpumsf_pmu_event_init()
811 if ((event->attr.config != PERF_EVENT_CPUM_SF) && in cpumsf_pmu_event_init()
812 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) in cpumsf_pmu_event_init()
821 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) in cpumsf_pmu_event_init()
823 if (!is_sampling_event(event)) in cpumsf_pmu_event_init()
831 if (event->cpu >= nr_cpumask_bits || in cpumsf_pmu_event_init()
832 (event->cpu >= 0 && !cpu_online(event->cpu))) in cpumsf_pmu_event_init()
838 if (event->attr.exclude_hv) in cpumsf_pmu_event_init()
839 event->attr.exclude_hv = 0; in cpumsf_pmu_event_init()
840 if (event->attr.exclude_idle) in cpumsf_pmu_event_init()
841 event->attr.exclude_idle = 0; in cpumsf_pmu_event_init()
843 err = __hw_perf_event_init(event); in cpumsf_pmu_event_init()
845 if (event->destroy) in cpumsf_pmu_event_init()
846 event->destroy(event); in cpumsf_pmu_event_init()
873 if (cpuhw->event) { in cpumsf_pmu_enable()
874 hwc = &cpuhw->event->hw; in cpumsf_pmu_enable()
951 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, in perf_exclude_event() argument
954 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
956 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
958 if (event->attr.exclude_guest && sde_regs->in_guest) in perf_exclude_event()
960 if (event->attr.exclude_host && !sde_regs->in_guest) in perf_exclude_event()
976 static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) in perf_push_sample() argument
985 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_push_sample()
1033 if (perf_exclude_event(event, &regs, sde_regs)) in perf_push_sample()
1035 if (perf_event_overflow(event, &data, &regs)) { in perf_push_sample()
1037 event->pmu->stop(event, 0); in perf_push_sample()
1039 perf_event_update_userpage(event); in perf_push_sample()
1044 static void perf_event_count_update(struct perf_event *event, u64 count) in perf_event_count_update() argument
1046 local64_add(count, &event->count); in perf_event_count_update()
1140 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, in hw_collect_samples() argument
1143 unsigned long flags = SAMPL_FLAGS(&event->hw); in hw_collect_samples()
1150 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); in hw_collect_samples()
1153 sample_size = event_sample_size(&event->hw); in hw_collect_samples()
1162 perf_event_count_update(event, SAMPL_RATE(&event->hw)); in hw_collect_samples()
1174 *overflow = perf_push_sample(event, sfr); in hw_collect_samples()
1215 static void hw_perf_event_update(struct perf_event *event, int flush_all) in hw_perf_event_update() argument
1217 struct hw_perf_event *hwc = &event->hw; in hw_perf_event_update()
1257 hw_collect_samples(event, sdbt, &event_overflow); in hw_perf_event_update()
1299 static void cpumsf_pmu_read(struct perf_event *event) in cpumsf_pmu_read() argument
1307 static void cpumsf_pmu_start(struct perf_event *event, int flags) in cpumsf_pmu_start() argument
1311 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in cpumsf_pmu_start()
1315 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in cpumsf_pmu_start()
1317 perf_pmu_disable(event->pmu); in cpumsf_pmu_start()
1318 event->hw.state = 0; in cpumsf_pmu_start()
1320 if (SAMPL_DIAG_MODE(&event->hw)) in cpumsf_pmu_start()
1322 perf_pmu_enable(event->pmu); in cpumsf_pmu_start()
1328 static void cpumsf_pmu_stop(struct perf_event *event, int flags) in cpumsf_pmu_stop() argument
1332 if (event->hw.state & PERF_HES_STOPPED) in cpumsf_pmu_stop()
1335 perf_pmu_disable(event->pmu); in cpumsf_pmu_stop()
1338 event->hw.state |= PERF_HES_STOPPED; in cpumsf_pmu_stop()
1340 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { in cpumsf_pmu_stop()
1341 hw_perf_event_update(event, 1); in cpumsf_pmu_stop()
1342 event->hw.state |= PERF_HES_UPTODATE; in cpumsf_pmu_stop()
1344 perf_pmu_enable(event->pmu); in cpumsf_pmu_stop()
1347 static int cpumsf_pmu_add(struct perf_event *event, int flags) in cpumsf_pmu_add() argument
1359 perf_pmu_disable(event->pmu); in cpumsf_pmu_add()
1361 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in cpumsf_pmu_add()
1372 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); in cpumsf_pmu_add()
1373 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); in cpumsf_pmu_add()
1382 if (SAMPL_DIAG_MODE(&event->hw)) in cpumsf_pmu_add()
1386 cpuhw->event = event; in cpumsf_pmu_add()
1390 cpumsf_pmu_start(event, PERF_EF_RELOAD); in cpumsf_pmu_add()
1392 perf_event_update_userpage(event); in cpumsf_pmu_add()
1393 perf_pmu_enable(event->pmu); in cpumsf_pmu_add()
1397 static void cpumsf_pmu_del(struct perf_event *event, int flags) in cpumsf_pmu_del() argument
1401 perf_pmu_disable(event->pmu); in cpumsf_pmu_del()
1402 cpumsf_pmu_stop(event, PERF_EF_UPDATE); in cpumsf_pmu_del()
1407 cpuhw->event = NULL; in cpumsf_pmu_del()
1409 perf_event_update_userpage(event); in cpumsf_pmu_del()
1410 perf_pmu_enable(event->pmu); in cpumsf_pmu_del()
1422 PMU_FORMAT_ATTR(event, "config:0-63");
1479 hw_perf_event_update(cpuhw->event, 0); in cpumf_measurement_alert()