arc_pmu 96 arch/arc/kernel/perf_event.c static struct arc_pmu *arc_pmu;
arc_pmu 173 arch/arc/kernel/perf_event.c hwc->sample_period = arc_pmu->max_period;
arc_pmu 194 arch/arc/kernel/perf_event.c if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
arc_pmu 196 arch/arc/kernel/perf_event.c hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
arc_pmu 206 arch/arc/kernel/perf_event.c hwc->config |= arc_pmu->ev_hw_idx[ret];
arc_pmu 212 arch/arc/kernel/perf_event.c if (event->attr.config >= arc_pmu->n_events)
arc_pmu 218 arch/arc/kernel/perf_event.c arc_pmu->raw_entry[event->attr.config].name);
arc_pmu 266 arch/arc/kernel/perf_event.c if (left > arc_pmu->max_period)
arc_pmu 267 arch/arc/kernel/perf_event.c left = arc_pmu->max_period;
arc_pmu 269 arch/arc/kernel/perf_event.c value = arc_pmu->max_period - left;
arc_pmu 367 arch/arc/kernel/perf_event.c if (idx == arc_pmu->n_counters)
arc_pmu 380 arch/arc/kernel/perf_event.c lower_32_bits(arc_pmu->max_period));
arc_pmu 382 arch/arc/kernel/perf_event.c upper_32_bits(arc_pmu->max_period));
arc_pmu 408 arch/arc/kernel/perf_event.c arc_pmu_disable(&arc_pmu->pmu);
arc_pmu 449 arch/arc/kernel/perf_event.c arc_pmu_enable(&arc_pmu->pmu);
arc_pmu 505 arch/arc/kernel/perf_event.c memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
arc_pmu 506 arch/arc/kernel/perf_event.c arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
arc_pmu 507 arch/arc/kernel/perf_event.c arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
arc_pmu 508 arch/arc/kernel/perf_event.c arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
arc_pmu 509 arch/arc/kernel/perf_event.c arc_pmu->attr[j].id = j;
arc_pmu 510 arch/arc/kernel/perf_event.c arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
arc_pmu 515 arch/arc/kernel/perf_event.c arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
arc_pmu 516 arch/arc/kernel/perf_event.c sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
arc_pmu 517 arch/arc/kernel/perf_event.c if (!arc_pmu->attr)
arc_pmu 520 arch/arc/kernel/perf_event.c arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
arc_pmu 521 arch/arc/kernel/perf_event.c sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
arc_pmu 522 arch/arc/kernel/perf_event.c if (!arc_pmu->attrs)
arc_pmu 525 arch/arc/kernel/perf_event.c arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
arc_pmu 526 arch/arc/kernel/perf_event.c sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
arc_pmu 527 arch/arc/kernel/perf_event.c if (!arc_pmu->raw_entry)
arc_pmu 556 arch/arc/kernel/perf_event.c arc_pmu->ev_hw_idx[i] = j;
arc_pmu 590 arch/arc/kernel/perf_event.c arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
arc_pmu 591 arch/arc/kernel/perf_event.c if (!arc_pmu)
arc_pmu 594 arch/arc/kernel/perf_event.c arc_pmu->n_events = cc_bcr.c;
arc_pmu 601 arch/arc/kernel/perf_event.c arc_pmu->n_counters = pct_bcr.c;
arc_pmu 604 arch/arc/kernel/perf_event.c arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
arc_pmu 607 arch/arc/kernel/perf_event.c arc_pmu->n_counters, counter_size, cc_bcr.c,
arc_pmu 612 arch/arc/kernel/perf_event.c arc_pmu->ev_hw_idx[i] = -1;
arc_pmu 624 arch/arc/kernel/perf_event.c arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
arc_pmu 625 arch/arc/kernel/perf_event.c arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
arc_pmu 626 arch/arc/kernel/perf_event.c arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
arc_pmu 628 arch/arc/kernel/perf_event.c arc_pmu->pmu = (struct pmu) {
arc_pmu 637 arch/arc/kernel/perf_event.c .attr_groups = arc_pmu->attr_groups,
arc_pmu 648 arch/arc/kernel/perf_event.c arc_pmu->irq = irq;
arc_pmu 657 arch/arc/kernel/perf_event.c arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
arc_pmu 663 arch/arc/kernel/perf_event.c return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
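The listing above is the full set of references to the file-scope `arc_pmu` singleton in the ARC PCT perf driver. Below is a minimal sketch of the probe/registration pattern those lines imply: allocate the singleton with devm_kzalloc (line 590), derive `max_period` as half the counter range (line 604; for a 32-bit counter this is 2^32 / 2 - 1 = 0x7fffffff), mark every generic event as unmapped (line 612), and register the PMU as a raw-event provider (line 663). The struct layout, the `counter_size` placeholder value, and the function name `arc_pmu_probe_sketch` are assumptions reconstructed from the listing, not the kernel's verbatim code.

	#include <linux/kernel.h>
	#include <linux/perf_event.h>
	#include <linux/platform_device.h>

	/* Field set reconstructed from the accesses in the listing;
	 * the real struct arc_pmu carries more state than this. */
	struct arc_pmu_sketch {
		struct pmu pmu;
		int n_counters;          /* hardware counters (pct_bcr.c) */
		int n_events;            /* countable conditions (cc_bcr.c) */
		u64 max_period;
		int irq;
		int ev_hw_idx[128];      /* size is a guess */
	};

	/* File-scope singleton, as at line 96 of the listing. */
	static struct arc_pmu_sketch *arc_pmu;

	static int arc_pmu_probe_sketch(struct platform_device *pdev)
	{
		/* Placeholder: the real driver reads the counter width
		 * from the PCT build-configuration register. */
		unsigned int counter_size = 32;
		int i;

		arc_pmu = devm_kzalloc(&pdev->dev, sizeof(*arc_pmu), GFP_KERNEL);
		if (!arc_pmu)
			return -ENOMEM;

		/* Half the counter range, so a full sampling period always
		 * fits without overflowing: 2^32 / 2 - 1 = 0x7fffffff. */
		arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

		/* No generic perf event maps to a hardware condition yet;
		 * the scan that fills this in corresponds to line 556. */
		for (i = 0; i < ARRAY_SIZE(arc_pmu->ev_hw_idx); i++)
			arc_pmu->ev_hw_idx[i] = -1;

		return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
	}

Halving the counter range for `max_period` is a common PMU-driver convention: it guarantees that the signed delta between two counter reads, and a freshly programmed sampling period, can never wrap within one overflow interval.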