cci_pmu 41 drivers/perf/arm-cci.c #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
cci_pmu 77 drivers/perf/arm-cci.c struct cci_pmu;
cci_pmu 92 drivers/perf/arm-cci.c int (*validate_hw_event)(struct cci_pmu *, unsigned long);
cci_pmu 93 drivers/perf/arm-cci.c int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
cci_pmu 94 drivers/perf/arm-cci.c void (*write_counters)(struct cci_pmu *, unsigned long *);
cci_pmu 115 drivers/perf/arm-cci.c #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
cci_pmu 117 drivers/perf/arm-cci.c static struct cci_pmu *g_cci_pmu;
cci_pmu 131 drivers/perf/arm-cci.c static void pmu_write_counters(struct cci_pmu *cci_pmu,
cci_pmu 312 drivers/perf/arm-cci.c static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
cci_pmu 326 drivers/perf/arm-cci.c for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
cci_pmu 334 drivers/perf/arm-cci.c static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
cci_pmu 365 drivers/perf/arm-cci.c if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
cci_pmu 366 drivers/perf/arm-cci.c ev_code <= cci_pmu->model->event_ranges[if_type].max)
cci_pmu 372 drivers/perf/arm-cci.c static int probe_cci400_revision(struct cci_pmu *cci_pmu)
cci_pmu 375 drivers/perf/arm-cci.c rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
cci_pmu 384 drivers/perf/arm-cci.c static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
cci_pmu 387 drivers/perf/arm-cci.c return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
cci_pmu 391 drivers/perf/arm-cci.c static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
cci_pmu 541 drivers/perf/arm-cci.c static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
cci_pmu 576 drivers/perf/arm-cci.c if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
cci_pmu 577 drivers/perf/arm-cci.c ev_code <= cci_pmu->model->event_ranges[if_type].max)
cci_pmu 592 drivers/perf/arm-cci.c static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
cci_pmu 628 drivers/perf/arm-cci.c if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
cci_pmu 629 drivers/perf/arm-cci.c ev_code <= cci_pmu->model->event_ranges[if_type].max)
cci_pmu 642 drivers/perf/arm-cci.c static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
cci_pmu 645 drivers/perf/arm-cci.c struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
cci_pmu 648 drivers/perf/arm-cci.c bitmap_zero(mask, cci_pmu->num_cntrs);
cci_pmu 649 drivers/perf/arm-cci.c for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
cci_pmu 664 drivers/perf/arm-cci.c pmu_write_counters(cci_pmu, mask);
cci_pmu 668 drivers/perf/arm-cci.c static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
cci_pmu 673 drivers/perf/arm-cci.c val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
cci_pmu 674 drivers/perf/arm-cci.c writel(val, cci_pmu->ctrl_base + CCI_PMCR);
cci_pmu 678 drivers/perf/arm-cci.c static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
cci_pmu 680 drivers/perf/arm-cci.c cci_pmu_sync_counters(cci_pmu);
cci_pmu 681 drivers/perf/arm-cci.c __cci_pmu_enable_nosync(cci_pmu);
cci_pmu 685 drivers/perf/arm-cci.c static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
cci_pmu 690 drivers/perf/arm-cci.c val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
cci_pmu 691 drivers/perf/arm-cci.c writel(val, cci_pmu->ctrl_base + CCI_PMCR);
cci_pmu 712 drivers/perf/arm-cci.c static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
cci_pmu 714 drivers/perf/arm-cci.c return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
cci_pmu 717 drivers/perf/arm-cci.c static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
cci_pmu 719 drivers/perf/arm-cci.c return readl_relaxed(cci_pmu->base +
cci_pmu 720 drivers/perf/arm-cci.c CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
cci_pmu 723 drivers/perf/arm-cci.c static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
cci_pmu 726 drivers/perf/arm-cci.c writel_relaxed(value, cci_pmu->base +
cci_pmu 727 drivers/perf/arm-cci.c CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
cci_pmu 730 drivers/perf/arm-cci.c static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
cci_pmu 732 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
cci_pmu 735 drivers/perf/arm-cci.c static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
cci_pmu 737 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
cci_pmu 741 drivers/perf/arm-cci.c pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
cci_pmu 743 drivers/perf/arm-cci.c return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
cci_pmu 746 drivers/perf/arm-cci.c static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
cci_pmu 748 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
cci_pmu 764 drivers/perf/arm-cci.c pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
cci_pmu 768 drivers/perf/arm-cci.c for (i = 0; i < cci_pmu->num_cntrs; i++) {
cci_pmu 769 drivers/perf/arm-cci.c if (pmu_counter_is_enabled(cci_pmu, i)) {
cci_pmu 771 drivers/perf/arm-cci.c pmu_disable_counter(cci_pmu, i);
cci_pmu 781 drivers/perf/arm-cci.c pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
cci_pmu 785 drivers/perf/arm-cci.c for_each_set_bit(i, mask, cci_pmu->num_cntrs)
cci_pmu 786 drivers/perf/arm-cci.c pmu_enable_counter(cci_pmu, i);
cci_pmu 793 drivers/perf/arm-cci.c static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
cci_pmu 795 drivers/perf/arm-cci.c return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
cci_pmu 801 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 805 drivers/perf/arm-cci.c if (cci_pmu->model->get_event_idx)
cci_pmu 806 drivers/perf/arm-cci.c return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
cci_pmu 809 drivers/perf/arm-cci.c for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
cci_pmu 819 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 822 drivers/perf/arm-cci.c !cci_pmu->model->validate_hw_event)
cci_pmu 825 drivers/perf/arm-cci.c return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
cci_pmu 828 drivers/perf/arm-cci.c static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
cci_pmu 831 drivers/perf/arm-cci.c struct platform_device *pmu_device = cci_pmu->plat_device;
cci_pmu 836 drivers/perf/arm-cci.c if (cci_pmu->nr_irqs < 1) {
cci_pmu 848 drivers/perf/arm-cci.c for (i = 0; i < cci_pmu->nr_irqs; i++) {
cci_pmu 849 drivers/perf/arm-cci.c int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
cci_pmu 850 drivers/perf/arm-cci.c "arm-cci-pmu", cci_pmu);
cci_pmu 853 drivers/perf/arm-cci.c cci_pmu->irqs[i]);
cci_pmu 857 drivers/perf/arm-cci.c set_bit(i, &cci_pmu->active_irqs);
cci_pmu 863 drivers/perf/arm-cci.c static void pmu_free_irq(struct cci_pmu *cci_pmu)
cci_pmu 867 drivers/perf/arm-cci.c for (i = 0; i < cci_pmu->nr_irqs; i++) {
cci_pmu 868 drivers/perf/arm-cci.c if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
cci_pmu 871 drivers/perf/arm-cci.c free_irq(cci_pmu->irqs[i], cci_pmu);
cci_pmu 877 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 882 drivers/perf/arm-cci.c if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
cci_pmu 883 drivers/perf/arm-cci.c dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
cci_pmu 886 drivers/perf/arm-cci.c value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
cci_pmu 891 drivers/perf/arm-cci.c static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
cci_pmu 893 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
cci_pmu 896 drivers/perf/arm-cci.c static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
cci_pmu 899 drivers/perf/arm-cci.c struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
cci_pmu 901 drivers/perf/arm-cci.c for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
cci_pmu 906 drivers/perf/arm-cci.c pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
cci_pmu 910 drivers/perf/arm-cci.c static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
cci_pmu 912 drivers/perf/arm-cci.c if (cci_pmu->model->write_counters)
cci_pmu 913 drivers/perf/arm-cci.c cci_pmu->model->write_counters(cci_pmu, mask);
cci_pmu 915 drivers/perf/arm-cci.c __pmu_write_counters(cci_pmu, mask);
cci_pmu 949 drivers/perf/arm-cci.c static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
cci_pmu 954 drivers/perf/arm-cci.c bitmap_zero(saved_mask, cci_pmu->num_cntrs);
cci_pmu 955 drivers/perf/arm-cci.c pmu_save_counters(cci_pmu, saved_mask);
cci_pmu 961 drivers/perf/arm-cci.c __cci_pmu_enable_nosync(cci_pmu);
cci_pmu 963 drivers/perf/arm-cci.c for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
cci_pmu 964 drivers/perf/arm-cci.c struct perf_event *event = cci_pmu->hw_events.events[i];
cci_pmu 969 drivers/perf/arm-cci.c pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
cci_pmu 970 drivers/perf/arm-cci.c pmu_enable_counter(cci_pmu, i);
cci_pmu 971 drivers/perf/arm-cci.c pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
cci_pmu 972 drivers/perf/arm-cci.c pmu_disable_counter(cci_pmu, i);
cci_pmu 973 drivers/perf/arm-cci.c pmu_set_event(cci_pmu, i, event->hw.config_base);
cci_pmu 976 drivers/perf/arm-cci.c __cci_pmu_disable(cci_pmu);
cci_pmu 978 drivers/perf/arm-cci.c pmu_restore_counters(cci_pmu, saved_mask);
cci_pmu 1030 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = dev;
cci_pmu 1031 drivers/perf/arm-cci.c struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
cci_pmu 1037 drivers/perf/arm-cci.c __cci_pmu_disable(cci_pmu);
cci_pmu 1043 drivers/perf/arm-cci.c for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
cci_pmu 1050 drivers/perf/arm-cci.c if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
cci_pmu 1054 drivers/perf/arm-cci.c pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
cci_pmu 1063 drivers/perf/arm-cci.c __cci_pmu_enable_sync(cci_pmu);
cci_pmu 1069 drivers/perf/arm-cci.c static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
cci_pmu 1071 drivers/perf/arm-cci.c int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
cci_pmu 1073 drivers/perf/arm-cci.c pmu_free_irq(cci_pmu);
cci_pmu 1079 drivers/perf/arm-cci.c static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
cci_pmu 1081 drivers/perf/arm-cci.c pmu_free_irq(cci_pmu);
cci_pmu 1086 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1087 drivers/perf/arm-cci.c atomic_t *active_events = &cci_pmu->active_events;
cci_pmu 1088 drivers/perf/arm-cci.c struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
cci_pmu 1091 drivers/perf/arm-cci.c cci_pmu_put_hw(cci_pmu);
cci_pmu 1098 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
cci_pmu 1099 drivers/perf/arm-cci.c struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
cci_pmu 1100 drivers/perf/arm-cci.c int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
cci_pmu 1107 drivers/perf/arm-cci.c __cci_pmu_enable_sync(cci_pmu);
cci_pmu 1114 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
cci_pmu 1115 drivers/perf/arm-cci.c struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
cci_pmu 1119 drivers/perf/arm-cci.c __cci_pmu_disable(cci_pmu);
cci_pmu 1128 drivers/perf/arm-cci.c static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
cci_pmu 1130 drivers/perf/arm-cci.c return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
cci_pmu 1135 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1136 drivers/perf/arm-cci.c struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
cci_pmu 1150 drivers/perf/arm-cci.c if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
cci_pmu 1151 drivers/perf/arm-cci.c dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
cci_pmu 1158 drivers/perf/arm-cci.c if (!pmu_fixed_hw_idx(cci_pmu, idx))
cci_pmu 1159 drivers/perf/arm-cci.c pmu_set_event(cci_pmu, idx, hwc->config_base);
cci_pmu 1162 drivers/perf/arm-cci.c pmu_enable_counter(cci_pmu, idx);
cci_pmu 1169 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1176 drivers/perf/arm-cci.c if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
cci_pmu 1177 drivers/perf/arm-cci.c dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
cci_pmu 1185 drivers/perf/arm-cci.c pmu_disable_counter(cci_pmu, idx);
cci_pmu 1192 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1193 drivers/perf/arm-cci.c struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
cci_pmu 1217 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1218 drivers/perf/arm-cci.c struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
cci_pmu 1229 drivers/perf/arm-cci.c static int validate_event(struct pmu *cci_pmu,
cci_pmu 1241 drivers/perf/arm-cci.c if (event->pmu != cci_pmu)
cci_pmu 1256 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1265 drivers/perf/arm-cci.c memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
cci_pmu 1319 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
cci_pmu 1320 drivers/perf/arm-cci.c atomic_t *active_events = &cci_pmu->active_events;
cci_pmu 1341 drivers/perf/arm-cci.c event->cpu = cci_pmu->cpu;
cci_pmu 1345 drivers/perf/arm-cci.c mutex_lock(&cci_pmu->reserve_mutex);
cci_pmu 1347 drivers/perf/arm-cci.c err = cci_pmu_get_hw(cci_pmu);
cci_pmu 1350 drivers/perf/arm-cci.c mutex_unlock(&cci_pmu->reserve_mutex);
cci_pmu 1366 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
cci_pmu 1368 drivers/perf/arm-cci.c return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
cci_pmu 1400 drivers/perf/arm-cci.c static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
cci_pmu 1402 drivers/perf/arm-cci.c const struct cci_pmu_model *model = cci_pmu->model;
cci_pmu 1414 drivers/perf/arm-cci.c cci_pmu->pmu = (struct pmu) {
cci_pmu 1416 drivers/perf/arm-cci.c .name = cci_pmu->model->name,
cci_pmu 1430 drivers/perf/arm-cci.c cci_pmu->plat_device = pdev;
cci_pmu 1431 drivers/perf/arm-cci.c num_cntrs = pmu_get_max_counters(cci_pmu);
cci_pmu 1432 drivers/perf/arm-cci.c if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
cci_pmu 1436 drivers/perf/arm-cci.c num_cntrs, cci_pmu->model->num_hw_cntrs);
cci_pmu 1437 drivers/perf/arm-cci.c num_cntrs = cci_pmu->model->num_hw_cntrs;
cci_pmu 1439 drivers/perf/arm-cci.c cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
cci_pmu 1441 drivers/perf/arm-cci.c return perf_pmu_register(&cci_pmu->pmu, name, -1);
cci_pmu 1595 drivers/perf/arm-cci.c static struct cci_pmu *cci_pmu_alloc(struct device *dev)
cci_pmu 1597 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu;
cci_pmu 1605 drivers/perf/arm-cci.c cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
cci_pmu 1606 drivers/perf/arm-cci.c if (!cci_pmu)
cci_pmu 1609 drivers/perf/arm-cci.c cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
cci_pmu 1615 drivers/perf/arm-cci.c model = probe_cci_model(cci_pmu);
cci_pmu 1622 drivers/perf/arm-cci.c cci_pmu->model = model;
cci_pmu 1623 drivers/perf/arm-cci.c cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
cci_pmu 1624 drivers/perf/arm-cci.c sizeof(*cci_pmu->irqs), GFP_KERNEL);
cci_pmu 1625 drivers/perf/arm-cci.c if (!cci_pmu->irqs)
cci_pmu 1627 drivers/perf/arm-cci.c cci_pmu->hw_events.events = devm_kcalloc(dev,
cci_pmu 1629 drivers/perf/arm-cci.c sizeof(*cci_pmu->hw_events.events),
cci_pmu 1631 drivers/perf/arm-cci.c if (!cci_pmu->hw_events.events)
cci_pmu 1633 drivers/perf/arm-cci.c cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
cci_pmu 1635 drivers/perf/arm-cci.c sizeof(*cci_pmu->hw_events.used_mask),
cci_pmu 1637 drivers/perf/arm-cci.c if (!cci_pmu->hw_events.used_mask)
cci_pmu 1640 drivers/perf/arm-cci.c return cci_pmu;
cci_pmu 1646 drivers/perf/arm-cci.c struct cci_pmu *cci_pmu;
cci_pmu 1649 drivers/perf/arm-cci.c cci_pmu = cci_pmu_alloc(&pdev->dev);
cci_pmu 1650 drivers/perf/arm-cci.c if (IS_ERR(cci_pmu))
cci_pmu 1651 drivers/perf/arm-cci.c return PTR_ERR(cci_pmu);
cci_pmu 1654 drivers/perf/arm-cci.c cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
cci_pmu 1655 drivers/perf/arm-cci.c if (IS_ERR(cci_pmu->base))
cci_pmu 1662 drivers/perf/arm-cci.c cci_pmu->nr_irqs = 0;
cci_pmu 1663 drivers/perf/arm-cci.c for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
cci_pmu 1668 drivers/perf/arm-cci.c if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
cci_pmu 1671 drivers/perf/arm-cci.c cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
cci_pmu 1678 drivers/perf/arm-cci.c if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
cci_pmu 1680 drivers/perf/arm-cci.c i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
cci_pmu 1684 drivers/perf/arm-cci.c raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
cci_pmu 1685 drivers/perf/arm-cci.c mutex_init(&cci_pmu->reserve_mutex);
cci_pmu 1686 drivers/perf/arm-cci.c atomic_set(&cci_pmu->active_events, 0);
cci_pmu 1688 drivers/perf/arm-cci.c cci_pmu->cpu = raw_smp_processor_id();
cci_pmu 1689 drivers/perf/arm-cci.c g_cci_pmu = cci_pmu;
cci_pmu 1694 drivers/perf/arm-cci.c ret = cci_pmu_init(cci_pmu, pdev);
cci_pmu 1698 drivers/perf/arm-cci.c pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
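
Nearly every callback indexed above recovers the driver state with to_cci_pmu(event->pmu), which line 115 defines as a plain container_of() over the struct pmu embedded in struct cci_pmu. The self-contained sketch below illustrates that idiom outside the kernel; the fake_pmu/fake_cci_pmu types and the local container_of definition are illustrative stand-ins, not the driver's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins for struct pmu / struct cci_pmu. */
struct fake_pmu {
	const char *name;
};

struct fake_cci_pmu {
	unsigned int num_cntrs;
	struct fake_pmu pmu;	/* embedded member, like cci_pmu->pmu */
};

/* Mirrors: #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) */
#define to_fake_cci_pmu(c) container_of(c, struct fake_cci_pmu, pmu)

int main(void)
{
	struct fake_cci_pmu cci = { .num_cntrs = 4, .pmu = { .name = "CCI_400" } };
	struct fake_pmu *p = &cci.pmu;	/* what a callback would be handed */

	/* Recover the containing driver state from the embedded member. */
	struct fake_cci_pmu *recovered = to_fake_cci_pmu(p);
	printf("%s has %u counters\n", recovered->pmu.name, recovered->num_cntrs);
	return 0;
}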
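
Lines 92-94 declare optional per-model hooks (validate_hw_event, get_event_idx, write_counters), and lines 805-806 and 910-915 show how they are dispatched: use the model's hook when it is set, otherwise fall back to the generic helper. A minimal sketch of that pattern under hypothetical demo_* names, not the driver's real types:

#include <stdio.h>

struct demo_pmu;

/* Optional per-model hook, loosely modelled on struct cci_pmu_model. */
struct demo_model {
	const char *name;
	int (*get_event_idx)(struct demo_pmu *pmu, unsigned long event);
};

struct demo_pmu {
	const struct demo_model *model;
	unsigned int num_cntrs;
};

/* Generic fallback path, analogous to the first-free-counter scan. */
static int demo_default_get_event_idx(struct demo_pmu *pmu, unsigned long event)
{
	(void)pmu;
	(void)event;
	return 0;	/* pretend counter 0 is always free */
}

/* Model-specific override, analogous to cci400_get_event_idx(). */
static int demo_cci400_get_event_idx(struct demo_pmu *pmu, unsigned long event)
{
	(void)pmu;
	return (event & 0x1) ? 1 : 0;
}

/* Dispatch: prefer the model hook, else the generic path (cf. lines 805-806). */
static int demo_get_event_idx(struct demo_pmu *pmu, unsigned long event)
{
	if (pmu->model->get_event_idx)
		return pmu->model->get_event_idx(pmu, event);
	return demo_default_get_event_idx(pmu, event);
}

int main(void)
{
	struct demo_model m = { .name = "CCI_400", .get_event_idx = demo_cci400_get_event_idx };
	struct demo_pmu pmu = { .model = &m, .num_cntrs = 4 };

	printf("%s picked counter %d\n", m.name, demo_get_event_idx(&pmu, 0x63));
	return 0;
}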