Lines Matching refs:pmu
117 struct pmu pmu; member
129 static struct cci_pmu *pmu; variable
131 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
210 if (ev_code >= pmu->model->event_ranges[if_type].min && in pmu_validate_hw_event()
211 ev_code <= pmu->model->event_ranges[if_type].max) in pmu_validate_hw_event()
244 return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); in pmu_read_register()
249 return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); in pmu_write_register()
278 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_get_event_idx()
322 if (pmu->nr_irqs < 1) { in pmu_request_irq()
334 for (i = 0; i < pmu->nr_irqs; i++) { in pmu_request_irq()
335 int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED, in pmu_request_irq()
339 pmu->irqs[i]); in pmu_request_irq()
343 set_bit(i, &pmu->active_irqs); in pmu_request_irq()
353 for (i = 0; i < pmu->nr_irqs; i++) { in pmu_free_irq()
354 if (!test_and_clear_bit(i, &pmu->active_irqs)) in pmu_free_irq()
357 free_irq(pmu->irqs[i], cci_pmu); in pmu_free_irq()
363 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_read_counter()
379 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_write_counter()
430 struct cci_pmu_hw_events *events = &pmu->hw_events; in pmu_handle_irq()
481 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in hw_perf_event_destroy()
491 static void cci_pmu_enable(struct pmu *pmu) in cci_pmu_enable() argument
493 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_enable()
511 static void cci_pmu_disable(struct pmu *pmu) in cci_pmu_disable() argument
513 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_disable()
528 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_start()
562 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_stop()
585 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_add()
591 perf_pmu_disable(event->pmu); in cci_pmu_add()
611 perf_pmu_enable(event->pmu); in cci_pmu_add()
617 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_del()
630 validate_event(struct pmu *cci_pmu, in validate_event()
642 if (event->pmu != cci_pmu) in validate_event()
666 if (!validate_event(event->pmu, &fake_pmu, leader)) in validate_group()
670 if (!validate_event(event->pmu, &fake_pmu, sibling)) in validate_group()
674 if (!validate_event(event->pmu, &fake_pmu, event)) in validate_group()
728 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_event_init()
733 if (event->attr.type != event->pmu->type) in cci_pmu_event_init()
786 cpumask_pr_args(&pmu->cpus)); in pmu_attr_cpumask_show()
811 cci_pmu->pmu = (struct pmu) { in cci_pmu_init()
828 return perf_pmu_register(&cci_pmu->pmu, name, -1); in cci_pmu_init()
839 if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus)) in cci_pmu_cpu_notifier()
848 cpumask_set_cpu(target, &pmu->cpus); in cci_pmu_cpu_notifier()
947 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); in cci_pmu_probe()
948 if (!pmu) in cci_pmu_probe()
951 pmu->model = model; in cci_pmu_probe()
953 pmu->base = devm_ioremap_resource(&pdev->dev, res); in cci_pmu_probe()
954 if (IS_ERR(pmu->base)) in cci_pmu_probe()
961 pmu->nr_irqs = 0; in cci_pmu_probe()
967 if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs)) in cci_pmu_probe()
970 pmu->irqs[pmu->nr_irqs++] = irq; in cci_pmu_probe()
983 raw_spin_lock_init(&pmu->hw_events.pmu_lock); in cci_pmu_probe()
984 mutex_init(&pmu->reserve_mutex); in cci_pmu_probe()
985 atomic_set(&pmu->active_events, 0); in cci_pmu_probe()
986 cpumask_set_cpu(smp_processor_id(), &pmu->cpus); in cci_pmu_probe()
992 ret = cci_pmu_init(pmu, pdev); in cci_pmu_probe()
996 pr_info("ARM %s PMU driver probed", pmu->model->name); in cci_pmu_probe()
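
The listing above revolves around one idiom: struct cci_pmu embeds a struct pmu member (line 117), and to_cci_pmu() (line 131) uses container_of() to recover the enclosing cci_pmu from the struct pmu pointer that perf hands back via event->pmu (lines 278, 363, 379, 481, 528, 562, 585, 617, 728). Below is a minimal, self-contained userspace sketch of that pattern, not the driver itself: the container_of fallback, the stub struct members, and main() are illustrative assumptions; only the embedding relationship and the to_cci_pmu() macro mirror the listing.

  #include <stddef.h>
  #include <stdio.h>

  /* Userspace stand-in for the kernel's container_of(). */
  #ifndef container_of
  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))
  #endif

  /* Stand-ins for the kernel types; only the embedding matters here. */
  struct pmu {
          int type;
  };

  struct cci_pmu {
          void *base;       /* cf. line 953: pmu->base = devm_ioremap_resource(...) */
          struct pmu pmu;   /* cf. line 117: struct pmu pmu; member */
          int nr_irqs;      /* cf. line 961: pmu->nr_irqs = 0; */
  };

  /* cf. line 131 */
  #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))

  int main(void)
  {
          struct cci_pmu cci = { .nr_irqs = 2 };
          struct pmu *p = &cci.pmu;   /* what the callbacks see as event->pmu */

          /* Recover the enclosing cci_pmu, as the callbacks in the listing do. */
          struct cci_pmu *back = to_cci_pmu(p);

          printf("round-trip ok: %d, nr_irqs=%d\n", back == &cci, back->nr_irqs);
          return 0;
  }

Because the struct pmu is embedded rather than pointed to, the round trip is pure pointer arithmetic with no extra allocation or lookup, which is why every callback in the listing can start with to_cci_pmu(event->pmu).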
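Lines 961-970 of the listing show cci_pmu_probe() collecting platform IRQs while skipping duplicates via is_duplicate_irq(). The sketch below reconstructs that filtering in plain C under stated assumptions: the body of is_duplicate_irq() (a linear scan), MAX_IRQS, and the sample IRQ numbers are invented for the demo, not taken from the driver.

  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_IRQS 4   /* assumption for the demo */

  /* Plausible reconstruction: has this IRQ already been recorded? */
  static bool is_duplicate_irq(int irq, const int *irqs, int nr_irqs)
  {
          for (int i = 0; i < nr_irqs; i++)
                  if (irqs[i] == irq)
                          return true;
          return false;
  }

  int main(void)
  {
          /* Pretend the platform resources yielded these, duplicates included. */
          int platform_irqs[] = { 37, 37, 41, 41 };
          int irqs[MAX_IRQS];
          int nr_irqs = 0;   /* cf. line 961: pmu->nr_irqs = 0; */

          for (unsigned i = 0; i < sizeof(platform_irqs) / sizeof(*platform_irqs); i++) {
                  int irq = platform_irqs[i];

                  /* cf. line 967: skip IRQs already seen */
                  if (is_duplicate_irq(irq, irqs, nr_irqs))
                          continue;
                  /* cf. line 970: record the unique IRQ */
                  irqs[nr_irqs++] = irq;
          }

          printf("unique IRQs: %d\n", nr_irqs);   /* prints 2 */
          return 0;
  }

The deduplicated array is what pmu_request_irq() (lines 322-343) later walks with request_irq(), setting a bit in active_irqs for each line actually claimed so pmu_free_irq() (lines 353-357) releases only those.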