Lines matching refs:pmu (identifier cross-reference hits from the kernel's Intel RAPL perf PMU driver)
125 struct pmu *pmu; /* pointer to rapl_pmu_class */ member
131 static struct pmu rapl_pmu_class;
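Lines 125 and 131 are the glue between the driver's per-CPU state and the generic perf core: each per-CPU instance carries a back-pointer to the single shared struct pmu. A minimal sketch of the per-CPU structure these hits belong to, inferred from the usage sites below; the field order and the exact field set are assumptions, not a verbatim copy:

struct rapl_pmu {
	spinlock_t	 lock;		/* protects n_active and active_list */
	int		 n_active;	/* events currently started on this CPU */
	struct list_head active_list;	/* started events, walked by the hrtimer */
	struct pmu	 *pmu;		/* pointer to rapl_pmu_class */
	ktime_t		 timer_interval;/* overflow-polling period */
	struct hrtimer	 hrtimer;	/* per-CPU overflow-polling timer */
};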
205 static void rapl_start_hrtimer(struct rapl_pmu *pmu) in rapl_start_hrtimer() argument
207 __hrtimer_start_range_ns(&pmu->hrtimer, in rapl_start_hrtimer()
208 pmu->timer_interval, 0, in rapl_start_hrtimer()
212 static void rapl_stop_hrtimer(struct rapl_pmu *pmu) in rapl_stop_hrtimer() argument
214 hrtimer_cancel(&pmu->hrtimer); in rapl_stop_hrtimer()
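Lines 205-214 are the timer helpers. A sketch of how the visible fragments plausibly complete; the HRTIMER_MODE_REL_PINNED mode flag is an assumption (a relative timer pinned to the CPU matches the per-CPU usage):

static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	/* arm a relative timer pinned to this CPU (mode flag assumed) */
	__hrtimer_start_range_ns(&pmu->hrtimer,
				 pmu->timer_interval, 0,
				 HRTIMER_MODE_REL_PINNED, 0);
}

static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_cancel(&pmu->hrtimer);
}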
219 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_hrtimer_handle() local
223 if (!pmu->n_active) in rapl_hrtimer_handle()
226 spin_lock_irqsave(&pmu->lock, flags); in rapl_hrtimer_handle()
228 list_for_each_entry(event, &pmu->active_list, active_entry) { in rapl_hrtimer_handle()
232 spin_unlock_irqrestore(&pmu->lock, flags); in rapl_hrtimer_handle()
234 hrtimer_forward_now(hrtimer, pmu->timer_interval); in rapl_hrtimer_handle()
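The handler at lines 219-234 periodically re-reads every started event so the RAPL energy counters are sampled before they can wrap. A sketch of the full callback under the hrtimer contract; the rapl_event_update() name in the loop body is an assumption standing in for the driver's update routine:

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
	struct perf_event *event;
	unsigned long flags;

	/* no started events: let the timer die rather than rearm */
	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);	/* assumed update helper */

	spin_unlock_irqrestore(&pmu->lock, flags);

	/* rearm one interval from now and keep running */
	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}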
239 static void rapl_hrtimer_init(struct rapl_pmu *pmu) in rapl_hrtimer_init() argument
241 struct hrtimer *hr = &pmu->hrtimer; in rapl_hrtimer_init()
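Lines 239-241 set the timer up once per CPU. A sketch, assuming the conventional hrtimer_init() pairing with the handler above:

static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}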
247 static void __rapl_pmu_event_start(struct rapl_pmu *pmu, in __rapl_pmu_event_start() argument
255 list_add_tail(&event->active_entry, &pmu->active_list); in __rapl_pmu_event_start()
259 pmu->n_active++; in __rapl_pmu_event_start()
260 if (pmu->n_active == 1) in __rapl_pmu_event_start()
261 rapl_start_hrtimer(pmu); in __rapl_pmu_event_start()
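Lines 247-261 show the core start path: the event joins active_list, and the first started event on the CPU arms the polling timer. A sketch of the whole helper; the PERF_HES_STOPPED guard and the prev_count snapshot via a rapl_read_counter() helper are assumptions filling the lines the listing elides:

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	/* snapshot the counter so later deltas have a baseline (assumed) */
	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	/* first started event on this CPU arms the polling timer */
	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}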
266 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_start() local
269 spin_lock_irqsave(&pmu->lock, flags); in rapl_pmu_event_start()
270 __rapl_pmu_event_start(pmu, event); in rapl_pmu_event_start()
271 spin_unlock_irqrestore(&pmu->lock, flags); in rapl_pmu_event_start()
276 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_stop() local
280 spin_lock_irqsave(&pmu->lock, flags); in rapl_pmu_event_stop()
284 WARN_ON_ONCE(pmu->n_active <= 0); in rapl_pmu_event_stop()
285 pmu->n_active--; in rapl_pmu_event_stop()
286 if (pmu->n_active == 0) in rapl_pmu_event_stop()
287 rapl_stop_hrtimer(pmu); in rapl_pmu_event_stop()
305 spin_unlock_irqrestore(&pmu->lock, flags); in rapl_pmu_event_stop()
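Lines 276-305 are the mirror image: the last event leaving the CPU cancels the timer, and the elided lines between 287 and 305 presumably mark the event stopped and drain its final delta. A sketch under those assumptions:

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	/* deactivate: drop off the list, last one out stops the timer */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			rapl_stop_hrtimer(pmu);

		list_del(&event->active_entry);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* drain the remaining delta out of the stopping event (assumed) */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}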
310 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_add() local
314 spin_lock_irqsave(&pmu->lock, flags); in rapl_pmu_event_add()
319 __rapl_pmu_event_start(pmu, event); in rapl_pmu_event_add()
321 spin_unlock_irqrestore(&pmu->lock, flags); in rapl_pmu_event_add()
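rapl_pmu_event_add() at lines 310-321 follows the standard perf add() contract: mark the event stopped and up to date, then start it immediately when PERF_EF_START is set. A sketch:

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}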
513 static struct pmu rapl_pmu_class = {
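Line 513 opens the initializer that wires the callbacks above into the perf core. A sketch of its plausible shape; every callback not shown in this listing (event_init, del, read, the attribute groups) is an assumed name following the standard struct pmu contract:

static struct pmu rapl_pmu_class = {
	.attr_groups	= rapl_attr_groups,	/* assumed name */
	.task_ctx_nr	= perf_invalid_context,	/* system-wide counters only */
	.event_init	= rapl_pmu_event_init,	/* assumed name */
	.add		= rapl_pmu_event_add,
	.del		= rapl_pmu_event_del,	/* assumed name */
	.start		= rapl_pmu_event_start,
	.stop		= rapl_pmu_event_stop,
	.read		= rapl_pmu_event_read,	/* assumed name */
};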
526 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_exit() local
552 perf_pmu_migrate_context(pmu->pmu, cpu, target); in rapl_cpu_exit()
555 rapl_stop_hrtimer(pmu); in rapl_cpu_exit()
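Lines 526-555 handle a CPU going offline: events and their context migrate to another CPU, and this CPU's polling timer is cancelled. A sketch, assuming the usual pattern of picking a surviving CPU on the same package and tracking the active reader in a rapl_cpu_mask cpumask (both assumptions; only the migrate and timer-stop calls appear in the listing):

static void rapl_cpu_exit(int cpu)
{
	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
	int phys_id = topology_physical_package_id(cpu);
	int i, target = -1;

	/* find another online CPU on the same package (assumed search) */
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* hand the package's reader role to the survivor (assumed mask) */
	if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0)
		cpumask_set_cpu(target, &rapl_cpu_mask);

	/* move events and context off the dying CPU */
	if (target >= 0)
		perf_pmu_migrate_context(pmu->pmu, cpu, target);

	/* cancel this CPU's overflow-polling timer */
	rapl_stop_hrtimer(pmu);
}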
584 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_prepare() local
588 if (pmu) in rapl_cpu_prepare()
594 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); in rapl_cpu_prepare()
595 if (!pmu) in rapl_cpu_prepare()
597 spin_lock_init(&pmu->lock); in rapl_cpu_prepare()
599 INIT_LIST_HEAD(&pmu->active_list); in rapl_cpu_prepare()
601 pmu->pmu = &rapl_pmu_class; in rapl_cpu_prepare()
615 pmu->timer_interval = ms_to_ktime(ms); in rapl_cpu_prepare()
617 rapl_hrtimer_init(pmu); in rapl_cpu_prepare()
620 per_cpu(rapl_pmu, cpu) = pmu; in rapl_cpu_prepare()
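Lines 584-620 allocate and initialize the per-CPU instance on hotplug prepare, reusing an instance left over from an earlier cycle. A sketch; the derivation of ms feeding line 615 is elided in the listing, so a placeholder value stands in for it:

static int rapl_cpu_prepare(int cpu)
{
	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
	u64 ms;

	/* left over from an earlier hotplug cycle: reuse it */
	if (pmu)
		return 0;

	/* allocate on the CPU's own NUMA node */
	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
	if (!pmu)
		return -1;

	spin_lock_init(&pmu->lock);
	INIT_LIST_HEAD(&pmu->active_list);
	pmu->pmu = &rapl_pmu_class;

	/*
	 * Polling period, sized so the energy counter cannot wrap
	 * between two ticks; the real derivation is elided in the
	 * listing, so this is a placeholder value.
	 */
	ms = 1000;
	pmu->timer_interval = ms_to_ktime(ms);

	rapl_hrtimer_init(pmu);

	per_cpu(rapl_pmu, cpu) = pmu;
	return 0;
}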
628 struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); in rapl_cpu_kfree() local
630 kfree(pmu); in rapl_cpu_kfree()
637 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_dying() local
639 if (!pmu) in rapl_cpu_dying()
644 per_cpu(rapl_pmu_to_free, cpu) = pmu; in rapl_cpu_dying()
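Teardown is split across two notifier stages: rapl_cpu_dying() only unhooks the per-CPU pointer and parks it in rapl_pmu_to_free, and rapl_cpu_kfree() reclaims it at a later stage. A sketch of the pair; the return values follow the visible if (!pmu) early-out:

static int rapl_cpu_dying(int cpu)
{
	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);

	if (!pmu)
		return 0;

	/* unhook now, free later */
	per_cpu(rapl_pmu, cpu) = NULL;
	per_cpu(rapl_pmu_to_free, cpu) = pmu;

	return 0;
}

static void rapl_cpu_kfree(int cpu)
{
	struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu);

	kfree(pmu);
	per_cpu(rapl_pmu_to_free, cpu) = NULL;
}

Deferring the kfree() keeps the dying step minimal; the actual free runs from a later notifier stage in a friendlier context.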
700 struct rapl_pmu *pmu; in rapl_pmu_init() local
764 pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_init()
771 ktime_to_ms(pmu->timer_interval)); in rapl_pmu_init()
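rapl_pmu_init() at lines 700-771 ends by reading back this CPU's instance and logging the chosen polling interval. A sketch of that tail; the banner text is abbreviated and anything beyond the two visible statements is an assumption:

	pmu = __this_cpu_read(rapl_pmu);

	pr_info("RAPL PMU detected, %llu ms ovfl timer\n",
		ktime_to_ms(pmu->timer_interval));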