Lines matching refs:pmu (identifier cross-reference; all hits are in kernel/events/core.c)
340 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
475 struct pmu *pmu; in perf_cgroup_switch() local
491 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_cgroup_switch()
492 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cgroup_switch()
493 if (cpuctx->unique_pmu != pmu) in perf_cgroup_switch()
505 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
526 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
777 struct pmu *pmu; in perf_cpu_hrtimer_cancel() local
787 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_cpu_hrtimer_cancel()
788 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cpu_hrtimer_cancel()
790 if (pmu->task_ctx_nr == perf_sw_context) in perf_cpu_hrtimer_cancel()
804 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_cpu_hrtimer_init() local
808 if (pmu->task_ctx_nr == perf_sw_context) in __perf_cpu_hrtimer_init()
815 timer = pmu->hrtimer_interval_ms; in __perf_cpu_hrtimer_init()
817 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; in __perf_cpu_hrtimer_init()
828 struct pmu *pmu = cpuctx->ctx.pmu; in perf_cpu_hrtimer_restart() local
831 if (pmu->task_ctx_nr == perf_sw_context) in perf_cpu_hrtimer_restart()
842 void perf_pmu_disable(struct pmu *pmu) in perf_pmu_disable() argument
844 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_disable()
846 pmu->pmu_disable(pmu); in perf_pmu_disable()
849 void perf_pmu_enable(struct pmu *pmu) in perf_pmu_enable() argument
851 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_enable()
853 pmu->pmu_enable(pmu); in perf_pmu_enable()
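The search shows only the lines that mention pmu, which hides the point of pmu_disable_count at lines 844 and 851: the hardware callbacks run only on the outermost disable and the matching outermost enable, so nested disable/enable pairs are cheap. A minimal standalone model of that nesting counter (the per-CPU storage is collapsed to a plain int, an assumption for illustration only):

#include <stdio.h>

struct pmu {
	int disable_count;	/* stands in for the percpu pmu_disable_count */
	void (*pmu_disable)(struct pmu *pmu);
	void (*pmu_enable)(struct pmu *pmu);
};

static void hw_disable(struct pmu *pmu) { (void)pmu; puts("hardware disabled"); }
static void hw_enable(struct pmu *pmu)  { (void)pmu; puts("hardware enabled");  }

/* Only the 0 -> 1 transition touches the hardware. */
static void perf_pmu_disable(struct pmu *pmu)
{
	if (!pmu->disable_count++)
		pmu->pmu_disable(pmu);
}

/* Only the matching 1 -> 0 transition re-enables it. */
static void perf_pmu_enable(struct pmu *pmu)
{
	if (!--pmu->disable_count)
		pmu->pmu_enable(pmu);
}

int main(void)
{
	struct pmu p = { 0, hw_disable, hw_enable };

	perf_pmu_disable(&p);	/* prints "hardware disabled" */
	perf_pmu_disable(&p);	/* nested: no hardware access */
	perf_pmu_enable(&p);	/* still nested: no hardware access */
	perf_pmu_enable(&p);	/* prints "hardware enabled" */
	return 0;
}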
1563 perf_pmu_disable(event->pmu); in event_sched_out()
1566 event->pmu->del(event, 0); in event_sched_out()
1586 perf_pmu_enable(event->pmu); in event_sched_out()
1887 perf_pmu_disable(event->pmu); in event_sched_in()
1893 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
1916 perf_pmu_enable(event->pmu); in event_sched_in()
1927 struct pmu *pmu = ctx->pmu; in group_sched_in() local
1934 pmu->start_txn(pmu); in group_sched_in()
1937 pmu->cancel_txn(pmu); in group_sched_in()
1952 if (!pmu->commit_txn(pmu)) in group_sched_in()
1983 pmu->cancel_txn(pmu); in group_sched_in()
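Lines 1934-1983 are the all-or-nothing group scheduling: start a transaction, try to add the leader and every sibling, and commit only if each add succeeded; note at line 1952 that commit_txn() returning 0 means success. A standalone sketch of the pattern (the two-counter PMU and the helper names here are assumptions for illustration):

#include <stdio.h>

static int counters_used;

static void start_txn(void)  { puts("start_txn");  }
static int  commit_txn(void) { puts("commit_txn"); return 0; }
static void cancel_txn(void) { counters_used = 0; puts("cancel_txn: rolled back"); }

/* Pretend the PMU has only two hardware counters. */
static int pmu_add(int ev)
{
	if (counters_used >= 2)
		return -1;	/* no free counter left */
	counters_used++;
	printf("added event %d\n", ev);
	return 0;
}

/* Mirrors the shape of group_sched_in(): any failure rolls back the
 * whole group, so a partial group is never left on the hardware. */
static int group_sched_in(int nr_events)
{
	int i;

	start_txn();
	for (i = 0; i < nr_events; i++) {
		if (pmu_add(i)) {
			cancel_txn();
			return -1;
		}
	}
	if (!commit_txn())
		return 0;	/* the whole group fits */
	cancel_txn();
	return -1;
}

int main(void)
{
	printf("group of 2: %s\n\n", group_sched_in(2) ? "rejected" : "scheduled");
	counters_used = 0;
	printf("group of 3: %s\n", group_sched_in(3) ? "rejected" : "scheduled");
	return 0;
}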
2066 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
2107 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
2397 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
2407 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
2465 event->pmu->read(event); in __perf_event_sync_stat()
2590 void perf_sched_cb_dec(struct pmu *pmu) in perf_sched_cb_dec() argument
2595 void perf_sched_cb_inc(struct pmu *pmu) in perf_sched_cb_inc() argument
2609 struct pmu *pmu; in perf_pmu_sched_task() local
2619 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_pmu_sched_task()
2620 if (pmu->sched_task) { in perf_pmu_sched_task()
2621 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_sched_task()
2625 perf_pmu_disable(pmu); in perf_pmu_sched_task()
2627 pmu->sched_task(cpuctx->task_ctx, sched_in); in perf_pmu_sched_task()
2629 perf_pmu_enable(pmu); in perf_pmu_sched_task()
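perf_sched_cb_inc()/dec() at lines 2590-2595 keep a per-CPU count of PMUs that want the context-switch callback, so the switch path can skip the walk entirely while it is zero. The flat listing also loses the loop structure of perf_pmu_sched_task(); reassembled (a sketch: the locking lines contain no "pmu" and were dropped by the search, so they are reconstructed from memory of this era of the code), the walk reads roughly:

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (pmu->sched_task) {
			cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(pmu);

			pmu->sched_task(cpuctx->task_ctx, sched_in);

			perf_pmu_enable(pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}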
2803 perf_pmu_disable(ctx->pmu); in perf_event_context_sched_in()
2816 perf_pmu_enable(ctx->pmu); in perf_event_context_sched_in()
2952 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
2957 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
2983 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
2992 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
2999 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3008 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3024 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3026 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3029 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
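The stop/start pairs at lines 2952-2957 and 3008-3024 use one idiom throughout: stop(event, PERF_EF_UPDATE) asks the driver to fold the live hardware value into the event count before quiescing the counter, and start(event, PERF_EF_RELOAD) asks it to reprogram the counter with the (possibly adjusted) sample period. A standalone model of the two flags (the flag values match include/linux/perf_event.h; the struct layout is a simplifying assumption):

#include <stdio.h>

#define PERF_EF_START	0x01	/* start the counter when adding it */
#define PERF_EF_RELOAD	0x02	/* reprogram the period when starting */
#define PERF_EF_UPDATE	0x04	/* fold the hw value when stopping */

struct event {
	long long count;	/* accumulated total */
	long long hw;		/* value still in the hardware counter */
	long long period;	/* sample period to program on reload */
};

static void stop(struct event *e, int flags)
{
	if (flags & PERF_EF_UPDATE) {
		e->count += e->hw;	/* read out before quiescing */
		e->hw = 0;
	}
}

static void start(struct event *e, int flags)
{
	if (flags & PERF_EF_RELOAD)
		printf("reloaded: next overflow after %lld events\n", e->period);
}

int main(void)
{
	struct event e = { .count = 100, .hw = 7, .period = 4000 };

	stop(&e, PERF_EF_UPDATE);	/* as in perf_adjust_period() */
	e.period = 8000;		/* the frequency logic picked a new period */
	start(&e, PERF_EF_RELOAD);
	printf("count = %lld\n", e.count);	/* 107 */
	return 0;
}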
3066 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
3078 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
3221 event->pmu->read(event); in __perf_event_read()
3227 if (event->pmu->count) in perf_event_count()
3228 return event->pmu->count(event); in perf_event_count()
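Lines 3227-3228 are an override hook: a PMU may supply a count() method to produce the event value itself, for example straight from a free-running counter, instead of the generic accumulation. A standalone model of the dispatch (the generic fallback shown is a simplification of the kernel's __perf_event_count()):

#include <stdio.h>

struct event;

struct pmu {
	/* optional: lets the driver compute the value itself */
	unsigned long long (*count)(struct event *event);
};

struct event {
	struct pmu *pmu;
	unsigned long long count;
	unsigned long long child_count;
};

static unsigned long long perf_event_count(struct event *event)
{
	if (event->pmu->count)
		return event->pmu->count(event);
	return event->count + event->child_count;	/* generic path */
}

/* Hypothetical driver method reading a free-running counter. */
static unsigned long long freerunning_count(struct event *event)
{
	(void)event;
	return 12345;
}

int main(void)
{
	struct pmu generic = { 0 }, freerunning = { freerunning_count };
	struct event e = { &generic, 100, 23 };

	printf("%llu\n", perf_event_count(&e));	/* 123: generic sum */
	e.pmu = &freerunning;
	printf("%llu\n", perf_event_count(&e));	/* 12345: driver override */
	return 0;
}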
3279 alloc_perf_context(struct pmu *pmu, struct task_struct *task) in alloc_perf_context() argument
3292 ctx->pmu = pmu; in alloc_perf_context()
3331 find_get_context(struct pmu *pmu, struct task_struct *task, in find_get_context() argument
3354 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
3363 ctxn = pmu->task_ctx_nr; in find_get_context()
3368 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); in find_get_context()
3390 ctx = alloc_perf_context(pmu, task); in find_get_context()
3497 struct pmu *pmu = event->pmu; in exclusive_event_init() local
3499 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_init()
3516 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) in exclusive_event_init()
3519 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) in exclusive_event_init()
3528 struct pmu *pmu = event->pmu; in exclusive_event_destroy() local
3530 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_destroy()
3535 atomic_dec(&pmu->exclusive_cnt); in exclusive_event_destroy()
3537 atomic_inc(&pmu->exclusive_cnt); in exclusive_event_destroy()
3542 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && in exclusive_event_match()
3555 struct pmu *pmu = event->pmu; in exclusive_event_installable() local
3557 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_installable()
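The exclusive_cnt arithmetic at lines 3516-3537 encodes a sign convention: on a PMU with PERF_PMU_CAP_EXCLUSIVE, per-task events push the counter positive and per-CPU events push it negative, so atomic_inc_unless_negative()/atomic_dec_unless_positive() refuse to mix the two kinds. A standalone model (plain ints stand in for the atomics, an assumption for illustration):

#include <stdio.h>

static int exclusive_cnt;	/* >0: task events hold the PMU; <0: cpu events */

static int inc_unless_negative(int *v)
{
	if (*v < 0)
		return 0;	/* cpu events already own the PMU */
	(*v)++;
	return 1;
}

static int dec_unless_positive(int *v)
{
	if (*v > 0)
		return 0;	/* task events already own the PMU */
	(*v)--;
	return 1;
}

int main(void)
{
	/* A per-task event takes the counter to +1 ... */
	printf("task event: %s\n",
	       inc_unless_negative(&exclusive_cnt) ? "ok" : "busy");
	/* ... so a per-CPU event must now be refused. */
	printf("cpu event:  %s\n",
	       dec_unless_positive(&exclusive_cnt) ? "ok" : "busy");
	return 0;
}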
3583 if (event->pmu) { in __free_event()
3585 module_put(event->pmu->module); in __free_event()
4002 perf_pmu_disable(ctx->pmu); in __perf_event_period()
4003 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
4009 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
4010 perf_pmu_enable(ctx->pmu); in __perf_event_period()
4222 return event->pmu->event_idx(event); in perf_event_index()
4449 if (event->pmu->event_mapped) in perf_mmap_open()
4450 event->pmu->event_mapped(event); in perf_mmap_open()
4470 if (event->pmu->event_unmapped) in perf_mmap_close()
4471 event->pmu->event_unmapped(event); in perf_mmap_close()
4756 if (event->pmu->event_mapped) in perf_mmap()
4757 event->pmu->event_mapped(event); in perf_mmap()
5108 leader->pmu->read(leader); in perf_output_read_group()
5121 sub->pmu->read(sub); in perf_output_read_group()
5506 struct pmu *pmu; in perf_event_aux() local
5510 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_event_aux()
5511 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
5512 if (cpuctx->unique_pmu != pmu) in perf_event_aux()
5517 ctxn = pmu->task_ctx_nr; in perf_event_aux()
5524 put_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
6067 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
6612 static struct pmu perf_swevent = {
6731 static struct pmu perf_tracepoint = {
6868 event->pmu->read(event); in perf_swevent_hrtimer()
7008 static struct pmu perf_cpu_clock = {
7089 static struct pmu perf_task_clock = {
7102 static void perf_pmu_nop_void(struct pmu *pmu) in perf_pmu_nop_void() argument
7106 static int perf_pmu_nop_int(struct pmu *pmu) in perf_pmu_nop_int() argument
7111 static void perf_pmu_start_txn(struct pmu *pmu) in perf_pmu_start_txn() argument
7113 perf_pmu_disable(pmu); in perf_pmu_start_txn()
7116 static int perf_pmu_commit_txn(struct pmu *pmu) in perf_pmu_commit_txn() argument
7118 perf_pmu_enable(pmu); in perf_pmu_commit_txn()
7122 static void perf_pmu_cancel_txn(struct pmu *pmu) in perf_pmu_cancel_txn() argument
7124 perf_pmu_enable(pmu); in perf_pmu_cancel_txn()
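Only the pmu-bearing lines of these default transaction callbacks survive the search; filled back in (the one elided statement is commit's return 0), they show that a PMU without native transaction support simply brackets the whole group in one disable/enable pair and always reports success:

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;	/* cannot fail: each add was attempted eagerly */
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}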
7138 struct pmu *pmu; in find_pmu_context() local
7143 list_for_each_entry(pmu, &pmus, entry) { in find_pmu_context()
7144 if (pmu->task_ctx_nr == ctxn) in find_pmu_context()
7145 return pmu->pmu_cpu_context; in find_pmu_context()
7151 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) in update_pmu_context() argument
7158 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
7161 cpuctx->unique_pmu = pmu; in update_pmu_context()
7165 static void free_pmu_context(struct pmu *pmu) in free_pmu_context() argument
7167 struct pmu *i; in free_pmu_context()
7174 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { in free_pmu_context()
7175 update_pmu_context(i, pmu); in free_pmu_context()
7180 free_percpu(pmu->pmu_cpu_context); in free_pmu_context()
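find_pmu_context()/free_pmu_context() at lines 7138-7180 implement sharing: PMUs with the same task_ctx_nr (all software PMUs, for instance) share one percpu perf_cpu_context, so unregistering one sharer must first re-point unique_pmu at a survivor (lines 7174-7175) and may only free the storage when the last sharer goes away. A standalone model of that lifetime rule (a fixed-size array replaces the kernel's list, a simplification):

#include <stdio.h>
#include <stdlib.h>

struct pmu {
	int task_ctx_nr;
	int *cpu_context;	/* stands in for the percpu allocation */
};

static struct pmu *pmus[8];
static int nr_pmus;

/* Return an existing context for this task_ctx_nr, if any sharer has one. */
static int *find_pmu_context(int ctxn)
{
	int i;

	for (i = 0; i < nr_pmus; i++)
		if (pmus[i] && pmus[i]->task_ctx_nr == ctxn)
			return pmus[i]->cpu_context;
	return NULL;
}

static void register_pmu(struct pmu *pmu)
{
	pmu->cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (!pmu->cpu_context)
		pmu->cpu_context = calloc(1, sizeof(int));
	pmus[nr_pmus++] = pmu;
}

static void unregister_pmu(struct pmu *pmu)
{
	int i;

	for (i = 0; i < nr_pmus; i++) {
		if (pmus[i] == pmu)
			pmus[i] = NULL;
		else if (pmus[i] && pmus[i]->cpu_context == pmu->cpu_context)
			return;		/* still shared: must not free */
	}
	free(pmu->cpu_context);		/* last sharer is gone */
}

int main(void)
{
	struct pmu a = { 1, NULL }, b = { 1, NULL };

	register_pmu(&a);
	register_pmu(&b);
	printf("shared: %s\n", a.cpu_context == b.cpu_context ? "yes" : "no");
	unregister_pmu(&a);	/* b keeps the shared context alive */
	unregister_pmu(&b);	/* now the context is actually freed */
	return 0;
}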
7189 struct pmu *pmu = dev_get_drvdata(dev); in type_show() local
7191 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); in type_show()
7200 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_show() local
7202 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); in perf_event_mux_interval_ms_show()
7210 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_store() local
7221 if (timer == pmu->hrtimer_interval_ms) in perf_event_mux_interval_ms_store()
7224 pmu->hrtimer_interval_ms = timer; in perf_event_mux_interval_ms_store()
7229 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
7258 static int pmu_dev_alloc(struct pmu *pmu) in pmu_dev_alloc() argument
7262 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); in pmu_dev_alloc()
7263 if (!pmu->dev) in pmu_dev_alloc()
7266 pmu->dev->groups = pmu->attr_groups; in pmu_dev_alloc()
7267 device_initialize(pmu->dev); in pmu_dev_alloc()
7268 ret = dev_set_name(pmu->dev, "%s", pmu->name); in pmu_dev_alloc()
7272 dev_set_drvdata(pmu->dev, pmu); in pmu_dev_alloc()
7273 pmu->dev->bus = &pmu_bus; in pmu_dev_alloc()
7274 pmu->dev->release = pmu_dev_release; in pmu_dev_alloc()
7275 ret = device_add(pmu->dev); in pmu_dev_alloc()
7283 put_device(pmu->dev); in pmu_dev_alloc()
7290 int perf_pmu_register(struct pmu *pmu, const char *name, int type) in perf_pmu_register() argument
7296 pmu->pmu_disable_count = alloc_percpu(int); in perf_pmu_register()
7297 if (!pmu->pmu_disable_count) in perf_pmu_register()
7300 pmu->type = -1; in perf_pmu_register()
7303 pmu->name = name; in perf_pmu_register()
7306 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); in perf_pmu_register()
7312 pmu->type = type; in perf_pmu_register()
7315 ret = pmu_dev_alloc(pmu); in perf_pmu_register()
7321 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); in perf_pmu_register()
7322 if (pmu->pmu_cpu_context) in perf_pmu_register()
7326 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); in perf_pmu_register()
7327 if (!pmu->pmu_cpu_context) in perf_pmu_register()
7333 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
7337 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
7341 cpuctx->unique_pmu = pmu; in perf_pmu_register()
7345 if (!pmu->start_txn) { in perf_pmu_register()
7346 if (pmu->pmu_enable) { in perf_pmu_register()
7352 pmu->start_txn = perf_pmu_start_txn; in perf_pmu_register()
7353 pmu->commit_txn = perf_pmu_commit_txn; in perf_pmu_register()
7354 pmu->cancel_txn = perf_pmu_cancel_txn; in perf_pmu_register()
7356 pmu->start_txn = perf_pmu_nop_void; in perf_pmu_register()
7357 pmu->commit_txn = perf_pmu_nop_int; in perf_pmu_register()
7358 pmu->cancel_txn = perf_pmu_nop_void; in perf_pmu_register()
7362 if (!pmu->pmu_enable) { in perf_pmu_register()
7363 pmu->pmu_enable = perf_pmu_nop_void; in perf_pmu_register()
7364 pmu->pmu_disable = perf_pmu_nop_void; in perf_pmu_register()
7367 if (!pmu->event_idx) in perf_pmu_register()
7368 pmu->event_idx = perf_event_idx_default; in perf_pmu_register()
7370 list_add_rcu(&pmu->entry, &pmus); in perf_pmu_register()
7371 atomic_set(&pmu->exclusive_cnt, 0); in perf_pmu_register()
7379 device_del(pmu->dev); in perf_pmu_register()
7380 put_device(pmu->dev); in perf_pmu_register()
7383 if (pmu->type >= PERF_TYPE_MAX) in perf_pmu_register()
7384 idr_remove(&pmu_idr, pmu->type); in perf_pmu_register()
7387 free_percpu(pmu->pmu_disable_count); in perf_pmu_register()
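The flat listing loses the branch structure of the fallback wiring at lines 7345-7368: a driver that supplies pmu_enable/pmu_disable but no transaction hooks gets the disable/enable-based stubs, one that supplies neither gets no-ops, and a missing enable/disable pair or event_idx hook is likewise defaulted. Reassembled, the control flow is:

	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use those to batch the
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;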
7392 void perf_pmu_unregister(struct pmu *pmu) in perf_pmu_unregister() argument
7395 list_del_rcu(&pmu->entry); in perf_pmu_unregister()
7405 free_percpu(pmu->pmu_disable_count); in perf_pmu_unregister()
7406 if (pmu->type >= PERF_TYPE_MAX) in perf_pmu_unregister()
7407 idr_remove(&pmu_idr, pmu->type); in perf_pmu_unregister()
7408 device_del(pmu->dev); in perf_pmu_unregister()
7409 put_device(pmu->dev); in perf_pmu_unregister()
7410 free_pmu_context(pmu); in perf_pmu_unregister()
7414 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
7419 if (!try_module_get(pmu->module)) in perf_try_init_event()
7432 event->pmu = pmu; in perf_try_init_event()
7433 ret = pmu->event_init(event); in perf_try_init_event()
7439 module_put(pmu->module); in perf_try_init_event()
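perf_try_init_event() pins the driver module before calling its event_init() and drops the pin again if the driver rejects the event, so a module can only unload while no live event references it. A standalone model of that pin/unpin discipline (the plain refcount stands in for try_module_get()/module_put(), an illustrative simplification):

#include <stdio.h>
#include <errno.h>

struct module { int refs; };

static int try_module_get(struct module *m) { m->refs++; return 1; }
static void module_put(struct module *m)    { m->refs--; }

struct pmu {
	struct module *module;
	int (*event_init)(void);
};

static int perf_try_init_event(struct pmu *pmu)
{
	int ret;

	if (!try_module_get(pmu->module))
		return -ENODEV;
	ret = pmu->event_init();
	if (ret)
		module_put(pmu->module);	/* rejected: drop the pin */
	return ret;
}

static int reject(void) { return -ENOENT; }	/* "not my kind of event" */

int main(void)
{
	struct module mod = { 0 };
	struct pmu pmu = { &mod, reject };

	perf_try_init_event(&pmu);
	printf("module refs after rejection: %d\n", mod.refs);	/* 0 */
	return 0;
}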
7444 struct pmu *perf_init_event(struct perf_event *event) in perf_init_event()
7446 struct pmu *pmu = NULL; in perf_init_event() local
7453 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
7455 if (pmu) { in perf_init_event()
7456 ret = perf_try_init_event(pmu, event); in perf_init_event()
7458 pmu = ERR_PTR(ret); in perf_init_event()
7462 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_init_event()
7463 ret = perf_try_init_event(pmu, event); in perf_init_event()
7468 pmu = ERR_PTR(ret); in perf_init_event()
7472 pmu = ERR_PTR(-ENOENT); in perf_init_event()
7476 return pmu; in perf_init_event()
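perf_init_event() resolves an event to a PMU in two steps: a fast idr lookup by attr.type for explicitly typed events (line 7453), then a scan of every registered PMU in which -ENOENT from event_init() means "not mine, keep looking" and any other error aborts the search. A standalone model of the scan (arrays replace the idr and the RCU-protected list, a simplification):

#include <stdio.h>
#include <errno.h>

struct pmu {
	int type;
	int (*event_init)(const struct pmu *pmu, int wanted_type);
};

static int init(const struct pmu *pmu, int wanted)
{
	return wanted == pmu->type ? 0 : -ENOENT;
}

static struct pmu pmus[] = { { 4, init }, { 7, init } };

static struct pmu *perf_init_event(int type)
{
	unsigned int i;
	int ret;

	/* (the fast path via idr_find() is omitted in this model) */
	for (i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
		ret = pmus[i].event_init(&pmus[i], type);
		if (!ret)
			return &pmus[i];	/* this PMU claimed the event */
		if (ret != -ENOENT)
			return NULL;		/* hard error: stop searching */
	}
	return NULL;				/* nobody wants it */
}

int main(void)
{
	printf("type 7: %s\n", perf_init_event(7) ? "found" : "no pmu");
	printf("type 9: %s\n", perf_init_event(9) ? "found" : "no pmu");
	return 0;
}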
7524 struct pmu *pmu; in perf_event_alloc() local
7565 event->pmu = NULL; in perf_event_alloc()
7599 pmu = NULL; in perf_event_alloc()
7624 pmu = perf_init_event(event); in perf_event_alloc()
7625 if (!pmu) in perf_event_alloc()
7627 else if (IS_ERR(pmu)) { in perf_event_alloc()
7628 err = PTR_ERR(pmu); in perf_event_alloc()
7655 module_put(pmu->module); in perf_event_alloc()
7831 event->pmu != output_event->pmu) in perf_event_set_output()
7897 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
7922 struct pmu *pmu; in SYSCALL_DEFINE5() local
8004 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
8014 pmu = event->pmu; in SYSCALL_DEFINE5()
8033 pmu = group_leader->pmu; in SYSCALL_DEFINE5()
8048 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8054 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { in SYSCALL_DEFINE5()
8274 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8303 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) in perf_pmu_migrate_context() argument
8310 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8311 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
8739 child_ctx = alloc_perf_context(parent_ctx->pmu, child); in inherit_task_group()
8917 struct pmu *pmu; in perf_event_exit_cpu_context() local
8921 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_event_exit_cpu_context()
8922 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
9032 struct pmu *pmu; in perf_event_sysfs_init() local
9041 list_for_each_entry(pmu, &pmus, entry) { in perf_event_sysfs_init()
9042 if (!pmu->name || pmu->type < 0) in perf_event_sysfs_init()
9045 ret = pmu_dev_alloc(pmu); in perf_event_sysfs_init()
9046 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); in perf_event_sysfs_init()