Lines matching refs:pmu (identifier cross-reference; the functions listed are from kernel/events/core.c)

343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
478 struct pmu *pmu; in perf_cgroup_switch() local
493 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_cgroup_switch()
494 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cgroup_switch()
495 if (cpuctx->unique_pmu != pmu) in perf_cgroup_switch()
507 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
530 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
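
Several pmus can share one per-cpu context (see find_pmu_context() further down this listing), so walks like the one above use cpuctx->unique_pmu to visit each shared context exactly once: any pmu whose context names a different owner is skipped. A minimal userspace sketch of that dedup rule, with toy structures standing in for struct pmu and struct perf_cpu_context:

    #include <stdio.h>

    struct toy_pmu;
    struct ctx { struct toy_pmu *unique_pmu; };
    struct toy_pmu { const char *name; struct ctx *cpuctx; };

    int main(void)
    {
        struct ctx shared;
        struct toy_pmu a = { "a", &shared }, b = { "b", &shared };
        struct toy_pmu *all[] = { &a, &b };
        int i;

        shared.unique_pmu = &a;         /* one designated owner per context */

        for (i = 0; i < 2; i++) {
            if (all[i]->cpuctx->unique_pmu != all[i])
                continue;               /* shared context already visited */
            printf("visit context via pmu %s\n", all[i]->name);
        }
        return 0;
    }
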
785 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init() local
789 if (pmu->task_ctx_nr == perf_sw_context) in __perf_mux_hrtimer_init()
796 interval = pmu->hrtimer_interval_ms; in __perf_mux_hrtimer_init()
798 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; in __perf_mux_hrtimer_init()
810 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart() local
814 if (pmu->task_ctx_nr == perf_sw_context) in perf_mux_hrtimer_restart()
828 void perf_pmu_disable(struct pmu *pmu) in perf_pmu_disable() argument
830 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_disable()
832 pmu->pmu_disable(pmu); in perf_pmu_disable()
835 void perf_pmu_enable(struct pmu *pmu) in perf_pmu_enable() argument
837 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_enable()
839 pmu->pmu_enable(pmu); in perf_pmu_enable()
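
perf_pmu_disable()/perf_pmu_enable() nest through a per-CPU counter, so only the outermost disable/enable pair actually reaches the driver callbacks. A minimal sketch of that nesting discipline, with a plain int standing in for this_cpu_ptr(pmu->pmu_disable_count) and stubs for the hardware hooks:

    #include <stdio.h>

    static int disable_count;   /* models this_cpu_ptr(pmu->pmu_disable_count) */

    static void hw_disable(void) { puts("pmu->pmu_disable()"); }
    static void hw_enable(void)  { puts("pmu->pmu_enable()"); }

    static void pmu_disable(void)
    {
        if (!disable_count++)   /* only the outermost disable hits hardware */
            hw_disable();
    }

    static void pmu_enable(void)
    {
        if (!--disable_count)   /* only the outermost enable hits hardware */
            hw_enable();
    }

    int main(void)
    {
        pmu_disable();          /* hardware disabled here */
        pmu_disable();          /* nested: no-op */
        pmu_enable();           /* nested: no-op */
        pmu_enable();           /* hardware re-enabled here */
        return 0;
    }
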
1544 struct pmu *pmu = event->pmu; in pmu_filter_match() local
1545 return pmu->filter_match ? pmu->filter_match(event) : 1; in pmu_filter_match()
1582 perf_pmu_disable(event->pmu); in event_sched_out()
1585 event->pmu->del(event, 0); in event_sched_out()
1605 perf_pmu_enable(event->pmu); in event_sched_out()
1906 perf_pmu_disable(event->pmu); in event_sched_in()
1912 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
1935 perf_pmu_enable(event->pmu); in event_sched_in()
1946 struct pmu *pmu = ctx->pmu; in group_sched_in() local
1953 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); in group_sched_in()
1956 pmu->cancel_txn(pmu); in group_sched_in()
1971 if (!pmu->commit_txn(pmu)) in group_sched_in()
2002 pmu->cancel_txn(pmu); in group_sched_in()
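
group_sched_in() schedules a leader and its siblings as one PMU transaction: start_txn(PERF_PMU_TXN_ADD), add every event, then commit_txn(); any failure unwinds the partial group and aborts with cancel_txn(), so the group goes on or off the PMU as a unit. A condensed, compilable sketch of that control flow (toy event type and stub callbacks; scheduling details elided):

    #include <stdio.h>

    struct event { const char *name; int fails; };  /* toy event */

    static void start_txn(void)  { puts("start_txn(TXN_ADD)"); }
    static int  commit_txn(void) { puts("commit_txn()"); return 0; }
    static void cancel_txn(void) { puts("cancel_txn()"); }

    static int event_sched_in(struct event *e)
    {
        printf("add %s\n", e->name);
        return e->fails ? -1 : 0;
    }

    static void event_sched_out(struct event *e)
    {
        printf("del %s\n", e->name);
    }

    /* Mirrors group_sched_in(): leader + siblings as one transaction. */
    static int group_sched_in(struct event *leader, struct event *sibs, int n)
    {
        int i = 0;

        start_txn();
        if (event_sched_in(leader)) {
            cancel_txn();
            return -1;
        }
        for (i = 0; i < n; i++)
            if (event_sched_in(&sibs[i]))
                goto group_error;
        if (!commit_txn())
            return 0;               /* whole group is now on the PMU */

    group_error:
        while (i--)                 /* unwind the partial group... */
            event_sched_out(&sibs[i]);
        event_sched_out(leader);
        cancel_txn();               /* ...then abort the transaction */
        return -1;
    }

    int main(void)
    {
        struct event leader = { "leader", 0 };
        struct event sibs[] = { { "sib0", 0 }, { "sib1", 1 } };

        if (group_sched_in(&leader, sibs, 2))
            puts("group rejected as a unit");
        return 0;
    }
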
2085 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
2126 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
2416 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
2426 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
2484 event->pmu->read(event); in __perf_event_sync_stat()
2609 void perf_sched_cb_dec(struct pmu *pmu) in perf_sched_cb_dec() argument
2614 void perf_sched_cb_inc(struct pmu *pmu) in perf_sched_cb_inc() argument
2628 struct pmu *pmu; in perf_pmu_sched_task() local
2638 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_pmu_sched_task()
2639 if (pmu->sched_task) { in perf_pmu_sched_task()
2640 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_sched_task()
2644 perf_pmu_disable(pmu); in perf_pmu_sched_task()
2646 pmu->sched_task(cpuctx->task_ctx, sched_in); in perf_pmu_sched_task()
2648 perf_pmu_enable(pmu); in perf_pmu_sched_task()
2828 perf_pmu_disable(ctx->pmu); in perf_event_context_sched_in()
2841 perf_pmu_enable(ctx->pmu); in perf_event_context_sched_in()
2980 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
2985 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
3011 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
3020 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
3027 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3036 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3052 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
3054 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
3057 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
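
The stop/start pairs above follow a fixed protocol for freq-mode events: stop(PERF_EF_UPDATE) drains the hardware count, the period is recomputed from the observed event rate, and start(PERF_EF_RELOAD) reprograms the counter. A deliberately simplified model of the rate-to-period arithmetic (the kernel's perf_calculate_period() is a more careful fixed-point version of the same idea):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /*
     * Given how many events were counted over an interval, pick a
     * sample_period so the counter overflows roughly `freq` times per
     * second.  Simplified: no fixed point, no clamping.
     */
    static uint64_t calc_period(uint64_t count, uint64_t nsec, uint64_t freq)
    {
        uint64_t rate = count * NSEC_PER_SEC / nsec;    /* events/second */
        uint64_t period = rate / freq;                  /* events/sample */

        return period ? period : 1;
    }

    int main(void)
    {
        /* 2e9 events/s at 1000 samples/s -> period of 2e6 events */
        printf("%llu\n", (unsigned long long)
               calc_period(2000000000ULL, NSEC_PER_SEC, 1000));
        return 0;
    }
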
3094 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
3106 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
3233 struct pmu *pmu = event->pmu; in __perf_event_read() local
3256 pmu->read(event); in __perf_event_read()
3261 pmu->start_txn(pmu, PERF_PMU_TXN_READ); in __perf_event_read()
3263 pmu->read(event); in __perf_event_read()
3272 sub->pmu->read(sub); in __perf_event_read()
3276 data->ret = pmu->commit_txn(pmu); in __perf_event_read()
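
Group reads reuse the transaction machinery with PERF_PMU_TXN_READ: the pmu->read() calls only queue work, and commit_txn() performs the actual hardware access, with the status landing in data->ret. A toy sketch of that batching:

    #include <stdio.h>

    static int queued;              /* counters queued in this transaction */

    static void start_read_txn(void) { queued = 0; }

    static void queue_read(const char *ev)
    {
        printf("queue read of %s\n", ev);
        queued++;
    }

    static int commit_read_txn(void)
    {
        printf("one hardware access fetches %d counters\n", queued);
        return 0;                   /* this status lands in data->ret */
    }

    int main(void)
    {
        start_read_txn();
        queue_read("leader");
        queue_read("sibling0");
        queue_read("sibling1");
        return commit_read_txn();
    }
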
3284 if (event->pmu->count) in perf_event_count()
3285 return event->pmu->count(event); in perf_event_count()
3327 WARN_ON_ONCE(event->pmu->count); in perf_event_read_local()
3335 event->pmu->read(event); in perf_event_read_local()
3400 alloc_perf_context(struct pmu *pmu, struct task_struct *task) in alloc_perf_context() argument
3413 ctx->pmu = pmu; in alloc_perf_context()
3442 find_get_context(struct pmu *pmu, struct task_struct *task, in find_get_context() argument
3465 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
3474 ctxn = pmu->task_ctx_nr; in find_get_context()
3479 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); in find_get_context()
3501 ctx = alloc_perf_context(pmu, task); in find_get_context()
3612 struct pmu *pmu = event->pmu; in exclusive_event_init() local
3614 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_init()
3631 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) in exclusive_event_init()
3634 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) in exclusive_event_init()
3643 struct pmu *pmu = event->pmu; in exclusive_event_destroy() local
3645 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_destroy()
3650 atomic_dec(&pmu->exclusive_cnt); in exclusive_event_destroy()
3652 atomic_inc(&pmu->exclusive_cnt); in exclusive_event_destroy()
3657 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && in exclusive_event_match()
3670 struct pmu *pmu = event->pmu; in exclusive_event_installable() local
3672 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) in exclusive_event_installable()
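
exclusive_event_init() uses pmu->exclusive_cnt as a signed either/or counter: per-task events push it positive, per-CPU events push it negative, so the two kinds can never coexist on a PERF_PMU_CAP_EXCLUSIVE pmu. A self-contained sketch using C11 atomics, with hand-rolled equivalents of the kernel's atomic_inc_unless_negative()/atomic_dec_unless_positive():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int exclusive_cnt;    /* >0: task events, <0: cpu events */

    static bool inc_unless_negative(atomic_int *v)
    {
        int c = atomic_load(v);
        do {
            if (c < 0)
                return false;
        } while (!atomic_compare_exchange_weak(v, &c, c + 1));
        return true;
    }

    static bool dec_unless_positive(atomic_int *v)
    {
        int c = atomic_load(v);
        do {
            if (c > 0)
                return false;
        } while (!atomic_compare_exchange_weak(v, &c, c - 1));
        return true;
    }

    int main(void)
    {
        /* two per-task events: both admitted */
        printf("task: %d\n", inc_unless_negative(&exclusive_cnt));
        printf("task: %d\n", inc_unless_negative(&exclusive_cnt));
        /* a per-cpu event must now be refused (prints 0) */
        printf("cpu:  %d\n", dec_unless_positive(&exclusive_cnt));
        return 0;
    }
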
3698 if (event->pmu) { in __free_event()
3700 module_put(event->pmu->module); in __free_event()
4161 perf_pmu_disable(ctx->pmu); in __perf_event_period()
4162 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
4168 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
4169 perf_pmu_enable(ctx->pmu); in __perf_event_period()
4388 return event->pmu->event_idx(event); in perf_event_index()
4615 if (event->pmu->event_mapped) in perf_mmap_open()
4616 event->pmu->event_mapped(event); in perf_mmap_open()
4636 if (event->pmu->event_unmapped) in perf_mmap_close()
4637 event->pmu->event_unmapped(event); in perf_mmap_close()
4922 if (event->pmu->event_mapped) in perf_mmap()
4923 event->pmu->event_mapped(event); in perf_mmap()
5274 leader->pmu->read(leader); in perf_output_read_group()
5287 sub->pmu->read(sub); in perf_output_read_group()
5688 struct pmu *pmu; in perf_event_aux() local
5703 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_event_aux()
5704 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
5705 if (cpuctx->unique_pmu != pmu) in perf_event_aux()
5708 ctxn = pmu->task_ctx_nr; in perf_event_aux()
5715 put_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
6370 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
6903 static struct pmu perf_swevent = {
7026 static struct pmu perf_tracepoint = {
7163 event->pmu->read(event); in perf_swevent_hrtimer()
7302 static struct pmu perf_cpu_clock = {
7383 static struct pmu perf_task_clock = {
7396 static void perf_pmu_nop_void(struct pmu *pmu) in perf_pmu_nop_void() argument
7400 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) in perf_pmu_nop_txn() argument
7404 static int perf_pmu_nop_int(struct pmu *pmu) in perf_pmu_nop_int() argument
7411 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) in perf_pmu_start_txn() argument
7418 perf_pmu_disable(pmu); in perf_pmu_start_txn()
7421 static int perf_pmu_commit_txn(struct pmu *pmu) in perf_pmu_commit_txn() argument
7430 perf_pmu_enable(pmu); in perf_pmu_commit_txn()
7434 static void perf_pmu_cancel_txn(struct pmu *pmu) in perf_pmu_cancel_txn() argument
7443 perf_pmu_enable(pmu); in perf_pmu_cancel_txn()
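
For drivers that leave the transaction hooks NULL, the core installs defaults: when the pmu has real pmu_enable/pmu_disable callbacks, start_txn simply disables the pmu for an ADD transaction and commit/cancel re-enable it, so the "transaction" is just a disabled window; read transactions skip the disable. A sketch of that fallback (the TXN_ADD/TXN_READ values are illustrative, and a plain variable models the per-cpu flags word):

    #include <stdio.h>

    #define TXN_ADD  0x1            /* illustrative flag values */
    #define TXN_READ 0x2

    static unsigned int txn_flags;  /* models the per-cpu flags word */

    static void pmu_disable(void) { puts("perf_pmu_disable()"); }
    static void pmu_enable(void)  { puts("perf_pmu_enable()"); }

    static void default_start_txn(unsigned int flags)
    {
        txn_flags = flags;
        if (flags & ~TXN_ADD)
            return;                 /* read transactions skip the disable */
        pmu_disable();
    }

    static int default_commit_txn(void)
    {
        unsigned int flags = txn_flags;

        txn_flags = 0;
        if (flags & ~TXN_ADD)
            return 0;
        pmu_enable();
        return 0;                   /* the default commit cannot fail */
    }

    static void default_cancel_txn(void)
    {
        unsigned int flags = txn_flags;

        txn_flags = 0;
        if (flags & ~TXN_ADD)
            return;
        pmu_enable();               /* abort also re-enables */
    }

    int main(void)
    {
        default_start_txn(TXN_ADD);     /* disables the pmu */
        default_commit_txn();           /* re-enables it */
        default_start_txn(TXN_READ);    /* no disable for reads */
        default_commit_txn();
        default_start_txn(TXN_ADD);
        default_cancel_txn();
        return 0;
    }
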
7457 struct pmu *pmu; in find_pmu_context() local
7462 list_for_each_entry(pmu, &pmus, entry) { in find_pmu_context()
7463 if (pmu->task_ctx_nr == ctxn) in find_pmu_context()
7464 return pmu->pmu_cpu_context; in find_pmu_context()
7470 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) in update_pmu_context() argument
7477 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
7480 cpuctx->unique_pmu = pmu; in update_pmu_context()
7484 static void free_pmu_context(struct pmu *pmu) in free_pmu_context() argument
7486 struct pmu *i; in free_pmu_context()
7493 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { in free_pmu_context()
7494 update_pmu_context(i, pmu); in free_pmu_context()
7499 free_percpu(pmu->pmu_cpu_context); in free_pmu_context()
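
find_pmu_context()/free_pmu_context() implement context sharing: pmus with the same task_ctx_nr reuse one per-cpu context, and the percpu storage is only freed once no other registered pmu still points at it (update_pmu_context() re-homes unique_pmu first). A toy list-walk sketch of that sharing rule, with hypothetical types in place of the kernel's:

    #include <stdio.h>

    struct toy_pmu {
        int task_ctx_nr;
        int *cpu_ctx;               /* stands in for pmu_cpu_context */
        struct toy_pmu *next;
    };

    static struct toy_pmu *pmus;    /* models the global pmus list */

    /* Reuse an existing context if some pmu already owns this ctx_nr. */
    static int *find_pmu_context(int ctxn)
    {
        struct toy_pmu *p;

        for (p = pmus; p; p = p->next)
            if (p->task_ctx_nr == ctxn)
                return p->cpu_ctx;
        return NULL;
    }

    /* Free the context only when nobody else still shares it. */
    static void free_pmu_context(struct toy_pmu *pmu)
    {
        struct toy_pmu *p;

        for (p = pmus; p; p = p->next) {
            if (p != pmu && p->cpu_ctx == pmu->cpu_ctx) {
                puts("context still shared, not freed");
                return;             /* kernel re-homes unique_pmu here */
            }
        }
        puts("last user, freeing context");
    }

    int main(void)
    {
        static int ctx_storage;
        static struct toy_pmu a = { 1, &ctx_storage, NULL };
        static struct toy_pmu b = { 1, NULL, NULL };

        a.next = &b;
        pmus = &a;
        b.cpu_ctx = find_pmu_context(b.task_ctx_nr);    /* reuses a's */
        free_pmu_context(&a);       /* refused: b still shares it */
        return 0;
    }
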
7508 struct pmu *pmu = dev_get_drvdata(dev); in type_show() local
7510 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); in type_show()
7519 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_show() local
7521 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); in perf_event_mux_interval_ms_show()
7531 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_store() local
7542 if (timer == pmu->hrtimer_interval_ms) in perf_event_mux_interval_ms_store()
7546 pmu->hrtimer_interval_ms = timer; in perf_event_mux_interval_ms_store()
7552 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
7583 static int pmu_dev_alloc(struct pmu *pmu) in pmu_dev_alloc() argument
7587 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); in pmu_dev_alloc()
7588 if (!pmu->dev) in pmu_dev_alloc()
7591 pmu->dev->groups = pmu->attr_groups; in pmu_dev_alloc()
7592 device_initialize(pmu->dev); in pmu_dev_alloc()
7593 ret = dev_set_name(pmu->dev, "%s", pmu->name); in pmu_dev_alloc()
7597 dev_set_drvdata(pmu->dev, pmu); in pmu_dev_alloc()
7598 pmu->dev->bus = &pmu_bus; in pmu_dev_alloc()
7599 pmu->dev->release = pmu_dev_release; in pmu_dev_alloc()
7600 ret = device_add(pmu->dev); in pmu_dev_alloc()
7608 put_device(pmu->dev); in pmu_dev_alloc()
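
The error path in pmu_dev_alloc() ends with put_device(), not kfree(): once device_initialize() has set up the refcount, the object must die through its release callback (pmu_dev_release). A small refcount model of that rule, with hypothetical toy_dev/put_dev names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted object: freed via its release hook, never directly. */
    struct toy_dev {
        int refcount;
        void (*release)(struct toy_dev *);
    };

    static void dev_release(struct toy_dev *d)
    {
        puts("release: freeing device");
        free(d);
    }

    static void put_dev(struct toy_dev *d)
    {
        if (!--d->refcount)
            d->release(d);
    }

    int main(void)
    {
        struct toy_dev *d = malloc(sizeof(*d));

        if (!d)
            return 1;
        d->refcount = 1;            /* device_initialize() takes the ref */
        d->release = dev_release;

        /* pretend device_add() failed: drop the ref, don't free() */
        put_dev(d);
        return 0;
    }
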
7615 int perf_pmu_register(struct pmu *pmu, const char *name, int type) in perf_pmu_register() argument
7621 pmu->pmu_disable_count = alloc_percpu(int); in perf_pmu_register()
7622 if (!pmu->pmu_disable_count) in perf_pmu_register()
7625 pmu->type = -1; in perf_pmu_register()
7628 pmu->name = name; in perf_pmu_register()
7631 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); in perf_pmu_register()
7637 pmu->type = type; in perf_pmu_register()
7640 ret = pmu_dev_alloc(pmu); in perf_pmu_register()
7646 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); in perf_pmu_register()
7647 if (pmu->pmu_cpu_context) in perf_pmu_register()
7651 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); in perf_pmu_register()
7652 if (!pmu->pmu_cpu_context) in perf_pmu_register()
7658 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
7662 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
7666 cpuctx->unique_pmu = pmu; in perf_pmu_register()
7670 if (!pmu->start_txn) { in perf_pmu_register()
7671 if (pmu->pmu_enable) { in perf_pmu_register()
7677 pmu->start_txn = perf_pmu_start_txn; in perf_pmu_register()
7678 pmu->commit_txn = perf_pmu_commit_txn; in perf_pmu_register()
7679 pmu->cancel_txn = perf_pmu_cancel_txn; in perf_pmu_register()
7681 pmu->start_txn = perf_pmu_nop_txn; in perf_pmu_register()
7682 pmu->commit_txn = perf_pmu_nop_int; in perf_pmu_register()
7683 pmu->cancel_txn = perf_pmu_nop_void; in perf_pmu_register()
7687 if (!pmu->pmu_enable) { in perf_pmu_register()
7688 pmu->pmu_enable = perf_pmu_nop_void; in perf_pmu_register()
7689 pmu->pmu_disable = perf_pmu_nop_void; in perf_pmu_register()
7692 if (!pmu->event_idx) in perf_pmu_register()
7693 pmu->event_idx = perf_event_idx_default; in perf_pmu_register()
7695 list_add_rcu(&pmu->entry, &pmus); in perf_pmu_register()
7696 atomic_set(&pmu->exclusive_cnt, 0); in perf_pmu_register()
7704 device_del(pmu->dev); in perf_pmu_register()
7705 put_device(pmu->dev); in perf_pmu_register()
7708 if (pmu->type >= PERF_TYPE_MAX) in perf_pmu_register()
7709 idr_remove(&pmu_idr, pmu->type); in perf_pmu_register()
7712 free_percpu(pmu->pmu_disable_count); in perf_pmu_register()
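
perf_pmu_register() is a staged init: allocate the percpu disable count, reserve an idr type, create the sysfs device, find or allocate the cpu context, fill in default callbacks, and only then publish the pmu on the RCU list; each failure unwinds exactly the stages already completed, as the error-path statements above (device_del/put_device, idr_remove, free_percpu) show. A skeleton of that goto-unwind discipline, with hypothetical stage functions standing in for the real allocations:

    #include <stdio.h>

    /* Hypothetical stages standing in for alloc_percpu(), idr_alloc()
     * and pmu_dev_alloc() in perf_pmu_register(). */
    static int stage_pdc(void) { return 0; }
    static int stage_idr(void) { return 0; }
    static int stage_dev(void) { return -1; }   /* pretend device_add() failed */

    static int pmu_register(void)
    {
        int ret;

        ret = stage_pdc();
        if (ret)
            goto out;
        ret = stage_idr();
        if (ret)
            goto free_pdc;
        ret = stage_dev();
        if (ret)
            goto free_idr;
        puts("list_add_rcu(&pmu->entry, &pmus)");   /* publish last */
        return 0;

    free_idr:
        puts("idr_remove(&pmu_idr, pmu->type)");
    free_pdc:
        puts("free_percpu(pmu->pmu_disable_count)");
    out:
        return ret;
    }

    int main(void) { return pmu_register() ? 1 : 0; }
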
7717 void perf_pmu_unregister(struct pmu *pmu) in perf_pmu_unregister() argument
7720 list_del_rcu(&pmu->entry); in perf_pmu_unregister()
7730 free_percpu(pmu->pmu_disable_count); in perf_pmu_unregister()
7731 if (pmu->type >= PERF_TYPE_MAX) in perf_pmu_unregister()
7732 idr_remove(&pmu_idr, pmu->type); in perf_pmu_unregister()
7733 device_del(pmu->dev); in perf_pmu_unregister()
7734 put_device(pmu->dev); in perf_pmu_unregister()
7735 free_pmu_context(pmu); in perf_pmu_unregister()
7739 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
7744 if (!try_module_get(pmu->module)) in perf_try_init_event()
7757 event->pmu = pmu; in perf_try_init_event()
7758 ret = pmu->event_init(event); in perf_try_init_event()
7764 module_put(pmu->module); in perf_try_init_event()
7769 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event()
7771 struct pmu *pmu = NULL; in perf_init_event() local
7778 pmu = idr_find(&pmu_idr, event->attr.type); in perf_init_event()
7780 if (pmu) { in perf_init_event()
7781 ret = perf_try_init_event(pmu, event); in perf_init_event()
7783 pmu = ERR_PTR(ret); in perf_init_event()
7787 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_init_event()
7788 ret = perf_try_init_event(pmu, event); in perf_init_event()
7793 pmu = ERR_PTR(ret); in perf_init_event()
7797 pmu = ERR_PTR(-ENOENT); in perf_init_event()
7801 return pmu; in perf_init_event()
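
perf_init_event() resolves an event to a pmu in two steps: a fast idr lookup by attr.type, then a linear RCU walk of every registered pmu, stopping at the first event_init() that either succeeds or fails with anything other than -ENOENT (-ENOENT means "not mine, keep probing"). A compilable toy version of that probe loop:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct toy_pmu { const char *name; int (*event_init)(void); };

    static int accepts(void)  { return 0; }
    static int declines(void) { return -ENOENT; }   /* "not mine" */

    static struct toy_pmu table[] = {
        { "tracepoint", declines },
        { "cpu",        accepts  },
    };

    /* Probe every pmu until one claims the event or hard-fails. */
    static struct toy_pmu *init_event(void)
    {
        size_t i;
        int ret;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            ret = table[i].event_init();
            if (!ret)
                return &table[i];   /* this pmu claimed the event */
            if (ret != -ENOENT)
                return NULL;        /* hard error: stop probing */
        }
        return NULL;                /* no pmu matched */
    }

    int main(void)
    {
        struct toy_pmu *p = init_event();

        printf("resolved to: %s\n", p ? p->name : "(none)");
        return 0;
    }
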
7853 struct pmu *pmu; in perf_event_alloc() local
7894 event->pmu = NULL; in perf_event_alloc()
7928 pmu = NULL; in perf_event_alloc()
7953 pmu = perf_init_event(event); in perf_event_alloc()
7954 if (!pmu) in perf_event_alloc()
7956 else if (IS_ERR(pmu)) { in perf_event_alloc()
7957 err = PTR_ERR(pmu); in perf_event_alloc()
7984 module_put(pmu->module); in perf_event_alloc()
8160 event->pmu != output_event->pmu) in perf_event_set_output()
8226 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
8251 struct pmu *pmu; in SYSCALL_DEFINE5() local
8351 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
8361 pmu = event->pmu; in SYSCALL_DEFINE5()
8380 pmu = group_leader->pmu; in SYSCALL_DEFINE5()
8395 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
8401 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { in SYSCALL_DEFINE5()
8652 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
8681 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) in perf_pmu_migrate_context() argument
8688 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8689 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
9151 child_ctx = alloc_perf_context(parent_ctx->pmu, child); in inherit_task_group()
9328 struct pmu *pmu; in perf_event_exit_cpu_context() local
9332 list_for_each_entry_rcu(pmu, &pmus, entry) { in perf_event_exit_cpu_context()
9333 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
9436 struct pmu *pmu; in perf_event_sysfs_init() local
9445 list_for_each_entry(pmu, &pmus, entry) { in perf_event_sysfs_init()
9446 if (!pmu->name || pmu->type < 0) in perf_event_sysfs_init()
9449 ret = pmu_dev_alloc(pmu); in perf_event_sysfs_init()
9450 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); in perf_event_sysfs_init()