l2cache_pmu       180 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu;
l2cache_pmu       192 drivers/perf/qcom_l2_pmu.c #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
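The two hits above are the back-pointer plumbing for the whole file: struct cluster_pmu (line 180) points up at its owning struct l2cache_pmu, and to_l2cache_pmu() (line 192) recovers the l2cache_pmu that embeds a given struct pmu via container_of(). A minimal, self-contained illustration of the same idiom; the struct layouts here are stand-ins for this sketch, not the driver's real definitions:

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel's container_of(): subtract the member's
 * offset to get from a member pointer back to its container. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { int type; };                /* stand-in for the perf core type */

struct l2cache_pmu {
        int num_counters;
        struct pmu pmu;                  /* embedded, as in the driver */
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

int main(void)
{
        struct l2cache_pmu l2 = { .num_counters = 9 };
        struct pmu *p = &l2.pmu;         /* what the perf core hands back */

        printf("num_counters = %d\n", to_l2cache_pmu(p)->num_counters);
        return 0;
}

This is why every callback below can start from a bare struct pmu * or struct hlist_node * and still reach the driver's state.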
l2cache_pmu       206 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu, int cpu)
l2cache_pmu       208 drivers/perf/qcom_l2_pmu.c 	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
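Hits 206-208 are the body of the CPU-to-cluster lookup helper. The driver keeps one pointer slot per possible CPU (allocated with devm_alloc_percpu(), hit at line 970 below), so resolving a CPU to its cluster is a single per-CPU dereference. Reassembled from the two lines, assuming the return type is struct cluster_pmu * as the call sites at lines 818 and 855 suggest:

static struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        /* pmu_cluster is a percpu slot holding a cluster pointer;
         * NULL means this CPU has not been associated yet. */
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

The callers below (event init and both hotplug callbacks) rely on that NULL convention.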
l2cache_pmu       386 drivers/perf/qcom_l2_pmu.c 	int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
l2cache_pmu       430 drivers/perf/qcom_l2_pmu.c 	int num_counters = cluster->l2cache_pmu->num_counters;
l2cache_pmu       485 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu;
l2cache_pmu       490 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu = to_l2cache_pmu(event->pmu);
l2cache_pmu       493 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       499 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       507 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       516 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       524 drivers/perf/qcom_l2_pmu.c 			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       530 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
l2cache_pmu       533 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       541 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       551 drivers/perf/qcom_l2_pmu.c 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
l2cache_pmu       563 drivers/perf/qcom_l2_pmu.c 			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
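The run of dev_dbg_ratelimited() hits between lines 485 and 563 is the event_init validation ladder: each unsupported configuration gets a rate-limited debug message on the platform device before the error return. A condensed sketch of that shape; the specific checks, messages, and error codes are assumptions modeled on typical uncore PMU drivers, not the exact list in this file:

static int l2_cache_event_init(struct perf_event *event)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
        struct cluster_pmu *cluster;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* An uncore counter cannot sample or follow a single task */
        if (is_sampling_event(event)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        /* Hit at line 530: resolve the target CPU's cluster */
        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "CPU%d not associated with a cluster\n",
                                    event->cpu);
                return -EINVAL;
        }

        /* ... group and filter validation continues (hits 541-563) ... */
        return 0;
}

Rate limiting matters here because event_init runs on every perf_event_open(2) call, so a misbehaving unprivileged process must not be able to flood the kernel log.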
l2cache_pmu       681 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
l2cache_pmu       683 drivers/perf/qcom_l2_pmu.c 	return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
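Lines 681-683 are the conventional uncore cpumask sysfs attribute, which tells perf(1) which CPU to open cluster events on. Filled out with the usual DEVICE_ATTR_RO() wiring, which is assumed since only the function body appears in the hits:

static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        /* "true" formats a CPU list such as "0,4", not a hex mask */
        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}
static DEVICE_ATTR_RO(cpumask);

Note the drvdata here is the struct pmu * that the perf core stores on the PMU's sysfs device, which is why to_l2cache_pmu() is needed at all.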
l2cache_pmu       780 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu, int cpu)
l2cache_pmu       797 drivers/perf/qcom_l2_pmu.c 	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
l2cache_pmu       801 drivers/perf/qcom_l2_pmu.c 		dev_info(&l2cache_pmu->pdev->dev,
l2cache_pmu       805 drivers/perf/qcom_l2_pmu.c 		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
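Hits 780-805 outline the association step: walk the clusters list built at probe time, find the entry matching this CPU's cluster, log the binding, and cache it in the per-CPU slot that get_cluster_pmu() reads. A sketch of the walk; the real function derives the CPU's cluster id from the hardware topology itself, which is elided here and passed in as a parameter, and cluster_id is an assumed field name:

static struct cluster_pmu *associate_cpu(struct l2cache_pmu *l2cache_pmu,
                                         int cpu, int cpu_cluster_id)
{
        struct cluster_pmu *cluster;

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n",
                         cpu, cluster->cluster_id);
                /* Cache the binding for O(1) lookup on hot paths */
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }

        return NULL;    /* firmware described no matching cluster */
}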
l2cache_pmu       815 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu;
l2cache_pmu       817 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
l2cache_pmu       818 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
l2cache_pmu       821 drivers/perf/qcom_l2_pmu.c 		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
l2cache_pmu       838 drivers/perf/qcom_l2_pmu.c 	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
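Lines 815-838 are the hotplug-online callback. The cpuhp core hands back the hlist_node embedded in struct l2cache_pmu, so hlist_entry_safe() (another container_of variant) recovers the driver state; a CPU seen for the first time gets associated with its cluster, then joins the PMU's active cpumask. A condensed sketch, with the one-serving-CPU-per-cluster bookkeeping elided:

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu =
                hlist_entry_safe(node, struct l2cache_pmu, node);
        struct cluster_pmu *cluster = get_cluster_pmu(l2cache_pmu, cpu);

        if (!cluster) {
                /* First time this CPU has come online */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster)
                        return 0;       /* cluster not described by firmware */
        }

        /* Mark this CPU as usable for events on its cluster */
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
        return 0;
}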
l2cache_pmu       850 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu;
l2cache_pmu       854 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
l2cache_pmu       855 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
l2cache_pmu       864 drivers/perf/qcom_l2_pmu.c 	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
l2cache_pmu       876 drivers/perf/qcom_l2_pmu.c 	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
l2cache_pmu       878 drivers/perf/qcom_l2_pmu.c 	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
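Lines 850-878 are the mirror image: when a serving CPU goes down, the callback drops it from the active mask, looks for another online CPU in the same cluster, and migrates the perf context so open events keep counting across the hotplug. A sketch, assuming the cluster tracks its CPUs in a cluster_cpus mask (an assumed field name):

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu =
                hlist_entry_safe(node, struct l2cache_pmu, node);
        struct cluster_pmu *cluster = get_cluster_pmu(l2cache_pmu, cpu);
        cpumask_t online_cpus;
        unsigned int target;

        if (!cluster)
                return 0;

        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);

        /* Any other online CPU in this cluster can take over */
        cpumask_and(&online_cpus, &cluster->cluster_cpus, cpu_online_mask);
        target = cpumask_any_but(&online_cpus, cpu);
        if (target >= nr_cpu_ids)
                return 0;       /* whole cluster is going down */

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        return 0;
}

perf_pmu_migrate_context() is what keeps the hotplug invisible to userspace: the events follow the context to the new CPU instead of being torn down.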
l2cache_pmu       888 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu = data;
l2cache_pmu       908 drivers/perf/qcom_l2_pmu.c 	list_add(&cluster->next, &l2cache_pmu->clusters);
l2cache_pmu       917 drivers/perf/qcom_l2_pmu.c 	cluster->l2cache_pmu = l2cache_pmu;
l2cache_pmu       934 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->num_pmus++;
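Lines 888-934 sketch discovery: probe iterates the platform device's children (the device_for_each_child() hit at line 982 below), and this callback allocates one struct cluster_pmu per child, links it into the clusters list, points it back at the shared l2cache_pmu, and bumps num_pmus. Roughly, with the cluster-id and overflow-IRQ setup elided:

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct l2cache_pmu *l2cache_pmu = data;         /* hit at line 888 */
        struct cluster_pmu *cluster;

        cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        /* ... read the cluster id, request the overflow IRQ ... */

        cluster->l2cache_pmu = l2cache_pmu;             /* hit at line 917 */
        list_add(&cluster->next, &l2cache_pmu->clusters);
        l2cache_pmu->num_pmus++;

        return 0;
}

A num_pmus of zero is later the probe's signal that no cluster was found at all (the check at line 987).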
l2cache_pmu       942 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu;
l2cache_pmu       944 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu =
l2cache_pmu       945 drivers/perf/qcom_l2_pmu.c 		devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
l2cache_pmu       946 drivers/perf/qcom_l2_pmu.c 	if (!l2cache_pmu)
l2cache_pmu       949 drivers/perf/qcom_l2_pmu.c 	INIT_LIST_HEAD(&l2cache_pmu->clusters);
l2cache_pmu       951 drivers/perf/qcom_l2_pmu.c 	platform_set_drvdata(pdev, l2cache_pmu);
l2cache_pmu       952 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->pmu = (struct pmu) {
l2cache_pmu       968 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->num_counters = get_num_counters();
l2cache_pmu       969 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->pdev = pdev;
l2cache_pmu       970 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
l2cache_pmu       972 drivers/perf/qcom_l2_pmu.c 	if (!l2cache_pmu->pmu_cluster)
l2cache_pmu       975 drivers/perf/qcom_l2_pmu.c 	l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
l2cache_pmu       976 drivers/perf/qcom_l2_pmu.c 	l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
l2cache_pmu       979 drivers/perf/qcom_l2_pmu.c 	cpumask_clear(&l2cache_pmu->cpumask);
l2cache_pmu       982 drivers/perf/qcom_l2_pmu.c 	err = device_for_each_child(&pdev->dev, l2cache_pmu,
l2cache_pmu       987 drivers/perf/qcom_l2_pmu.c 	if (l2cache_pmu->num_pmus == 0) {
l2cache_pmu       993 drivers/perf/qcom_l2_pmu.c 				       &l2cache_pmu->node);
l2cache_pmu       999 drivers/perf/qcom_l2_pmu.c 	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
l2cache_pmu      1006 drivers/perf/qcom_l2_pmu.c 		 l2cache_pmu->num_pmus);
l2cache_pmu      1012 drivers/perf/qcom_l2_pmu.c 				    &l2cache_pmu->node);
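The probe hits from line 942 onward read as a straight setup sequence: allocate zeroed driver state, init the clusters list, stash drvdata, fill in the struct pmu, size the counter bookkeeping (lines 975-976: the highest counter index is the dedicated cycle counter; the continuation of line 976 is elided in the hits and stays elided here), discover clusters, then register. The registration tail and its unwind, reconstructed around hits 993-1012; the error-path label, the dev_info message text, and the CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE constant are assumptions:

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                       &l2cache_pmu->node);
        if (err)
                return err;

        err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
        if (err)
                goto out_unregister;

        dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
                 l2cache_pmu->num_pmus);
        return 0;

out_unregister:
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return err;

The ordering matters: the hotplug instance is added first so the online callback can populate the cpumask before perf_pmu_register() makes the PMU visible to userspace.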
l2cache_pmu      1018 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu =
l2cache_pmu      1021 drivers/perf/qcom_l2_pmu.c 	perf_pmu_unregister(&l2cache_pmu->pmu);
l2cache_pmu      1023 drivers/perf/qcom_l2_pmu.c 				    &l2cache_pmu->node);
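Teardown at lines 1018-1023 is the probe tail in reverse: unregister the PMU first so no new events can arrive, then drop the hotplug instance. Filled out minimally; the drvdata retrieval on the elided continuation of line 1018 is assumed to mirror the platform_set_drvdata() call at line 951, and the cpuhp constant is the same assumption as above:

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&l2cache_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return 0;
}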