csky_pmu           46 arch/csky/kernel/perf_event.c } csky_pmu;
csky_pmu           49 arch/csky/kernel/perf_event.c #define to_csky_pmu(p)  (container_of(p, struct csky_pmu, pmu))
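Taken together, the member accesses in this listing pin down the shape of the csky_pmu singleton that line 46 closes. A minimal reconstruction follows; every field is evidenced by an access quoted below, but the field order and the struct tag (inferred from the to_csky_pmu() macro on line 49) are assumptions:

    #include <linux/perf_event.h>
    #include <linux/platform_device.h>

    /* Sketch: field set inferred from the accesses in this listing. */
    static struct csky_pmu {
        struct pmu                     pmu;          /* embedded core PMU: lines 49, 1121, 1146, 1207, 1331 */
        struct pmu_hw_events __percpu *hw_events;    /* per-CPU bookkeeping: lines 1075-1106, 1200 */
        struct platform_device        *plat_device;  /* cached probe device: lines 1163, 1191, 1311 */
        uint32_t                       count_width;  /* counter width in bits: lines 928, 1306-1309 */
        uint32_t                       hpcr;         /* shadow of the HPCR control register: lines 998-1012 */
        u64                            max_period;   /* BIT_ULL(count_width) - 1: lines 899-910, 1309 */
    } csky_pmu;

The to_csky_pmu() macro on line 49 is the usual container_of() trick: the perf core hands every callback the embedded struct pmu, and the macro recovers the enclosing wrapper from it.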
csky_pmu          899 arch/csky/kernel/perf_event.c 	if (left > (s64)csky_pmu.max_period)
csky_pmu          900 arch/csky/kernel/perf_event.c 		left = csky_pmu.max_period;
csky_pmu          910 arch/csky/kernel/perf_event.c 						csky_pmu.max_period);
csky_pmu          928 arch/csky/kernel/perf_event.c 		hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1);
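Lines 899-910 are the standard perf set-period dance: clamp the requested period to the widest value the counter can hold, then seed the counter with the period's negation so it overflows after the right number of events. A compressed sketch, assuming the function name and a hw_raw_write_mapping[] table symmetric to the read table quoted on line 928:

    /* Sketch of the set-period step near line 899 (names assumed). */
    static void csky_pmu_event_set_period(struct perf_event *event)
    {
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);

        if (left > (s64)csky_pmu.max_period)
            left = csky_pmu.max_period;

        /*
         * Seed with -left, masked to the implemented counter width;
         * line 910 shows max_period used as exactly this kind of
         * mask argument in a continuation line.
         */
        local64_set(&hwc->prev_count, (u64)(-left));
        hw_raw_write_mapping[hwc->idx]((u64)(-left) & csky_pmu.max_period);

        perf_event_update_userpage(event);
    }

Line 928 is the matching read side: the raw value returned by hw_raw_read_mapping[hwc->idx]() is evidently passed through sign_extend64() with count_width - 1 as the sign-bit index, so a counter narrower than 64 bits is widened consistently before deltas are computed.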
csky_pmu          998 arch/csky/kernel/perf_event.c 		csky_pmu.hpcr = BIT(2);
csky_pmu         1000 arch/csky/kernel/perf_event.c 		csky_pmu.hpcr = BIT(3);
csky_pmu         1002 arch/csky/kernel/perf_event.c 		csky_pmu.hpcr = BIT(2) | BIT(3);
csky_pmu         1004 arch/csky/kernel/perf_event.c 	csky_pmu.hpcr |= BIT(1) | BIT(0);
csky_pmu         1012 arch/csky/kernel/perf_event.c 	cpwcr(HPCR, csky_pmu.hpcr);
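Lines 998-1012 manage HPCR, the hardware performance counter control register, through the shadow field csky_pmu.hpcr: BIT(2) and BIT(3) select which privilege levels get counted, BIT(1) | BIT(0) are added unconditionally as enable bits, and cpwcr() on line 1012 is the C-SKY control-register write that commits the shadow to hardware. A sketch of how the perf exclude_* attributes would drive the three-way choice; which bit means user and which means kernel, plus which function owns the cpwcr() write, are assumptions:

    /* Sketch: mapping exclude_* flags onto HPCR bits (mapping assumed). */
    static void csky_pmu_hpcr_config(struct perf_event *event)
    {
        if (event->attr.exclude_kernel)
            csky_pmu.hpcr = BIT(2);            /* one privilege level only */
        else if (event->attr.exclude_user)
            csky_pmu.hpcr = BIT(3);            /* the other level only */
        else
            csky_pmu.hpcr = BIT(2) | BIT(3);   /* count both levels */

        csky_pmu.hpcr |= BIT(1) | BIT(0);      /* always-on enable bits */
    }

    static void csky_pmu_enable(struct pmu *pmu)
    {
        cpwcr(HPCR, csky_pmu.hpcr);            /* line 1012: commit shadow */
    }

Keeping a shadow copy means the enable path never has to read-modify-write the live control register.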
csky_pmu         1075 arch/csky/kernel/perf_event.c 	struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
csky_pmu         1088 arch/csky/kernel/perf_event.c 	struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
csky_pmu         1106 arch/csky/kernel/perf_event.c 	struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);
csky_pmu         1121 arch/csky/kernel/perf_event.c 	csky_pmu_disable(&csky_pmu.pmu);
csky_pmu         1146 arch/csky/kernel/perf_event.c 	csky_pmu_enable(&csky_pmu.pmu);
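Lines 1075-1146 are the interrupt path. Each helper starts by resolving this CPU's pmu_hw_events slot from the per-CPU allocation, and the overflow handler (the cpuc user at line 1106) brackets its work with csky_pmu_disable()/csky_pmu_enable() so the counters stand still while samples are recorded. An outline; the handler name, CSKY_PMU_MAX_EVENTS, and the events[] array inside pmu_hw_events are assumptions, while the per-CPU lookup and the disable/enable bracketing are what lines 1106-1146 show directly:

    /* Sketch of the overflow handler near line 1106 (internals assumed). */
    static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev)
    {
        struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);
        int idx;

        csky_pmu_disable(&csky_pmu.pmu);       /* line 1121: freeze counters */

        for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) {
            struct perf_event *event = cpuc->events[idx];

            if (!event)
                continue;
            /* ...update the count, re-arm the period, emit a sample... */
        }

        csky_pmu_enable(&csky_pmu.pmu);        /* line 1146: resume counting */

        return IRQ_HANDLED;
    }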
csky_pmu         1163 arch/csky/kernel/perf_event.c 	struct platform_device *pmu_device = csky_pmu.plat_device;
csky_pmu         1178 arch/csky/kernel/perf_event.c 				 this_cpu_ptr(csky_pmu.hw_events));
csky_pmu         1191 arch/csky/kernel/perf_event.c 	struct platform_device *pmu_device = csky_pmu.plat_device;
csky_pmu         1195 arch/csky/kernel/perf_event.c 		free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events));
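Lines 1163-1195 are the IRQ request/free pair. Both sides pull the interrupt number off the platform device cached in csky_pmu.plat_device, and both hand request_percpu_irq()/free_percpu_irq() the same per-CPU cookie, this_cpu_ptr(csky_pmu.hw_events), exactly as quoted on lines 1178 and 1195. A sketch with assumed function names and an assumed devname string:

    /* Sketch of the IRQ setup/teardown pair (names assumed). */
    static int csky_pmu_request_irq(irq_handler_t handler)
    {
        struct platform_device *pmu_device = csky_pmu.plat_device;
        int irq;

        irq = platform_get_irq(pmu_device, 0);
        if (irq < 0)
            return irq;

        return request_percpu_irq(irq, handler, "csky-pmu",
                                  this_cpu_ptr(csky_pmu.hw_events));
    }

    static void csky_pmu_free_irq(void)
    {
        struct platform_device *pmu_device = csky_pmu.plat_device;
        int irq = platform_get_irq(pmu_device, 0);

        if (irq >= 0)
            free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events));
    }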
csky_pmu         1200 arch/csky/kernel/perf_event.c 	csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
csky_pmu         1202 arch/csky/kernel/perf_event.c 	if (!csky_pmu.hw_events) {
csky_pmu         1207 arch/csky/kernel/perf_event.c 	csky_pmu.pmu = (struct pmu) {
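Lines 1200-1207 initialize the driver state: allocate one pmu_hw_events per CPU, then fill csky_pmu.pmu with the callback table that perf_pmu_register() on line 1331 will publish. The compound literal's contents are not quoted in this listing, so the callback set below is the conventional one; only csky_pmu_enable and csky_pmu_disable are confirmed by lines 1121 and 1146, the rest are names assumed by analogy:

    /* Sketch of the init step near line 1200 (callbacks assumed). */
    csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
                                          GFP_KERNEL);
    if (!csky_pmu.hw_events)
        return -ENOMEM;

    csky_pmu.pmu = (struct pmu) {
        .pmu_enable  = csky_pmu_enable,
        .pmu_disable = csky_pmu_disable,
        .event_init  = csky_pmu_event_init,
        .add         = csky_pmu_add,
        .del         = csky_pmu_del,
        .start       = csky_pmu_start,
        .stop        = csky_pmu_stop,
        .read        = csky_pmu_read,
    };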
csky_pmu         1306 arch/csky/kernel/perf_event.c 				 &csky_pmu.count_width)) {
csky_pmu         1307 arch/csky/kernel/perf_event.c 		csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
csky_pmu         1309 arch/csky/kernel/perf_event.c 	csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;
csky_pmu         1311 arch/csky/kernel/perf_event.c 	csky_pmu.plat_device = pdev;
csky_pmu         1314 arch/csky/kernel/perf_event.c 	on_each_cpu(csky_pmu_reset, &csky_pmu, 1);
csky_pmu         1318 arch/csky/kernel/perf_event.c 		csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
csky_pmu         1327 arch/csky/kernel/perf_event.c 		free_percpu(csky_pmu.hw_events);
csky_pmu         1331 arch/csky/kernel/perf_event.c 	ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
csky_pmu         1334 arch/csky/kernel/perf_event.c 		free_percpu(csky_pmu.hw_events);
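Lines 1306-1334 belong to the platform probe. The order is: read the counter width from the device tree, falling back to DEFAULT_COUNT_WIDTH; derive max_period as the all-ones mask of that width (a 48-bit counter gives BIT_ULL(48) - 1 = 0xffffffffffff); cache the device; reset the counters on every CPU; then register. A failed IRQ hookup is not fatal, it merely strips sampling support via PERF_PMU_CAP_NO_INTERRUPT on line 1318, and the two free_percpu() sites (lines 1327 and 1334) show that both an intermediate setup step and perf_pmu_register() unwind the per-CPU allocation on error. A compressed sketch; the property name, helper names, and the elided intermediate step are assumptions:

    /* Sketch of the probe flow, lines 1306-1334 (names assumed). */
    static int csky_pmu_device_probe(struct platform_device *pdev)
    {
        int ret;

        /* "count-width" as the DT property name is an assumption. */
        if (of_property_read_u32(pdev->dev.of_node, "count-width",
                                 &csky_pmu.count_width)) {
            csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
        }
        csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;

        csky_pmu.plat_device = pdev;

        /* Put every CPU's counters into a known state up front. */
        on_each_cpu(csky_pmu_reset, &csky_pmu, 1);

        ret = csky_pmu_request_irq(csky_pmu_handle_irq);
        if (ret)
            /* Counting still works without the IRQ; sampling won't. */
            csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

        /*
         * An intermediate setup step, with its own error unwind at
         * line 1327, is elided here.
         */

        ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
        if (ret) {
            csky_pmu_free_irq();
            free_percpu(csky_pmu.hw_events);
        }

        return ret;
    }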