/*
 *  linux/arch/arm/include/asm/pmu.h
 *
 *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	PMU interrupt and passed the address of the low level handler;
 *	it can be used to implement any platform specific handling
 *	before or after calling that handler.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
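
/*
 * Example (illustrative only, not part of this interface): a board file
 * could wrap the low level PMU handler to do platform specific work
 * around it. All names below are hypothetical.
 */
#if 0	/* example */
static irqreturn_t board_pmu_handle_irq(int irq, void *dev,
					irq_handler_t pmu_handler)
{
	/* Platform specific pre-handling would go here. */
	return pmu_handler(irq, dev);
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.handle_irq = board_pmu_handle_irq,
};
#endif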

#ifdef CONFIG_HW_PERF_EVENTS

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
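
/*
 * Example (illustrative): a PMU driver typically initialises its event
 * map with everything unsupported and then overrides the events the
 * hardware actually counts. The event numbers here are made up.
 */
#if 0	/* example */
static const unsigned example_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,	/* hypothetical event number */
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,	/* hypothetical event number */
};
#endif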

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;
};
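
/*
 * Example (illustrative): a driver's get_event_idx callback typically
 * claims a free counter by scanning used_mask. A simplified sketch:
 */
#if 0	/* example */
static int example_get_event_idx(struct pmu_hw_events *hw_events,
				 struct perf_event *event)
{
	int idx;

	for (idx = 0; idx < ARMPMU_MAX_HWEVENTS; ++idx) {
		/* test_and_set_bit() returns 0 iff the bit was clear. */
		if (!test_and_set_bit(idx, hw_events->used_mask))
			return idx;
	}

	/* All counters are in use. */
	return -EAGAIN;
}
#endif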

/*
 * Per-PMU state plus the method table a CPU PMU driver fills in. The
 * function pointers abstract the differences between PMU
 * implementations (e.g. ARMv6, ARMv7, XScale).
 */
struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	int		*irq_affinity;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct notifier_block	hotplug_nb;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
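
/*
 * Example (illustrative): callbacks that receive a struct perf_event
 * recover their struct arm_pmu via the embedded struct pmu:
 */
#if 0	/* example */
static void example_enable(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events);

	/* ... program the counter selected for this event ... */
}
#endif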

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);

/* Match a CPU ID (under @mask) to the init function for that CPU's PMU. */
struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
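
/*
 * Example (illustrative): a probe table matching CPUs to hypothetical
 * init functions; the matching entry's init() sets up a struct arm_pmu
 * which is then handed to armpmu_register().
 */
#if 0	/* example */
static const struct pmu_probe_info example_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, cortex_a9_pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	{ /* sentinel */ }
};
#endif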

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */