This source file includes the following definitions:
- amdgpu_perf_event_init
- amdgpu_perf_start
- amdgpu_perf_read
- amdgpu_perf_stop
- amdgpu_perf_add
- amdgpu_perf_del
- init_pmu_by_type
- amdgpu_pmu_init
- amdgpu_pmu_fini
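A minimal userspace sketch (not part of this file) of how the counters registered here could be read through the standard perf_event_open(2) interface. It assumes the DF PMU was registered for the first DRM device (primary minor 0, giving the name "amdgpu_df_0" per init_pmu_by_type) and uses a placeholder config of 0; real event encodings are defined by df_v3_6_attr_groups.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int pmu_type, fd;

	/* dynamic PMU type id exported by perf_pmu_register() through sysfs;
	 * "amdgpu_df_0" assumes DRM primary minor 0 */
	f = fopen("/sys/bus/event_source/devices/amdgpu_df_0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &pmu_type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;	/* checked by amdgpu_perf_event_init() */
	attr.config = 0x0;	/* placeholder event encoding */
	attr.disabled = 1;

	/* uncore-style PMU: open system-wide (pid == -1) on one CPU */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* -> amdgpu_perf_add()/start() */
	sleep(1);
	if (read(fd, &count, sizeof(count)) != (ssize_t)sizeof(count))
		count = 0;			/* -> amdgpu_perf_read() */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* -> amdgpu_perf_stop() */
	close(fd);

	printf("count: %llu\n", (unsigned long long)count);
	return 0;
}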
#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"
#include "df_v3_6.h"

#define PMU_NAME_SIZE 32

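/* record to keep track of the pmu entry per pmu type per device */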
struct amdgpu_pmu_entry {
	struct list_head entry;
	struct amdgpu_device *adev;
	struct pmu pmu;
	unsigned int pmu_perf_type;
};

static LIST_HEAD(amdgpu_pmu_list);

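/*
 * initialize perf counter: reject events that do not target this pmu and
 * cache the requested config in the hw_perf_event
 */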
static int amdgpu_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* update the hw_perf_event struct with config data */
	hwc->conf = event->attr.config;

	return 0;
}

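/*
 * start perf counter: clear the stopped/uptodate state and let the
 * hardware counter run
 */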
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						   struct amdgpu_pmu_entry,
						   pmu);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		/* counter was already set up in amdgpu_perf_add() when
		 * PERF_EF_RELOAD is set; otherwise initialize it here */
		if (!(flags & PERF_EF_RELOAD))
			pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);

		pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

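/*
 * read perf counter: fetch the current hardware count and fold the delta
 * since the last read into the perf event count
 */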
static void amdgpu_perf_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						   struct amdgpu_pmu_entry,
						   pmu);
	u64 count, prev;

	/* retry if another reader updates prev_count concurrently */
	do {
		prev = local64_read(&hwc->prev_count);

		switch (pe->pmu_perf_type) {
		case PERF_TYPE_AMDGPU_DF:
			pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
							  &count);
			break;
		default:
			count = 0;
			break;
		}
	} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

	local64_add(count - prev, &event->count);
}

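/*
 * stop perf counter: pause counting in hardware and flush a final count
 * into the event before marking it up to date
 */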
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						   struct amdgpu_pmu_entry,
						   pmu);

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
		break;
	default:
		break;
	}

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/* pull the final count out of the hardware before marking up to date */
	amdgpu_perf_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

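/*
 * add perf counter: claim a hardware counter for the event and start it
 * if PERF_EF_START is set
 */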
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int retval;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						   struct amdgpu_pmu_entry,
						   pmu);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
		break;
	default:
		return 0;
	}

	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		amdgpu_perf_start(event, PERF_EF_RELOAD);

	return retval;
}

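/*
 * delete perf counter: stop the event and release its hardware counter
 */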
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						   struct amdgpu_pmu_entry,
						   pmu);

	amdgpu_perf_stop(event, PERF_EF_UPDATE);

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

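/*
 * register one pmu instance of the given type for this device and track
 * it on amdgpu_pmu_list
 */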
static int init_pmu_by_type(struct amdgpu_device *adev,
			    const struct attribute_group *attr_groups[],
			    char *pmu_type_name, char *pmu_file_prefix,
			    unsigned int pmu_perf_type,
			    unsigned int num_counters)
{
	char pmu_name[PMU_NAME_SIZE];
	struct amdgpu_pmu_entry *pmu_entry;
	int ret = 0;

	pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);

	if (!pmu_entry)
		return -ENOMEM;

	pmu_entry->adev = adev;
	pmu_entry->pmu = (struct pmu){
		.event_init = amdgpu_perf_event_init,
		.add = amdgpu_perf_add,
		.del = amdgpu_perf_del,
		.start = amdgpu_perf_start,
		.stop = amdgpu_perf_stop,
		.read = amdgpu_perf_read,
		.task_ctx_nr = perf_invalid_context,
	};

	pmu_entry->pmu.attr_groups = attr_groups;
	pmu_entry->pmu_perf_type = pmu_perf_type;

	/* the pmu shows up in sysfs as "<pmu_file_prefix>_<primary minor>" */
	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
		 pmu_file_prefix, adev->ddev->primary->index);

	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

	if (ret) {
		kfree(pmu_entry);
		pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
		return ret;
	}

	pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
		pmu_type_name, num_counters);

	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

	return 0;
}

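/* register pmus for ASICs that expose performance counters */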
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		/* init df */
		ret = init_pmu_by_type(adev, df_v3_6_attr_groups,
				       "DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF,
				       DF_V3_6_MAX_COUNTERS);
		break;
	default:
		return 0;
	}

	return ret;
}

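/* destroy all pmu data associated with the target device */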
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
	struct amdgpu_pmu_entry *pe, *temp;

	list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
		if (pe->adev == adev) {
			list_del(&pe->entry);
			perf_pmu_unregister(&pe->pmu);
			kfree(pe);
		}
	}
}