#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

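/*
 * update_stats() keeps running statistics using Welford's online
 * algorithm: the mean and the sum of squared differences from the mean
 * (M2) are updated incrementally per sample, so the variance can be
 * derived later without storing the individual values.
 */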
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->priv;

	return ps->id == id;
}

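/* Map each perf_stat_evsel_id to the event name string it is matched against. */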
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
};
#undef ID

void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->priv;

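	/*
	 * res_stats[] accumulates statistics for the aggregated counter
	 * value, enabled time and running time (in that order), matching
	 * the layout of struct perf_counts_values.
	 */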
	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}

int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
				      int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

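/*
 * per_pkg_mask is a bitmap with one bit per socket; a set bit means that
 * package has already contributed a value for this per-package event.
 * The mask is cleared before each new read of the counter.
 */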
static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

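/*
 * Decide whether this CPU's value should be skipped: for a per-package
 * event, only the first CPU of each socket that actually ran the event
 * contributes, so duplicate readings within a package are dropped.
 */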
static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run to be a good
	 * instance for marking a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second one is, and this
	 * function would mark the package as used after the first CPU
	 * and would not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

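/*
 * Fold one (cpu, thread) reading into the evsel: apply the per-package
 * skip, then either compute deltas and scale the value in place
 * (per-cpu/core/socket/thread modes) or add it to the global aggregate,
 * which is scaled later in perf_stat_process_counter().
 */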
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
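		/* fall through: the remaining cases need no per-value work */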
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

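/*
 * Aggregate all per-cpu/per-thread readings of one counter. For
 * AGGR_GLOBAL this also computes deltas, scales the aggregate, feeds the
 * run-to-run statistics and updates the shadow stats used to derive
 * metrics at printout time.
 */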
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

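	/* count[0..2] are the aggregated value, enabled time and running time */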
	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, count, 0);

	return 0;
}