#include <errno.h>		/* errno */
#include <fcntl.h>		/* openat(), O_RDONLY */
#include <limits.h>		/* UINT_MAX */
#include <inttypes.h>		/* PRIu64 */
#include <sched.h>		/* cpu_set_t, CPU_* macros, sched_setaffinity() */
#include <string.h>		/* strerror_r() */
#include <unistd.h>		/* close(), getpid() */
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/err.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

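/*
 * Open a "syscalls:sys_enter_openat" tracepoint counter on all CPUs for
 * the current process, pin the thread to each CPU in turn while issuing
 * a known number of openat() calls there, then read back the per-cpu
 * counts and verify them.
 */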
int test__openat_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

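	/* A NULL cpu list asks for the map of all online CPUs. */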
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

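	/*
	 * Start from an empty affinity mask; exactly one CPU gets set in it
	 * per iteration of the loop below.
	 */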
	CPU_ZERO(&cpu_set);

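	/*
	 * perf_evsel__newtp() returns an ERR_PTR() encoded errno on failure,
	 * hence the IS_ERR() check rather than a NULL test.
	 */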
	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

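	/*
	 * Opens one counter per (CPU, thread) pair, so the tracepoint is
	 * counted on every online CPU for the current process.
	 */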
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

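	/*
	 * Pin the thread to each CPU in turn and make a different number of
	 * openat() calls there (111 + cpu index), so that every CPU ends up
	 * with a distinct, predictable count.
	 */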
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
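		/*
		 * The dirfd argument is ignored for an absolute path; all
		 * that matters is that sys_enter_openat fires ncalls times
		 * on this CPU.
		 */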
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

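	/*
	 * Read each per-cpu count and compare it with the number of openat()
	 * calls issued while pinned to that CPU.
	 */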
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

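	/*
	 * The success path frees the counts and then falls through the same
	 * unwinding used by the error paths below.
	 */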
	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}