#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	/*
	 * First make sure the event itself can be opened.  If a system-wide
	 * (pid == -1) open fails with EACCES, fall back to monitoring the
	 * current process; 'pid' is static so the fallback sticks for later
	 * probes.
	 */
	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	/*
	 * Now set the attribute being probed and try again.  EINVAL means
	 * the kernel does not support it; any other failure leaves err at
	 * -EAGAIN so the caller can retry with a different event.
	 */
	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__delete(cpus);

	/* Probe on the first CPU, falling back through the event list. */
	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each(evlist, evsel) {
		perf_evsel__config(evsel, opts);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		/*
		 * Events with differing sample_type need PERF_SAMPLE_IDENTIFIER
		 * to tell their samples apart, if the kernel supports it.
		 */
		evlist__for_each(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}

		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		       "Please use -F freq option with lower value or consider\n"
		       "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		       max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	/* Pick a CPU to probe on, preferring the evlist's own cpu map. */
	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__delete(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}
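
/*
 * Usage sketch (illustrative only, not part of this file's build): a
 * record-style tool would typically wire these helpers together roughly as
 * below, after option parsing has filled in a hypothetical 'rec' container
 * holding the evlist and record_opts.  The 'rec' name and the chosen
 * tracepoint string are assumptions for the example, not APIs defined here.
 *
 *	if (record_opts__config(&rec->opts) < 0)
 *		return -EINVAL;
 *
 *	if (!perf_evlist__can_select_event(rec->evlist, "sched:sched_switch"))
 *		pr_debug("sched_switch tracepoint not selectable\n");
 *
 *	perf_evlist__config(rec->evlist, &rec->opts);
 *
 * perf_can_comm_exec() and perf_can_sample_identifier() are probed from
 * within perf_evlist__config(), so callers do not need to invoke them first.
 */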