Lines matching refs: evsel

41 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)  in perf_evsel__no_extra_init()
46 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused) in perf_evsel__no_extra_fini()
52 int (*init)(struct perf_evsel *evsel);
53 void (*fini)(struct perf_evsel *evsel);
61 int (*init)(struct perf_evsel *evsel), in perf_evsel__object_config() argument
62 void (*fini)(struct perf_evsel *evsel)) in perf_evsel__object_config() argument
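
The perf_evsel__object_config() matches above are the hook that lets a tool embed a perf_evsel inside a larger per-tool object; perf_evsel__object.size is then what the allocators further down hand to zalloc(). A minimal sketch of registering such hooks, assuming it is built inside tools/perf against util/evsel.h; struct my_evsel, my_evsel_init/fini and setup() are hypothetical names, and the size-first parameter order is inferred from the surrounding code rather than quoted from the header:

    #include <linux/compiler.h>
    #include "util/evsel.h"

    struct my_evsel {
        struct perf_evsel  core;   /* must stay first so evsel pointers cast back */
        unsigned long long seen;   /* hypothetical per-event bookkeeping */
    };

    static int my_evsel_init(struct perf_evsel *evsel)
    {
        struct my_evsel *m = (struct my_evsel *)evsel;

        m->seen = 0;
        return 0;
    }

    static void my_evsel_fini(struct perf_evsel *evsel __maybe_unused)
    {
        /* nothing to tear down in this sketch */
    }

    static int setup(void)
    {
        /* From now on perf_evsel__new*() allocates the larger object and
         * runs the init/fini hooks stored in perf_evsel__object. */
        return perf_evsel__object_config(sizeof(struct my_evsel),
                                         my_evsel_init, my_evsel_fini);
    }
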
161 void perf_evsel__calc_id_pos(struct perf_evsel *evsel) in perf_evsel__calc_id_pos() argument
163 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); in perf_evsel__calc_id_pos()
164 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); in perf_evsel__calc_id_pos()
167 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, in __perf_evsel__set_sample_bit() argument
170 if (!(evsel->attr.sample_type & bit)) { in __perf_evsel__set_sample_bit()
171 evsel->attr.sample_type |= bit; in __perf_evsel__set_sample_bit()
172 evsel->sample_size += sizeof(u64); in __perf_evsel__set_sample_bit()
173 perf_evsel__calc_id_pos(evsel); in __perf_evsel__set_sample_bit()
177 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, in __perf_evsel__reset_sample_bit() argument
180 if (evsel->attr.sample_type & bit) { in __perf_evsel__reset_sample_bit()
181 evsel->attr.sample_type &= ~bit; in __perf_evsel__reset_sample_bit()
182 evsel->sample_size -= sizeof(u64); in __perf_evsel__reset_sample_bit()
183 perf_evsel__calc_id_pos(evsel); in __perf_evsel__reset_sample_bit()
187 void perf_evsel__set_sample_id(struct perf_evsel *evsel, in perf_evsel__set_sample_id() argument
191 perf_evsel__reset_sample_bit(evsel, ID); in perf_evsel__set_sample_id()
192 perf_evsel__set_sample_bit(evsel, IDENTIFIER); in perf_evsel__set_sample_id()
194 perf_evsel__set_sample_bit(evsel, ID); in perf_evsel__set_sample_id()
196 evsel->attr.read_format |= PERF_FORMAT_ID; in perf_evsel__set_sample_id()
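
The helpers above keep attr.sample_type, sample_size and the id/is positions consistent, and perf_evsel__set_sample_id() prefers PERF_SAMPLE_IDENTIFIER over PERF_SAMPLE_ID when the kernel supports it, always adding PERF_FORMAT_ID to read_format. A standalone sketch of that bookkeeping using only the UAPI header; set_sample_bit() and set_sample_id() here are local stand-ins, not the evsel API:

    #include <linux/perf_event.h>
    #include <stdbool.h>
    #include <stdint.h>

    static void set_sample_bit(struct perf_event_attr *attr,
                               uint64_t *sample_size, uint64_t bit)
    {
        if (!(attr->sample_type & bit)) {
            attr->sample_type |= bit;           /* request the field            */
            *sample_size += sizeof(uint64_t);   /* ... and account for its size */
        }
    }

    static void set_sample_id(struct perf_event_attr *attr,
                              uint64_t *sample_size, bool can_sample_identifier)
    {
        if (can_sample_identifier)      /* fixed position, newer kernels */
            set_sample_bit(attr, sample_size, PERF_SAMPLE_IDENTIFIER);
        else                            /* fallback for older kernels    */
            set_sample_bit(attr, sample_size, PERF_SAMPLE_ID);

        attr->read_format |= PERF_FORMAT_ID;    /* ids in read() results too */
    }
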
199 void perf_evsel__init(struct perf_evsel *evsel, in perf_evsel__init() argument
202 evsel->idx = idx; in perf_evsel__init()
203 evsel->tracking = !idx; in perf_evsel__init()
204 evsel->attr = *attr; in perf_evsel__init()
205 evsel->leader = evsel; in perf_evsel__init()
206 evsel->unit = ""; in perf_evsel__init()
207 evsel->scale = 1.0; in perf_evsel__init()
208 INIT_LIST_HEAD(&evsel->node); in perf_evsel__init()
209 perf_evsel__object.init(evsel); in perf_evsel__init()
210 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); in perf_evsel__init()
211 perf_evsel__calc_id_pos(evsel); in perf_evsel__init()
216 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); in perf_evsel__new_idx() local
218 if (evsel != NULL) in perf_evsel__new_idx()
219 perf_evsel__init(evsel, attr, idx); in perf_evsel__new_idx()
221 return evsel; in perf_evsel__new_idx()
226 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); in perf_evsel__newtp_idx() local
228 if (evsel != NULL) { in perf_evsel__newtp_idx()
235 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) in perf_evsel__newtp_idx()
238 evsel->tp_format = trace_event__tp_format(sys, name); in perf_evsel__newtp_idx()
239 if (evsel->tp_format == NULL) in perf_evsel__newtp_idx()
243 attr.config = evsel->tp_format->id; in perf_evsel__newtp_idx()
245 perf_evsel__init(evsel, &attr, idx); in perf_evsel__newtp_idx()
248 return evsel; in perf_evsel__newtp_idx()
251 zfree(&evsel->name); in perf_evsel__newtp_idx()
252 free(evsel); in perf_evsel__newtp_idx()
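
perf_evsel__newtp_idx() resolves sys:name through libtraceevent (trace_event__tp_format()) and stores the tracepoint's numeric id in attr.config. A standalone sketch of the same end result that reads the id straight from tracefs; the mount point, the chosen sample bits and tracepoint_attr() itself are illustrative assumptions:

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>

    static int tracepoint_attr(struct perf_event_attr *attr,
                               const char *sys, const char *name)
    {
        char path[256];
        unsigned long long id;
        FILE *f;

        /* tracefs may also be mounted under /sys/kernel/debug/tracing */
        snprintf(path, sizeof(path),
                 "/sys/kernel/tracing/events/%s/%s/id", sys, name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%llu", &id) != 1) {
            fclose(f);
            return -1;
        }
        fclose(f);

        memset(attr, 0, sizeof(*attr));
        attr->size          = sizeof(*attr);
        attr->type          = PERF_TYPE_TRACEPOINT;
        attr->config        = id;       /* same role as tp_format->id above */
        attr->sample_type   = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;
        attr->sample_period = 1;        /* sample every hit */
        return 0;
    }
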
277 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__add_modifiers() argument
280 struct perf_event_attr *attr = &evsel->attr; in perf_evsel__add_modifiers()
313 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__hw_name() argument
315 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config)); in perf_evsel__hw_name()
316 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); in perf_evsel__hw_name()
339 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__sw_name() argument
341 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config)); in perf_evsel__sw_name()
342 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); in perf_evsel__sw_name()
363 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__bp_name() argument
365 struct perf_event_attr *attr = &evsel->attr; in perf_evsel__bp_name()
367 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); in perf_evsel__bp_name()
463 static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__hw_cache_name() argument
465 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); in perf_evsel__hw_cache_name()
466 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); in perf_evsel__hw_cache_name()
469 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) in perf_evsel__raw_name() argument
471 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); in perf_evsel__raw_name()
472 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); in perf_evsel__raw_name()
475 const char *perf_evsel__name(struct perf_evsel *evsel) in perf_evsel__name() argument
479 if (evsel->name) in perf_evsel__name()
480 return evsel->name; in perf_evsel__name()
482 switch (evsel->attr.type) { in perf_evsel__name()
484 perf_evsel__raw_name(evsel, bf, sizeof(bf)); in perf_evsel__name()
488 perf_evsel__hw_name(evsel, bf, sizeof(bf)); in perf_evsel__name()
492 perf_evsel__hw_cache_name(evsel, bf, sizeof(bf)); in perf_evsel__name()
496 perf_evsel__sw_name(evsel, bf, sizeof(bf)); in perf_evsel__name()
504 perf_evsel__bp_name(evsel, bf, sizeof(bf)); in perf_evsel__name()
509 evsel->attr.type); in perf_evsel__name()
513 evsel->name = strdup(bf); in perf_evsel__name()
515 return evsel->name ?: "unknown"; in perf_evsel__name()
518 const char *perf_evsel__group_name(struct perf_evsel *evsel) in perf_evsel__group_name() argument
520 return evsel->group_name ?: "anon group"; in perf_evsel__group_name()
523 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) in perf_evsel__group_desc() argument
527 const char *group_name = perf_evsel__group_name(evsel); in perf_evsel__group_desc()
532 perf_evsel__name(evsel)); in perf_evsel__group_desc()
534 for_each_group_member(pos, evsel) in perf_evsel__group_desc()
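
All of the *_name() helpers above build names the same way: print the base event name, then append the modifiers via perf_evsel__add_modifiers(evsel, bf + r, size - r). A standalone sketch of that append chain; scnprintf_like() is a local stand-in for the tools' scnprintf(), which returns the number of bytes actually placed in the buffer so the chained offsets never run past the end:

    #include <stdarg.h>
    #include <stdio.h>

    static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int i;

        if (size == 0)
            return 0;
        va_start(ap, fmt);
        i = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (i < 0)
            return 0;
        return (size_t)i < size ? i : (int)(size - 1);
    }

    static int event_name(char *bf, size_t size)
    {
        int r = scnprintf_like(bf, size, "%s", "cycles");   /* base name  */

        r += scnprintf_like(bf + r, size - r, ":%s", "u");  /* modifiers  */
        return r;                                           /* "cycles:u" */
    }
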
544 perf_evsel__config_callgraph(struct perf_evsel *evsel, in perf_evsel__config_callgraph() argument
547 bool function = perf_evsel__is_function_event(evsel); in perf_evsel__config_callgraph()
548 struct perf_event_attr *attr = &evsel->attr; in perf_evsel__config_callgraph()
550 perf_evsel__set_sample_bit(evsel, CALLCHAIN); in perf_evsel__config_callgraph()
559 perf_evsel__set_sample_bit(evsel, BRANCH_STACK); in perf_evsel__config_callgraph()
570 perf_evsel__set_sample_bit(evsel, REGS_USER); in perf_evsel__config_callgraph()
571 perf_evsel__set_sample_bit(evsel, STACK_USER); in perf_evsel__config_callgraph()
615 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) in perf_evsel__config() argument
617 struct perf_evsel *leader = evsel->leader; in perf_evsel__config()
618 struct perf_event_attr *attr = &evsel->attr; in perf_evsel__config()
619 int track = evsel->tracking; in perf_evsel__config()
625 perf_evsel__set_sample_bit(evsel, IP); in perf_evsel__config()
626 perf_evsel__set_sample_bit(evsel, TID); in perf_evsel__config()
628 if (evsel->sample_read) { in perf_evsel__config()
629 perf_evsel__set_sample_bit(evsel, READ); in perf_evsel__config()
635 perf_evsel__set_sample_id(evsel, false); in perf_evsel__config()
654 perf_evsel__set_sample_bit(evsel, PERIOD); in perf_evsel__config()
666 if ((leader != evsel) && leader->sample_read) { in perf_evsel__config()
678 perf_evsel__set_sample_bit(evsel, ADDR); in perf_evsel__config()
687 if (perf_evsel__is_function_event(evsel)) in perf_evsel__config()
688 evsel->attr.exclude_callchain_user = 1; in perf_evsel__config()
690 if (callchain_param.enabled && !evsel->no_aux_samples) in perf_evsel__config()
691 perf_evsel__config_callgraph(evsel, opts); in perf_evsel__config()
695 perf_evsel__set_sample_bit(evsel, REGS_INTR); in perf_evsel__config()
699 perf_evsel__set_sample_bit(evsel, CPU); in perf_evsel__config()
702 perf_evsel__set_sample_bit(evsel, PERIOD); in perf_evsel__config()
710 perf_evsel__set_sample_bit(evsel, TIME); in perf_evsel__config()
712 if (opts->raw_samples && !evsel->no_aux_samples) { in perf_evsel__config()
713 perf_evsel__set_sample_bit(evsel, TIME); in perf_evsel__config()
714 perf_evsel__set_sample_bit(evsel, RAW); in perf_evsel__config()
715 perf_evsel__set_sample_bit(evsel, CPU); in perf_evsel__config()
719 perf_evsel__set_sample_bit(evsel, DATA_SRC); in perf_evsel__config()
725 if (opts->branch_stack && !evsel->no_aux_samples) { in perf_evsel__config()
726 perf_evsel__set_sample_bit(evsel, BRANCH_STACK); in perf_evsel__config()
731 perf_evsel__set_sample_bit(evsel, WEIGHT); in perf_evsel__config()
739 perf_evsel__set_sample_bit(evsel, TRANSACTION); in perf_evsel__config()
742 evsel->attr.read_format |= in perf_evsel__config()
753 if (perf_evsel__is_group_leader(evsel)) in perf_evsel__config()
760 if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) && in perf_evsel__config()
764 if (evsel->immediate) { in perf_evsel__config()
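
perf_evsel__config() turns record_opts into perf_event_attr bits: IP and TID are always sampled, TIME/PERIOD/CPU are added for recording, and group leaders of a forked workload start disabled with enable_on_exec set. A standalone sketch of the kind of attr a default sampling session ends up with; the exact bit set depends on the options and on whether the evsel leads a group, so the values are illustrative:

    #include <linux/perf_event.h>
    #include <string.h>

    static void config_for_sampling(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size   = sizeof(*attr);
        attr->type   = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_CPU_CYCLES;

        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD |
                            PERF_SAMPLE_CPU;

        attr->freq           = 1;    /* interpret sample_freq, not a period    */
        attr->sample_freq    = 4000;
        attr->mmap           = 1;    /* mmap events for symbol resolution      */
        attr->comm           = 1;    /* comm events for thread names           */
        attr->sample_id_all  = 1;    /* ids on non-sample records too          */
        attr->disabled       = 1;    /* leader starts disabled ...             */
        attr->enable_on_exec = 1;    /* ... and is armed when the target execs */
    }
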
776 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_fd() argument
780 if (evsel->system_wide) in perf_evsel__alloc_fd()
783 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); in perf_evsel__alloc_fd()
785 if (evsel->fd) { in perf_evsel__alloc_fd()
788 FD(evsel, cpu, thread) = -1; in perf_evsel__alloc_fd()
793 return evsel->fd != NULL ? 0 : -ENOMEM; in perf_evsel__alloc_fd()
796 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, in perf_evsel__run_ioctl() argument
801 if (evsel->system_wide) in perf_evsel__run_ioctl()
806 int fd = FD(evsel, cpu, thread), in perf_evsel__run_ioctl()
817 int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, in perf_evsel__set_filter() argument
820 return perf_evsel__run_ioctl(evsel, ncpus, nthreads, in perf_evsel__set_filter()
825 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__enable() argument
827 return perf_evsel__run_ioctl(evsel, ncpus, nthreads, in perf_evsel__enable()
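
perf_evsel__run_ioctl() issues one ioctl per open descriptor in the (cpu, thread) matrix, and perf_evsel__enable()/perf_evsel__set_filter() are thin wrappers around it. A standalone sketch over a flat fd array standing in for the evsel->fd xyarray:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>

    static int enable_all(int *fds, int nfds)
    {
        for (int i = 0; i < nfds; i++)
            if (fds[i] >= 0 && ioctl(fds[i], PERF_EVENT_IOC_ENABLE, 0) < 0)
                return -1;
        return 0;
    }

    static int set_filter_all(int *fds, int nfds, const char *filter)
    {
        for (int i = 0; i < nfds; i++)
            if (fds[i] >= 0 &&
                ioctl(fds[i], PERF_EVENT_IOC_SET_FILTER, (void *)filter) < 0)
                return -1;
        return 0;
    }
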
832 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_id() argument
837 if (evsel->system_wide) in perf_evsel__alloc_id()
840 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); in perf_evsel__alloc_id()
841 if (evsel->sample_id == NULL) in perf_evsel__alloc_id()
844 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); in perf_evsel__alloc_id()
845 if (evsel->id == NULL) { in perf_evsel__alloc_id()
846 xyarray__delete(evsel->sample_id); in perf_evsel__alloc_id()
847 evsel->sample_id = NULL; in perf_evsel__alloc_id()
854 void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) in perf_evsel__reset_counts() argument
856 memset(evsel->counts, 0, (sizeof(*evsel->counts) + in perf_evsel__reset_counts()
860 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) in perf_evsel__alloc_counts() argument
862 evsel->counts = zalloc((sizeof(*evsel->counts) + in perf_evsel__alloc_counts()
864 return evsel->counts != NULL ? 0 : -ENOMEM; in perf_evsel__alloc_counts()
867 static void perf_evsel__free_fd(struct perf_evsel *evsel) in perf_evsel__free_fd() argument
869 xyarray__delete(evsel->fd); in perf_evsel__free_fd()
870 evsel->fd = NULL; in perf_evsel__free_fd()
873 static void perf_evsel__free_id(struct perf_evsel *evsel) in perf_evsel__free_id() argument
875 xyarray__delete(evsel->sample_id); in perf_evsel__free_id()
876 evsel->sample_id = NULL; in perf_evsel__free_id()
877 zfree(&evsel->id); in perf_evsel__free_id()
880 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__close_fd() argument
884 if (evsel->system_wide) in perf_evsel__close_fd()
889 close(FD(evsel, cpu, thread)); in perf_evsel__close_fd()
890 FD(evsel, cpu, thread) = -1; in perf_evsel__close_fd()
894 void perf_evsel__free_counts(struct perf_evsel *evsel) in perf_evsel__free_counts() argument
896 zfree(&evsel->counts); in perf_evsel__free_counts()
899 void perf_evsel__exit(struct perf_evsel *evsel) in perf_evsel__exit() argument
901 assert(list_empty(&evsel->node)); in perf_evsel__exit()
902 perf_evsel__free_fd(evsel); in perf_evsel__exit()
903 perf_evsel__free_id(evsel); in perf_evsel__exit()
904 close_cgroup(evsel->cgrp); in perf_evsel__exit()
905 zfree(&evsel->group_name); in perf_evsel__exit()
906 zfree(&evsel->name); in perf_evsel__exit()
907 perf_evsel__object.fini(evsel); in perf_evsel__exit()
910 void perf_evsel__delete(struct perf_evsel *evsel) in perf_evsel__delete() argument
912 perf_evsel__exit(evsel); in perf_evsel__delete()
913 free(evsel); in perf_evsel__delete()
916 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, in perf_evsel__compute_deltas() argument
921 if (!evsel->prev_raw_counts) in perf_evsel__compute_deltas()
925 tmp = evsel->prev_raw_counts->aggr; in perf_evsel__compute_deltas()
926 evsel->prev_raw_counts->aggr = *count; in perf_evsel__compute_deltas()
928 tmp = evsel->prev_raw_counts->cpu[cpu]; in perf_evsel__compute_deltas()
929 evsel->prev_raw_counts->cpu[cpu] = *count; in perf_evsel__compute_deltas()
957 int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, in perf_evsel__read_cb() argument
964 if (FD(evsel, cpu, thread) < 0) in perf_evsel__read_cb()
967 if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0) in perf_evsel__read_cb()
970 return cb(evsel, cpu, thread, &count); in perf_evsel__read_cb()
973 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, in __perf_evsel__read_on_cpu() argument
979 if (FD(evsel, cpu, thread) < 0) in __perf_evsel__read_on_cpu()
982 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) in __perf_evsel__read_on_cpu()
985 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) in __perf_evsel__read_on_cpu()
988 perf_evsel__compute_deltas(evsel, cpu, &count); in __perf_evsel__read_on_cpu()
990 evsel->counts->cpu[cpu] = count; in __perf_evsel__read_on_cpu()
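
__perf_evsel__read_on_cpu() read()s the counter straight from the per-(cpu, thread) fd, and perf_evsel__compute_deltas() turns successive readings into interval deltas. A standalone sketch of the same read with the usual multiplexing scaling, assuming the event was opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING:

    #include <stdint.h>
    #include <unistd.h>

    struct read_val {
        uint64_t value;
        uint64_t time_enabled;   /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;   /* PERF_FORMAT_TOTAL_TIME_RUNNING */
    };

    static int read_scaled(int fd, double *out)
    {
        struct read_val v;

        if (read(fd, &v, sizeof(v)) != sizeof(v))
            return -1;

        /* If the PMU was multiplexed, running < enabled and the raw value
         * is scaled up accordingly. */
        *out = v.time_running ?
               (double)v.value * v.time_enabled / v.time_running :
               (double)v.value;
        return 0;
    }
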
994 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) in get_group_fd() argument
996 struct perf_evsel *leader = evsel->leader; in get_group_fd()
999 if (perf_evsel__is_group_leader(evsel)) in get_group_fd()
1134 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, in __perf_evsel__open() argument
1142 if (evsel->system_wide) in __perf_evsel__open()
1147 if (evsel->fd == NULL && in __perf_evsel__open()
1148 perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) in __perf_evsel__open()
1151 if (evsel->cgrp) { in __perf_evsel__open()
1153 pid = evsel->cgrp->fd; in __perf_evsel__open()
1158 evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */ in __perf_evsel__open()
1160 evsel->attr.use_clockid = 0; in __perf_evsel__open()
1161 evsel->attr.clockid = 0; in __perf_evsel__open()
1166 evsel->attr.mmap2 = 0; in __perf_evsel__open()
1168 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; in __perf_evsel__open()
1171 evsel->attr.sample_id_all = 0; in __perf_evsel__open()
1176 perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL); in __perf_evsel__open()
1185 if (!evsel->cgrp && !evsel->system_wide) in __perf_evsel__open()
1188 group_fd = get_group_fd(evsel, cpu, thread); in __perf_evsel__open()
1193 FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, in __perf_evsel__open()
1197 if (FD(evsel, cpu, thread) < 0) { in __perf_evsel__open()
1252 if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) { in __perf_evsel__open()
1255 } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) { in __perf_evsel__open()
1261 } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { in __perf_evsel__open()
1265 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { in __perf_evsel__open()
1276 close(FD(evsel, cpu, thread)); in __perf_evsel__open()
1277 FD(evsel, cpu, thread) = -1; in __perf_evsel__open()
1284 void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__close() argument
1286 if (evsel->fd == NULL) in perf_evsel__close()
1289 perf_evsel__close_fd(evsel, ncpus, nthreads); in perf_evsel__close()
1290 perf_evsel__free_fd(evsel); in perf_evsel__close()
1309 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, in perf_evsel__open() argument
1320 return __perf_evsel__open(evsel, cpus, threads); in perf_evsel__open()
1323 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, in perf_evsel__open_per_cpu() argument
1326 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); in perf_evsel__open_per_cpu()
1329 int perf_evsel__open_per_thread(struct perf_evsel *evsel, in perf_evsel__open_per_thread() argument
1332 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); in perf_evsel__open_per_thread()
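
__perf_evsel__open() calls sys_perf_event_open() once per (cpu, thread), passing the group_fd that get_group_fd() returns: -1 for a group leader, the leader's fd for every other member. A standalone sketch of that pattern; open_group() and the wrapper name are illustrative, the syscall and the group_fd semantics are the real kernel API:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int sys_perf_event_open_like(struct perf_event_attr *attr,
                                        pid_t pid, int cpu, int group_fd,
                                        unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    static int open_group(struct perf_event_attr *leader_attr,
                          struct perf_event_attr *member_attr,
                          pid_t pid, int cpu, int *member_fd)
    {
        int leader_fd = sys_perf_event_open_like(leader_attr, pid, cpu, -1, 0);

        if (leader_fd < 0)
            return -1;

        /* Member events are scheduled on and off the PMU with their leader. */
        *member_fd = sys_perf_event_open_like(member_attr, pid, cpu, leader_fd, 0);
        return leader_fd;
    }
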
1335 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, in perf_evsel__parse_id_sample() argument
1339 u64 type = evsel->attr.sample_type; in perf_evsel__parse_id_sample()
1341 bool swapped = evsel->needs_swap; in perf_evsel__parse_id_sample()
1411 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, in perf_evsel__parse_sample() argument
1414 u64 type = evsel->attr.sample_type; in perf_evsel__parse_sample()
1415 bool swapped = evsel->needs_swap; in perf_evsel__parse_sample()
1430 data->period = evsel->attr.sample_period; in perf_evsel__parse_sample()
1434 if (!evsel->attr.sample_id_all) in perf_evsel__parse_sample()
1436 return perf_evsel__parse_id_sample(evsel, event, data); in perf_evsel__parse_sample()
1446 if (evsel->sample_size + sizeof(event->header) > event->header.size) in perf_evsel__parse_sample()
1514 u64 read_format = evsel->attr.read_format; in perf_evsel__parse_sample()
1606 u64 mask = evsel->attr.sample_regs_user; in perf_evsel__parse_sample()
1665 u64 mask = evsel->attr.sample_regs_intr; in perf_evsel__parse_sample()
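
perf_evsel__parse_sample() walks the PERF_RECORD_SAMPLE body in the fixed order the kernel lays the requested fields out in, which is also how sample_size is computed from attr.sample_type. A sketch of that layout for a simple sample_type of IDENTIFIER | IP | TID | TIME | PERIOD (other bits insert extra fields at their defined positions; this struct matches the wire layout only because every member here lands on a natural boundary):

    #include <linux/perf_event.h>
    #include <stdint.h>

    struct simple_sample {
        struct perf_event_header header;    /* header.type == PERF_RECORD_SAMPLE */
        uint64_t id;                        /* PERF_SAMPLE_IDENTIFIER */
        uint64_t ip;                        /* PERF_SAMPLE_IP         */
        uint32_t pid, tid;                  /* PERF_SAMPLE_TID        */
        uint64_t time;                      /* PERF_SAMPLE_TIME       */
        uint64_t period;                    /* PERF_SAMPLE_PERIOD     */
    };
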
1972 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) in perf_evsel__field() argument
1974 return pevent_find_field(evsel->tp_format, name); in perf_evsel__field()
1977 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, in perf_evsel__rawptr() argument
1980 struct format_field *field = perf_evsel__field(evsel, name); in perf_evsel__rawptr()
1996 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, in perf_evsel__intval() argument
1999 struct format_field *field = perf_evsel__field(evsel, name); in perf_evsel__intval()
2024 if (!evsel->needs_swap) in perf_evsel__intval()
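
perf_evsel__field(), perf_evsel__rawptr() and perf_evsel__intval() all reduce to the same operation: look the field up in the tracepoint format, then read field->size bytes at field->offset inside the sample's raw data. A sketch of that last step; struct tp_field is a hypothetical stand-in for libtraceevent's format_field, and the real code additionally handles byte swapping and __data_loc string fields:

    #include <stdint.h>
    #include <string.h>

    struct tp_field {
        int offset;   /* byte offset inside the raw tracepoint record */
        int size;     /* 1, 2, 4 or 8 bytes                           */
    };

    static uint64_t tp_intval(const void *raw_data, const struct tp_field *field)
    {
        uint64_t value = 0;
        int size = field->size > 8 ? 8 : field->size;

        /* little-endian host assumed; a big-endian host would need a shift
         * or byte swap here, which is what evsel->needs_swap guards above */
        memcpy(&value, (const char *)raw_data + field->offset, size);
        return value;
    }
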
2064 int perf_evsel__fprintf(struct perf_evsel *evsel, in perf_evsel__fprintf() argument
2073 if (!perf_evsel__is_group_leader(evsel)) in perf_evsel__fprintf()
2076 if (evsel->nr_members > 1) in perf_evsel__fprintf()
2077 printed += fprintf(fp, "%s{", evsel->group_name ?: ""); in perf_evsel__fprintf()
2079 printed += fprintf(fp, "%s", perf_evsel__name(evsel)); in perf_evsel__fprintf()
2080 for_each_group_member(pos, evsel) in perf_evsel__fprintf()
2083 if (evsel->nr_members > 1) in perf_evsel__fprintf()
2088 printed += fprintf(fp, "%s", perf_evsel__name(evsel)); in perf_evsel__fprintf()
2091 printed += perf_event_attr__fprintf(fp, &evsel->attr, in perf_evsel__fprintf()
2095 (u64)evsel->attr.sample_freq); in perf_evsel__fprintf()
2102 bool perf_evsel__fallback(struct perf_evsel *evsel, int err, in perf_evsel__fallback() argument
2106 evsel->attr.type == PERF_TYPE_HARDWARE && in perf_evsel__fallback()
2107 evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { in perf_evsel__fallback()
2119 evsel->attr.type = PERF_TYPE_SOFTWARE; in perf_evsel__fallback()
2120 evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; in perf_evsel__fallback()
2122 zfree(&evsel->name); in perf_evsel__fallback()
2129 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, in perf_evsel__open_strerror() argument
2147 perf_evsel__name(evsel)); in perf_evsel__open_strerror()
2158 if (evsel->attr.precise_ip) in perf_evsel__open_strerror()
2162 if (evsel->attr.type == PERF_TYPE_HARDWARE) in perf_evsel__open_strerror()
2189 perf_evsel__name(evsel)); in perf_evsel__open_strerror()