Searched refs:nr_events (Results 1 - 44 of 44) sorted by relevance

/linux-4.1.27/tools/perf/tests/
H A Dopen-syscall-tp-fields.c24 int err = -1, i, nr_events = 0, nr_polls = 0; test__syscall_open_tp_fields() local
72 int before = nr_events; test__syscall_open_tp_fields()
82 ++nr_events; test__syscall_open_tp_fields()
107 if (nr_events == before) test__syscall_open_tp_fields()
H A Dmmap-basic.c31 unsigned int nr_events[nsyscalls], test__basic_mmap() local
87 nr_events[i] = 0; test__basic_mmap()
125 nr_events[evsel->idx]++; test__basic_mmap()
131 if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { evlist__for_each()
134 perf_evsel__name(evsel), nr_events[evsel->idx]); evlist__for_each()
H A Dhists_filter.c149 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); evlist__for_each()
155 hists->stats.nr_events[PERF_RECORD_SAMPLE] == evlist__for_each()
174 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); evlist__for_each()
203 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); evlist__for_each()
238 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); evlist__for_each()
265 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); evlist__for_each()
H A Dperf-record.c61 int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; test__PERF_RECORD() local
166 nr_events[type]++; test__PERF_RECORD()
281 if (nr_events[PERF_RECORD_COMM] > 1) { test__PERF_RECORD()
286 if (nr_events[PERF_RECORD_COMM] == 0) { test__PERF_RECORD()
/linux-4.1.27/tools/perf/util/
H A Dordered-events.c20 ++oe->nr_events; queue_event()
23 pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events); queue_event()
151 oe->nr_events--; ordered_events__delete()
199 ui_progress__init(&prog, oe->nr_events, "Processing time ordered events..."); __ordered_events__flush()
236 if (oe->nr_events == 0) ordered_events__flush()
267 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n", ordered_events__flush()
268 str[how], oe->nr_events); ordered_events__flush()
280 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n", ordered_events__flush()
281 str[how], oe->nr_events); ordered_events__flush()
H A Dordered-events.h40 unsigned int nr_events; member in struct:ordered_events
H A Devent.h224 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
229 * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
238 u32 nr_events[PERF_RECORD_HEADER_MAX]; member in struct:events_stats
H A Dtrace-event-parse.c217 if (idx < pevent->nr_events && event == pevent->events[idx]) { trace_find_next_event()
219 if (idx == pevent->nr_events) trace_find_next_event()
224 for (idx = 1; idx < pevent->nr_events; idx++) { trace_find_next_event()
H A Dhist.c205 he_stat->nr_events += 1; he_stat__add_period()
215 dest->nr_events += src->nr_events; he_stat__add_stat()
222 he_stat->nr_events = (he_stat->nr_events * 7) / 8; he_stat__decay()
462 .nr_events = 1, __hists__add_entry()
523 * based on periods. We want sorting be done on nr_events * weight iter_add_single_mem_entry()
1173 hists->stats.nr_non_filtered_samples += h->stat.nr_events; hists__remove_entry_filter()
1279 ++stats->nr_events[0]; events_stats__inc()
1280 ++stats->nr_events[type]; events_stats__inc()
1390 * nr_events=0, to serve as the list header.
H A Dsort.h56 u32 nr_events; member in struct:he_stat
H A Dsymbol.h85 unsigned short nr_events; member in struct:symbol_conf
H A Dsession.c1134 stats->nr_events[PERF_RECORD_LOST] != 0) { perf_session__warn_about_errors()
1137 stats->nr_events[0], perf_session__warn_about_errors()
1138 stats->nr_events[PERF_RECORD_LOST]); perf_session__warn_about_errors()
1160 stats->nr_events[PERF_RECORD_SAMPLE]); perf_session__warn_about_errors()
H A Dannotate.c464 / symbol_conf.nr_events) symbol__alloc_hist()
467 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); symbol__alloc_hist()
471 notes->src->nr_histograms = symbol_conf.nr_events; symbol__alloc_hist()
H A Dsort.c936 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; he_weight()
H A Dheader.c2547 symbol_conf.nr_events = nr_attrs; perf_session__read_header()
2654 symbol_conf.nr_events = evlist->nr_entries; perf_event__process_attr()
/linux-4.1.27/drivers/s390/net/
H A Dfsm.c17 int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) init_fsm()
40 f->nr_events = nr_events; init_fsm()
46 m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); init_fsm()
57 (tmpl[i].cond_event >= nr_events) ) { init_fsm()
61 (long)tmpl[i].cond_event, (long)f->nr_events); init_fsm()
16 init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) init_fsm() argument
H A Dfsm.h44 int nr_events; member in struct:__anon8473
103 * @param nr_events Number of events for this instance.
111 int nr_states, int nr_events, const fsm_node *tmpl,
149 (event >= fi->f->nr_events) ) { fsm_event()
152 (long)fi->f->nr_events); fsm_event()
/linux-4.1.27/fs/
H A Daio.c104 * The real limit is nr_events - 1, which will be larger (see
110 unsigned nr_events; member in struct:kioctx
430 unsigned nr_events = ctx->max_reqs; aio_setup_ring() local
438 nr_events += 2; /* 1 is required, 2 for good luck */ aio_setup_ring()
441 size += sizeof(struct io_event) * nr_events; aio_setup_ring()
454 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) aio_setup_ring()
504 ctx->nr_events = nr_events; /* trusted copy */ aio_setup_ring()
507 ring->nr = nr_events; /* user copy */ aio_setup_ring()
681 static struct kioctx *ioctx_alloc(unsigned nr_events) ioctx_alloc() argument
692 * and unavailable, double nr_events so userspace sees what they ioctx_alloc()
696 nr_events = max(nr_events, num_possible_cpus() * 4); ioctx_alloc()
697 nr_events *= 2; ioctx_alloc()
700 if (nr_events > (0x10000000U / sizeof(struct io_event))) { ioctx_alloc()
701 pr_debug("ENOMEM: nr_events too high\n"); ioctx_alloc()
705 if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) ioctx_alloc()
712 ctx->max_reqs = nr_events; ioctx_alloc()
738 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); ioctx_alloc()
739 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); ioctx_alloc()
745 if (aio_nr + nr_events > (aio_max_nr * 2UL) || ioctx_alloc()
746 aio_nr + nr_events < aio_nr) { ioctx_alloc()
765 ctx, ctx->user_id, mm, ctx->nr_events); ioctx_alloc()
935 head %= ctx->nr_events; refill_reqs_available()
939 events_in_ring = ctx->nr_events - (head - tail); refill_reqs_available()
1086 if (++tail >= ctx->nr_events) aio_complete()
1182 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); aio_read_events_ring()
1187 head %= ctx->nr_events; aio_read_events_ring()
1188 tail %= ctx->nr_events; aio_read_events_ring()
1195 avail = (head <= tail ? tail : ctx->nr_events) - head; aio_read_events_ring()
1219 head %= ctx->nr_events; aio_read_events_ring()
1295 * Create an aio_context capable of receiving at least nr_events.
1300 * if the specified nr_events exceeds internal limits. May fail
1301 * with -EAGAIN if the specified nr_events exceeds the user's limit
1307 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) SYSCALL_DEFINE2()
1318 if (unlikely(ctx || nr_events == 0)) { SYSCALL_DEFINE2()
1319 pr_debug("EINVAL: ctx %lu nr_events %u\n", SYSCALL_DEFINE2()
1320 ctx, nr_events); SYSCALL_DEFINE2()
1324 ioctx = ioctx_alloc(nr_events); SYSCALL_DEFINE2()
/linux-4.1.27/tools/perf/ui/browsers/
H A Dhists.c440 browser->hists->stats.nr_events[PERF_RECORD_LOST]) { hist_browser__run()
442 browser->hists->stats.nr_events[PERF_RECORD_LOST]; hist_browser__run()
1243 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; hists__browser_title()
1244 u64 nr_events = hists->stats.total_period; hists__browser_title() local
1252 nr_events = hists->stats.total_non_filtered_period; hists__browser_title()
1266 nr_events += pos_hists->stats.total_non_filtered_period; for_each_group_member()
1268 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE]; for_each_group_member()
1269 nr_events += pos_hists->stats.total_period; for_each_group_member()
1277 nr_samples, unit, ev_name, nr_events);
1416 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, perf_evsel__hists_browse() argument
1510 if (nr_events == 1) perf_evsel__hists_browse()
1835 unsigned long nr_events = hists->stats.nr_events[PERF_RECORD_SAMPLE]; perf_evsel_menu__write() local
1851 nr_events += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE]; for_each_group_member()
1855 nr_events = convert_unit(nr_events, &unit);
1856 printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
1860 nr_events = hists->stats.nr_events[PERF_RECORD_LOST];
1861 if (nr_events != 0) {
1865 nr_events = convert_unit(nr_events, &unit);
1867 nr_events, unit, unit == ' ' ? "" : " ");
1878 int nr_events, const char *help, perf_evsel_menu__run()
1916 key = perf_evsel__hists_browse(pos, nr_events, help, perf_evsel_menu__run()
1877 perf_evsel_menu__run(struct perf_evsel_menu *menu, int nr_events, const char *help, struct hist_browser_timer *hbt) perf_evsel_menu__run() argument
H A Dannotate.c20 * actual length of this array is saved on the nr_events field
43 int nr_events; member in struct:annotate_browser
103 int i, pcnt_width = 7 * ab->nr_events; annotate_browser__write()
107 for (i = 0; i < ab->nr_events; i++) { annotate_browser__write()
113 for (i = 0; i < ab->nr_events; i++) { annotate_browser__write()
247 pcnt_width *= ab->nr_events; annotate_browser__draw_current_jump()
260 pcnt_width = 7 * ab->nr_events; annotate_browser__refresh()
284 int nr_events) disasm_rb_tree__insert()
294 if (disasm__cmp(bdl, l, nr_events)) disasm_rb_tree__insert()
368 for (i = 0; i < browser->nr_events; i++) { annotate_browser__calc_percent()
384 browser->nr_events); annotate_browser__calc_percent()
969 browser.nr_events = nr_pcnt; symbol__tui_annotate()
283 disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl, int nr_events) disasm_rb_tree__insert() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_irq.c215 atomic_inc(&qp->nr_events); qp_event_callback()
235 if (atomic_dec_and_test(&qp->nr_events)) qp_event_callback()
249 atomic_inc(&cq->nr_events); cq_event_callback()
257 if (atomic_dec_and_test(&cq->nr_events)) cq_event_callback()
527 atomic_inc(&cq->nr_events); process_eqe()
540 if (atomic_dec_and_test(&cq->nr_events)) process_eqe()
586 atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events); ehca_process_eq()
629 if (atomic_dec_and_test(&cq->nr_events)) ehca_process_eq()
734 if (atomic_dec_and_test(&cq->nr_events)) run_comp_task()
H A Dehca_cq.c158 atomic_set(&my_cq->nr_events, 0); ehca_create_cq()
342 wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events)); ehca_destroy_cq()
H A Dehca_classes.h223 atomic_t nr_events; /* events seen */ member in struct:ehca_qp
251 atomic_t nr_events; /* #events seen */ member in struct:ehca_cq
H A Dehca_qp.c624 atomic_set(&my_qp->nr_events, 0); internal_create_qp()
2181 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events)); internal_destroy_qp()
/linux-4.1.27/tools/perf/
H A Dbuiltin-report.c270 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; hists__fprintf_nr_sample_events()
271 u64 nr_events = hists->stats.total_period; hists__fprintf_nr_sample_events() local
278 nr_events = hists->stats.total_non_filtered_period; hists__fprintf_nr_sample_events()
292 nr_events += pos_hists->stats.total_non_filtered_period; for_each_group_member()
294 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE]; for_each_group_member()
295 nr_events += pos_hists->stats.total_period; for_each_group_member()
306 ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
309 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
H A Dbuiltin-sched.c41 unsigned long nr_events; member in struct:task_desc
155 unsigned long nr_events; member in struct:perf_sched
243 unsigned long idx = task->nr_events; get_new_event()
249 task->nr_events++; get_new_event()
250 size = sizeof(struct sched_atom *) * task->nr_events; get_new_event()
261 if (!task->nr_events) last_event()
264 return task->atoms[task->nr_events - 1]; last_event()
380 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", print_task_traces()
381 task->nr, task->comm, task->pid, task->nr_events); print_task_traces()
526 for (i = 0; i < this_task->nr_events; i++) { thread_func()
1512 sched->nr_events = session->evlist->stats.nr_events[0]; perf_sched__read_events()
1514 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; perf_sched__read_events()
1530 if (sched->nr_lost_events && sched->nr_events) { print_bad_events()
1532 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, print_bad_events()
1533 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); print_bad_events()
H A Dbuiltin-trace.c1166 unsigned long nr_events; member in struct:thread_trace
1204 ++ttrace->nr_events; thread__trace()
1232 unsigned long nr_events; member in struct:trace
2004 ++trace->nr_events; trace__process_sample()
2254 before = trace->nr_events; trace__run()
2262 ++trace->nr_events; trace__run()
2284 if (trace->nr_events == before) { trace__run()
2516 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; trace__fprintf_one_thread()
2519 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); trace__fprintf_one_thread()
H A Dbuiltin-annotate.c230 u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; __cmd_annotate()
H A Dbuiltin-top.c265 hists->stats.nr_events[PERF_RECORD_LOST]) { perf_top__print_sym_table()
267 hists->stats.nr_events[PERF_RECORD_LOST]; perf_top__print_sym_table()
1232 symbol_conf.nr_events = top.evlist->nr_entries; cmd_top()
H A Dbuiltin-kvm.c1377 symbol_conf.nr_events = kvm->evlist->nr_entries; kvm_events_live()
/linux-4.1.27/tools/perf/ui/stdio/
H A Dhist.c499 if (stats->nr_events[i] == 0) events_stats__fprintf()
507 stats->nr_events[i]); events_stats__fprintf()
/linux-4.1.27/include/linux/
H A Dhrtimer.h176 * @nr_events: Total number of hrtimer interrupt events
192 unsigned long nr_events; member in struct:hrtimer_cpu_base
H A Dperf_event.h520 int nr_events; member in struct:perf_event_context
/linux-4.1.27/kernel/time/
H A Dtimer_list.c158 P(nr_events); print_cpu()
H A Dhrtimer.c1252 cpu_base->nr_events++; hrtimer_interrupt()
/linux-4.1.27/tools/lib/traceevent/
H A Devent-parse.c695 (pevent->nr_events + 1)); add_event()
701 for (i = 0; i < pevent->nr_events; i++) { add_event()
705 if (i < pevent->nr_events) add_event()
708 sizeof(event) * (pevent->nr_events - i)); add_event()
711 pevent->nr_events++; add_event()
3342 eventptr = bsearch(&pkey, pevent->events, pevent->nr_events, pevent_find_event()
3374 for (i = 0; i < pevent->nr_events; i++) { pevent_find_event_by_name()
3383 if (i == pevent->nr_events) pevent_find_event_by_name()
5332 events = malloc(sizeof(*events) * (pevent->nr_events + 1)); pevent_list_events()
5336 memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events); pevent_list_events()
5337 events[pevent->nr_events] = NULL; pevent_list_events()
5362 qsort(events, pevent->nr_events, sizeof(*events), sort); pevent_list_events()
6569 for (i = 0; i < pevent->nr_events; i++) pevent_free()
H A Devent-parse.h492 int nr_events; member in struct:pevent
H A Dparse-filter.c313 for (i = 0; i < pevent->nr_events; i++) { find_event()
/linux-4.1.27/block/
H A Dgenhd.c1626 int nr_events = 0, i; disk_check_events() local
1652 envp[nr_events++] = disk_uevents[i]; disk_check_events()
1654 if (nr_events) disk_check_events()
/linux-4.1.27/kernel/trace/
H A Dtrace_events.c482 if (!--dir->nr_events) { remove_subsystem()
1150 if (dir->nr_events) { subsystem_open()
1511 dir->nr_events++; event_subsystem_dir()
1546 dir->nr_events = 1; event_subsystem_dir()
H A Dtrace.h1002 int nr_events; member in struct:ftrace_subsystem_dir
H A Dtrace_events_filter.c2041 if (!dir->nr_events) { apply_subsystem_event_filter()
/linux-4.1.27/tools/perf/ui/
H A Dhist.c364 HPP_RAW_FNS(samples, nr_events)
/linux-4.1.27/kernel/events/
H A Dcore.c1253 ctx->nr_events++; list_add_event()
1418 ctx->nr_events--; list_del_event()
1632 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { __perf_remove_from_context()
2389 if (likely(!ctx->nr_events)) ctx_sched_out()
2766 if (likely(!ctx->nr_events)) ctx_sched_in()
2811 if (ctx->nr_events) perf_event_context_sched_in()
3051 if (cpuctx->ctx.nr_events) { perf_rotate_context()
3052 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) perf_rotate_context()
3057 if (ctx && ctx->nr_events) { perf_rotate_context()
3058 if (ctx->nr_events != ctx->nr_active) perf_rotate_context()
3139 if (!ctx || !ctx->nr_events) perf_event_enable_on_exec()

Completed in 3437 milliseconds