/linux-4.4.14/tools/perf/arch/x86/util/
D | kvm-stat.c
    22   key->key = perf_evsel__intval(evsel, sample, "gpa");  in mmio_event_get_key()
    23   key->info = perf_evsel__intval(evsel, sample, "type");  in mmio_event_get_key()
    39   perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {  in mmio_event_begin()
    56   perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {  in mmio_event_end()
    85   key->key = perf_evsel__intval(evsel, sample, "port");  in ioport_event_get_key()
    86   key->info = perf_evsel__intval(evsel, sample, "rw");  in ioport_event_get_key()

/linux-4.4.14/tools/perf/arch/s390/util/
D | kvm-stat.c
    27   insn = perf_evsel__intval(evsel, sample, "instruction");  in event_icpt_insn_get_key()
    36   key->key = perf_evsel__intval(evsel, sample, "order_code");  in event_sigp_get_key()
    44   key->key = perf_evsel__intval(evsel, sample, "code");  in event_diag_get_key()
    52   key->key = perf_evsel__intval(evsel, sample, "code");  in event_icpt_prog_get_key()

/linux-4.4.14/tools/perf/
D | builtin-timechart.c
    584   u32 state = perf_evsel__intval(evsel, sample, "state");  in process_sample_cpu_idle()
    585   u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");  in process_sample_cpu_idle()
    600   u32 state = perf_evsel__intval(evsel, sample, "state");  in process_sample_cpu_frequency()
    601   u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");  in process_sample_cpu_frequency()
    613   u8 flags = perf_evsel__intval(evsel, sample, "common_flags");  in process_sample_sched_wakeup()
    614   int waker = perf_evsel__intval(evsel, sample, "common_pid");  in process_sample_sched_wakeup()
    615   int wakee = perf_evsel__intval(evsel, sample, "pid");  in process_sample_sched_wakeup()
    627   int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");  in process_sample_sched_switch()
    628   int next_pid = perf_evsel__intval(evsel, sample, "next_pid");  in process_sample_sched_switch()
    629   u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");  in process_sample_sched_switch()
    [all …]

D | builtin-kmem.c
    156   unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),  in perf_evsel__process_alloc_event()
    157   call_site = perf_evsel__intval(evsel, sample, "call_site");  in perf_evsel__process_alloc_event()
    158   int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),  in perf_evsel__process_alloc_event()
    159   bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");  in perf_evsel__process_alloc_event()
    179   node2 = perf_evsel__intval(evsel, sample, "node");  in perf_evsel__process_alloc_node_event()
    219   unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");  in perf_evsel__process_free_event()
    762   unsigned int order = perf_evsel__intval(evsel, sample, "order");  in perf_evsel__process_page_alloc_event()
    763   unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");  in perf_evsel__process_page_alloc_event()
    764   unsigned int migrate_type = perf_evsel__intval(evsel, sample,  in perf_evsel__process_page_alloc_event()
    776   page = perf_evsel__intval(evsel, sample, "pfn");  in perf_evsel__process_page_alloc_event()
    [all …]

D | builtin-sched.c
    699    const u32 pid = perf_evsel__intval(evsel, sample, "pid");  in replay_wakeup_event()
    722    const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),  in replay_switch_event()
    723    next_pid = perf_evsel__intval(evsel, sample, "next_pid");  in replay_switch_event()
    724    const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");  in replay_switch_event()
    959    const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),  in latency_switch_event()
    960    next_pid = perf_evsel__intval(evsel, sample, "next_pid");  in latency_switch_event()
    961    const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");  in latency_switch_event()
    1029   const u32 pid = perf_evsel__intval(evsel, sample, "pid");  in latency_runtime_event()
    1030   const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");  in latency_runtime_event()
    1064   const u32 pid = perf_evsel__intval(evsel, sample, "pid");  in latency_wakeup_event()
    [all …]

D | builtin-lock.c
    403   u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");  in report_lock_acquire_event()
    404   int flag = perf_evsel__intval(evsel, sample, "flag");  in report_lock_acquire_event()
    476   u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");  in report_lock_acquired_event()
    538   u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");  in report_lock_contended_event()
    593   u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");  in report_lock_release_event()

D | builtin-kvm.c
    41    key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);  in exit_event_get_key()
    388   vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);  in per_vcpu_record()

D | builtin-inject.c
    451   u32 pid = perf_evsel__intval(evsel, sample, "pid");  in perf_inject__sched_stat()

D | builtin-trace.c
    2154   u64 runtime = perf_evsel__intval(evsel, sample, "runtime");  in trace__sched_stat_runtime()
    2173   (pid_t)perf_evsel__intval(evsel, sample, "pid"),  in trace__sched_stat_runtime()
    2175   perf_evsel__intval(evsel, sample, "vruntime"));  in trace__sched_stat_runtime()

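All of the builtin-* call sites listed above follow the same shape: a per-event sample handler receives the evsel that matched the tracepoint plus the decoded sample, and pulls individual fields out of the raw payload by name via perf_evsel__intval(). Below is a minimal sketch of that pattern, assuming it is compiled inside the tools/perf tree of this kernel (so "util/evsel.h" and "util/event.h" provide struct perf_evsel, struct perf_sample and the u32/u64 typedefs); handle_sched_switch() is a hypothetical handler modelled on the sched_switch handlers in builtin-sched.c and builtin-timechart.c, not code from the tree:

    #include <inttypes.h>
    #include <stdio.h>
    #include "util/evsel.h"   /* perf_evsel__intval() declaration (assumed include path) */
    #include "util/event.h"   /* struct perf_sample (assumed include path) */

    /* Hypothetical handler: read sched_switch fields by name from the sample. */
    static int handle_sched_switch(struct perf_evsel *evsel,
                                   struct perf_sample *sample)
    {
            /* Each call resolves the named tracepoint field for this evsel and
             * returns its value widened to u64; narrower uses simply truncate. */
            const u32 prev_pid   = perf_evsel__intval(evsel, sample, "prev_pid");
            const u32 next_pid   = perf_evsel__intval(evsel, sample, "next_pid");
            const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");

            printf("switch %u -> %u (prev_state %" PRIu64 ")\n",
                   prev_pid, next_pid, prev_state);
            return 0;
    }
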
/linux-4.4.14/tools/perf/tests/
D | openat-syscall-tp-fields.c
    96   tp_flags = perf_evsel__intval(evsel, &sample, "flags");  in test__syscall_openat_tp_fields()

D | switch-tracking.c
    129   next_tid = perf_evsel__intval(evsel, &sample, "next_pid");  in process_sample_event()
    130   prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid");  in process_sample_event()

/linux-4.4.14/tools/perf/util/
D | evsel.h
    244   u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,

D | evsel.c
    2169   u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,  in perf_evsel__intval() function

D | intel-pt.c
    1565   tid = perf_evsel__intval(evsel, sample, "next_pid");  in intel_pt_process_switch()

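evsel.h line 244 carries the declaration and evsel.c line 2169 the definition. For orientation only, here is a generic illustration of what an intval-style accessor does, written with made-up types and names rather than the actual util/evsel.c code: resolve the field name to an offset/size pair in the event's format description, then copy that many bytes out of the raw tracepoint payload carried in the sample (little-endian host assumed for the sketch):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-ins for the event format and the decoded sample. */
    struct field_desc { const char *name; unsigned int offset; unsigned int size; };
    struct raw_sample { const void *raw_data; unsigned int raw_size; };

    static uint64_t read_named_field(const struct field_desc *fields, int nr_fields,
                                     const struct raw_sample *sample, const char *name)
    {
            int i;

            for (i = 0; i < nr_fields; i++) {
                    uint64_t val = 0;

                    if (strcmp(fields[i].name, name))
                            continue;
                    /* Bounds-check, then read the field bytes into a 64-bit value. */
                    if (fields[i].offset + fields[i].size <= sample->raw_size &&
                        fields[i].size <= sizeof(val))
                            memcpy(&val, (const char *)sample->raw_data + fields[i].offset,
                                   fields[i].size);
                    return val;
            }
            return 0; /* unknown field name */
    }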