Lines Matching refs:sample
1510 struct perf_sample *sample) in perf_evsel__parse_id_sample() argument
1513 const u64 *array = event->sample.array; in perf_evsel__parse_id_sample()
1521 sample->id = *array; in perf_evsel__parse_id_sample()
1533 sample->cpu = u.val32[0]; in perf_evsel__parse_id_sample()
1538 sample->stream_id = *array; in perf_evsel__parse_id_sample()
1543 sample->id = *array; in perf_evsel__parse_id_sample()
1548 sample->time = *array; in perf_evsel__parse_id_sample()
1561 sample->pid = u.val32[0]; in perf_evsel__parse_id_sample()
1562 sample->tid = u.val32[1]; in perf_evsel__parse_id_sample()
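The matches above come from the backward walk that pulls the trailing sample_id fields off the end of a non-sample record. A minimal standalone sketch of that walk, for orientation only: struct id_sample and parse_id_sample() are hypothetical names, only the PERF_SAMPLE_* bits from <linux/perf_event.h> are real, and the byte-swap handling done by the real function is omitted.

#include <linux/perf_event.h>
#include <stdint.h>

struct id_sample {			/* illustrative subset of struct perf_sample */
	uint64_t id, stream_id, time;
	uint32_t pid, tid, cpu;
};

/* array points one u64 past the end of the record's trailing id block */
static void parse_id_sample(const uint64_t *array, uint64_t sample_type,
			    struct id_sample *s)
{
	union { uint64_t val64; uint32_t val32[2]; } u;

	array--;				/* step onto the last u64 of the record */

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		s->id = *array--;

	if (sample_type & PERF_SAMPLE_CPU) {
		u.val64 = *array--;
		s->cpu = u.val32[0];		/* val32[1] is reserved; no cross-endian handling here */
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		s->stream_id = *array--;

	if (sample_type & PERF_SAMPLE_ID)
		s->id = *array--;

	if (sample_type & PERF_SAMPLE_TIME)
		s->time = *array--;

	if (sample_type & PERF_SAMPLE_TID) {
		u.val64 = *array--;
		s->pid = u.val32[0];
		s->tid = u.val32[1];
	}
}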
1612 array = event->sample.array; in perf_evsel__parse_sample()
1851 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, in perf_event__sample_event_size() argument
1891 sz = sample->read.group.nr * in perf_event__sample_event_size()
1900 sz = (sample->callchain->nr + 1) * sizeof(u64); in perf_event__sample_event_size()
1906 result += sample->raw_size; in perf_event__sample_event_size()
1910 sz = sample->branch_stack->nr * sizeof(struct branch_entry); in perf_event__sample_event_size()
1916 if (sample->user_regs.abi) { in perf_event__sample_event_size()
1918 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); in perf_event__sample_event_size()
1926 sz = sample->user_stack.size; in perf_event__sample_event_size()
1944 if (sample->intr_regs.abi) { in perf_event__sample_event_size()
1946 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); in perf_event__sample_event_size()
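The size helper's matches show the same per-bit pattern: each set PERF_SAMPLE_* flag contributes either a fixed u64 or a length-prefixed block (callchain, raw data, branch stack, register masks, user stack). A reduced sketch of that accumulation covering only a few of the bits the real perf_event__sample_event_size() handles; sample_event_size() here is an illustrative name, not the perf API.

#include <linux/perf_event.h>
#include <stddef.h>
#include <stdint.h>

static size_t sample_event_size(uint64_t type, uint64_t nr_callchain,
				uint32_t raw_size)
{
	size_t sz = sizeof(struct perf_event_header);

	if (type & PERF_SAMPLE_IDENTIFIER)
		sz += sizeof(uint64_t);
	if (type & PERF_SAMPLE_IP)
		sz += sizeof(uint64_t);
	if (type & PERF_SAMPLE_TID)
		sz += sizeof(uint64_t);		/* u32 pid + u32 tid share one u64 */
	if (type & PERF_SAMPLE_TIME)
		sz += sizeof(uint64_t);
	if (type & PERF_SAMPLE_CALLCHAIN)	/* u64 nr + nr u64 entries */
		sz += (nr_callchain + 1) * sizeof(uint64_t);
	if (type & PERF_SAMPLE_RAW)		/* u32 size field + raw payload */
		sz += sizeof(uint32_t) + raw_size;

	return sz;
}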
1958 const struct perf_sample *sample, in perf_event__synthesize_sample() argument
1969 array = event->sample.array; in perf_event__synthesize_sample()
1972 *array = sample->id; in perf_event__synthesize_sample()
1977 *array = sample->ip; in perf_event__synthesize_sample()
1982 u.val32[0] = sample->pid; in perf_event__synthesize_sample()
1983 u.val32[1] = sample->tid; in perf_event__synthesize_sample()
1998 *array = sample->time; in perf_event__synthesize_sample()
2003 *array = sample->addr; in perf_event__synthesize_sample()
2008 *array = sample->id; in perf_event__synthesize_sample()
2013 *array = sample->stream_id; in perf_event__synthesize_sample()
2018 u.val32[0] = sample->cpu; in perf_event__synthesize_sample()
2031 *array = sample->period; in perf_event__synthesize_sample()
2037 *array = sample->read.group.nr; in perf_event__synthesize_sample()
2039 *array = sample->read.one.value; in perf_event__synthesize_sample()
2043 *array = sample->read.time_enabled; in perf_event__synthesize_sample()
2048 *array = sample->read.time_running; in perf_event__synthesize_sample()
2054 sz = sample->read.group.nr * in perf_event__synthesize_sample()
2056 memcpy(array, sample->read.group.values, sz); in perf_event__synthesize_sample()
2059 *array = sample->read.one.id; in perf_event__synthesize_sample()
2065 sz = (sample->callchain->nr + 1) * sizeof(u64); in perf_event__synthesize_sample()
2066 memcpy(array, sample->callchain, sz); in perf_event__synthesize_sample()
2071 u.val32[0] = sample->raw_size; in perf_event__synthesize_sample()
2084 memcpy(array, sample->raw_data, sample->raw_size); in perf_event__synthesize_sample()
2085 array = (void *)array + sample->raw_size; in perf_event__synthesize_sample()
2089 sz = sample->branch_stack->nr * sizeof(struct branch_entry); in perf_event__synthesize_sample()
2091 memcpy(array, sample->branch_stack, sz); in perf_event__synthesize_sample()
2096 if (sample->user_regs.abi) { in perf_event__synthesize_sample()
2097 *array++ = sample->user_regs.abi; in perf_event__synthesize_sample()
2098 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); in perf_event__synthesize_sample()
2099 memcpy(array, sample->user_regs.regs, sz); in perf_event__synthesize_sample()
2107 sz = sample->user_stack.size; in perf_event__synthesize_sample()
2110 memcpy(array, sample->user_stack.data, sz); in perf_event__synthesize_sample()
2117 *array = sample->weight; in perf_event__synthesize_sample()
2122 *array = sample->data_src; in perf_event__synthesize_sample()
2127 *array = sample->transaction; in perf_event__synthesize_sample()
2132 if (sample->intr_regs.abi) { in perf_event__synthesize_sample()
2133 *array++ = sample->intr_regs.abi; in perf_event__synthesize_sample()
2134 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); in perf_event__synthesize_sample()
2135 memcpy(array, sample->intr_regs.regs, sz); in perf_event__synthesize_sample()
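Synthesis is the forward mirror of the parse and size calculation above: fields are emitted in the fixed PERF_SAMPLE_* layout order, with the variable-length blocks memcpy'd after their length words. A small sketch of the fixed-size prefix only; pack_sample_prefix() is a hypothetical helper, not perf_event__synthesize_sample() itself.

#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t *pack_sample_prefix(uint64_t *array, uint64_t type,
				    uint64_t id, uint64_t ip,
				    uint32_t pid, uint32_t tid,
				    uint64_t time)
{
	union { uint64_t val64; uint32_t val32[2]; } u;

	if (type & PERF_SAMPLE_IDENTIFIER)
		*array++ = id;
	if (type & PERF_SAMPLE_IP)
		*array++ = ip;
	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = pid;		/* pid and tid packed into one u64 */
		u.val32[1] = tid;
		*array++ = u.val64;
	}
	if (type & PERF_SAMPLE_TIME)
		*array++ = time;
	if (type & PERF_SAMPLE_ID)
		*array++ = id;

	return array;				/* first u64 after what was written */
}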
2150 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, in perf_evsel__rawptr() argument
2162 offset = *(int *)(sample->raw_data + field->offset); in perf_evsel__rawptr()
2166 return sample->raw_data + offset; in perf_evsel__rawptr()
2169 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, in perf_evsel__intval() argument
2179 ptr = sample->raw_data + field->offset; in perf_evsel__intval()
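The last two accessors resolve a tracepoint field by name against the event's format description and read it out of sample->raw_data. A hedged usage sketch, assuming it is compiled inside the perf tree where util/evsel.h declares both; the handler and the field names ("pid", "comm") are hypothetical.

#include "util/evsel.h"

static void handle_tracepoint_sample(struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	/* integer tracepoint field, looked up by name in the raw payload */
	u64 pid = perf_evsel__intval(evsel, sample, "pid");

	/* pointer into the raw payload, e.g. a string or dynamic-array field */
	const char *comm = perf_evsel__rawptr(evsel, sample, "comm");

	(void)pid;
	(void)comm;
}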