Lines matching refs: event
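These hits appear to come from tools/perf/builtin-kvm.c, the perf kvm stat tool, which aggregates KVM begin/end tracepoint pairs into per-event and per-vCPU latency statistics. The short sketches after each group below are simplified reconstructions under stated assumptions, not verbatim kernel code.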
130 struct kvm_event *event; in clear_events_cache_stats() local
136 list_for_each_entry(event, head, hash_entry) { in clear_events_cache_stats()
138 event->total.time = 0; in clear_events_cache_stats()
139 init_stats(&event->total.stats); in clear_events_cache_stats()
141 for (j = 0; j < event->max_vcpu; ++j) { in clear_events_cache_stats()
142 event->vcpu[j].time = 0; in clear_events_cache_stats()
143 init_stats(&event->vcpu[j].stats); in clear_events_cache_stats()
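Lines 130-143 reset the cached statistics: for each cached event, the total bucket and every per-vCPU slot are zeroed. A minimal standalone sketch of the same reset pattern, with a simplified stats type (the real code visits entries via list_for_each_entry over hash buckets):

    #include <string.h>

    struct stats { unsigned long n; double mean, M2; }; /* simplified */

    struct kvm_event_stats {
        unsigned long long time;
        struct stats stats;
    };

    static void init_stats(struct stats *s)
    {
        memset(s, 0, sizeof(*s));
    }

    /* Reset one event's totals and every per-vCPU slot, as the loop
     * over the hash cache does for each entry it visits. */
    static void clear_event_stats(struct kvm_event_stats *total,
                                  struct kvm_event_stats *vcpu, int max_vcpu)
    {
        int j;

        total->time = 0;
        init_stats(&total->stats);

        for (j = 0; j < max_vcpu; ++j) {
            vcpu[j].time = 0;
            init_stats(&vcpu[j].stats);
        }
    }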
155 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) in kvm_event_expand() argument
157 int old_max_vcpu = event->max_vcpu; in kvm_event_expand()
160 if (vcpu_id < event->max_vcpu) in kvm_event_expand()
163 while (event->max_vcpu <= vcpu_id) in kvm_event_expand()
164 event->max_vcpu += DEFAULT_VCPU_NUM; in kvm_event_expand()
166 prev = event->vcpu; in kvm_event_expand()
167 event->vcpu = realloc(event->vcpu, in kvm_event_expand()
168 event->max_vcpu * sizeof(*event->vcpu)); in kvm_event_expand()
169 if (!event->vcpu) { in kvm_event_expand()
175 memset(event->vcpu + old_max_vcpu, 0, in kvm_event_expand()
176 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); in kvm_event_expand()
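kvm_event_expand (lines 155-176) grows the per-vCPU array in DEFAULT_VCPU_NUM increments and zeroes only the newly added tail. A standalone sketch of that grow-and-zero realloc pattern; names are simplified and the constant's value is an assumption:

    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    #define DEFAULT_VCPU_NUM 8       /* assumption: in-tree value may differ */

    struct slot { unsigned long long time; };

    static bool expand_vcpu_array(struct slot **vcpu, int *max_vcpu, int vcpu_id)
    {
        int old_max = *max_vcpu;
        struct slot *tmp;

        if (vcpu_id < old_max)
            return true;             /* already large enough */

        while (*max_vcpu <= vcpu_id)
            *max_vcpu += DEFAULT_VCPU_NUM;

        tmp = realloc(*vcpu, *max_vcpu * sizeof(*tmp));
        if (!tmp) {
            free(*vcpu);             /* don't leak the old buffer */
            *vcpu = NULL;
            return false;
        }

        /* Zero only the slots added by this growth step. */
        memset(tmp + old_max, 0, (*max_vcpu - old_max) * sizeof(*tmp));
        *vcpu = tmp;
        return true;
    }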
182 struct kvm_event *event; in kvm_alloc_init_event() local
184 event = zalloc(sizeof(*event)); in kvm_alloc_init_event()
185 if (!event) { in kvm_alloc_init_event()
190 event->key = *key; in kvm_alloc_init_event()
191 init_stats(&event->total.stats); in kvm_alloc_init_event()
192 return event; in kvm_alloc_init_event()
198 struct kvm_event *event; in find_create_kvm_event() local
204 list_for_each_entry(event, head, hash_entry) { in find_create_kvm_event()
205 if (event->key.key == key->key && event->key.info == key->info) in find_create_kvm_event()
206 return event; in find_create_kvm_event()
209 event = kvm_alloc_init_event(key); in find_create_kvm_event()
210 if (!event) in find_create_kvm_event()
213 list_add(&event->hash_entry, head); in find_create_kvm_event()
214 return event; in find_create_kvm_event()
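find_create_kvm_event (lines 198-214) is a classic find-or-create over a hash bucket: linear-scan the bucket's list comparing both key fields, and on a miss allocate a zeroed entry via kvm_alloc_init_event (lines 182-192) and push it onto the bucket. A simplified standalone sketch using a plain singly linked chain in place of the kernel's list_head:

    #include <stdlib.h>

    struct event_key { unsigned long long key; int info; };

    struct kvm_event {
        struct kvm_event *next;      /* simplified bucket chaining */
        struct event_key key;
    };

    static struct kvm_event *find_create_event(struct kvm_event **bucket,
                                               struct event_key *key)
    {
        struct kvm_event *event;

        for (event = *bucket; event; event = event->next)
            if (event->key.key == key->key && event->key.info == key->info)
                return event;        /* hit: reuse the cached entry */

        event = calloc(1, sizeof(*event));   /* zalloc() equivalent */
        if (!event)
            return NULL;

        event->key = *key;
        event->next = *bucket;       /* list_add(): push at bucket head */
        *bucket = event;
        return event;
    }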
221 struct kvm_event *event = NULL; in handle_begin_event() local
224 event = find_create_kvm_event(kvm, key); in handle_begin_event()
226 vcpu_record->last_event = event; in handle_begin_event()
238 static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) in kvm_event_rel_stddev() argument
240 struct kvm_event_stats *kvm_stats = &event->total; in kvm_event_rel_stddev()
243 kvm_stats = &event->vcpu[vcpu_id]; in kvm_event_rel_stddev()
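kvm_event_rel_stddev (lines 238-243) selects either the aggregate stats or one vCPU's stats (vcpu_id == -1 meaning "all vCPUs"), then reports the relative standard deviation, i.e. stddev as a percentage of the mean. A hedged sketch of that final computation:

    /* Relative stddev = 100 * stddev / mean, guarding the zero-mean case;
     * stddev and avg are assumed already derived from the running stats. */
    static double rel_stddev_pct(double stddev, double avg)
    {
        return avg ? 100.0 * stddev / avg : 0.0;
    }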
249 static bool update_kvm_event(struct kvm_event *event, int vcpu_id, in update_kvm_event() argument
253 kvm_update_event_stats(&event->total, time_diff); in update_kvm_event()
257 if (!kvm_event_expand(event, vcpu_id)) in update_kvm_event()
260 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); in update_kvm_event()
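update_kvm_event (lines 249-260) feeds one measured duration into both the aggregate bucket and, unless vcpu_id is -1, the per-vCPU bucket, expanding the vCPU array first when the id is out of range. A sketch of the double update, assuming a Welford-style online mean/variance accumulator (perf's stats helpers use the same scheme):

    struct run_stats { unsigned long n; double mean, M2; };

    /* Welford's online update: numerically stable running mean/variance. */
    static void stats_update(struct run_stats *s, double x)
    {
        double delta = x - s->mean;

        s->n++;
        s->mean += delta / (double)s->n;
        s->M2 += delta * (x - s->mean);
    }

    static bool update_event(struct run_stats *total, struct run_stats *vcpu,
                             int max_vcpu, int vcpu_id, double time_diff)
    {
        stats_update(total, time_diff); /* aggregate always gets the sample */

        if (vcpu_id == -1)              /* no per-vCPU attribution requested */
            return true;

        if (vcpu_id >= max_vcpu)        /* the real code expands the array here */
            return false;

        stats_update(&vcpu[vcpu_id], time_diff);
        return true;
    }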
291 struct kvm_event *event = NULL; in handle_child_event() local
294 event = find_create_kvm_event(kvm, key); in handle_child_event()
296 vcpu_record->last_event = event; in handle_child_event()
301 static bool skip_event(const char *event) in skip_event() argument
306 if (!strcmp(event, *skip_events)) in skip_event()
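skip_event (lines 301-306) is a membership test against a NULL-terminated table of event names to ignore. A standalone sketch; the table contents here are hypothetical:

    #include <string.h>
    #include <stdbool.h>

    /* Hypothetical skip table; the in-tree list differs per architecture. */
    static const char *skip_events_tbl[] = { "HLT", NULL };

    static bool skip_event(const char *name)
    {
        const char **p;

        for (p = skip_events_tbl; *p; p++)
            if (!strcmp(name, *p))
                return true;
        return false;
    }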
317 struct kvm_event *event; in handle_end_event() local
326 event = vcpu_record->last_event; in handle_end_event()
339 if (!event && key->key == INVALID_KEY) in handle_end_event()
342 if (!event) in handle_end_event()
343 event = find_create_kvm_event(kvm, key); in handle_end_event()
345 if (!event) in handle_end_event()
362 kvm->events_ops->decode_key(kvm, &event->key, decode); in handle_end_event()
370 return update_kvm_event(event, vcpu, time_diff); in handle_end_event()
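handle_end_event (lines 317-370) closes the pair opened by handle_begin_event (lines 221-226): it retrieves the event stashed in vcpu_record->last_event, tolerates an end without a begin only when the key is INVALID_KEY (the begin may predate the start of tracing), falls back to find_create_kvm_event otherwise, then computes the begin-to-end delta and passes it to update_kvm_event. A condensed sketch of just the pairing decision; the sentinel value and field names are simplifications:

    #define INVALID_KEY ((unsigned long long)-1) /* assumed sentinel */

    struct kvm_event;                /* details as sketched earlier */

    struct vcpu_record {
        struct kvm_event *last_event; /* stashed by the begin handler */
        unsigned long long start_time; /* timestamp of that begin */
    };

    /* Returns the duration to account, or 0 to silently drop an
     * unmatched end whose begin happened before tracing started. */
    static unsigned long long
    pair_end_event(struct vcpu_record *rec, unsigned long long key,
                   unsigned long long now)
    {
        if (!rec->last_event && key == INVALID_KEY)
            return 0;

        rec->last_event = NULL;      /* consume the begin/end pair */
        return now - rec->start_time;
    }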
426 static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
429 return event->total.field; \
431 if (vcpu >= event->max_vcpu) \
434 return event->vcpu[vcpu].field; \
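Lines 426-434 are the body of a generator macro: perf defines the accessor once and instantiates it for count, time, max, and min, so each get_event_*() picks the aggregate field for vcpu == -1 and the per-vCPU field otherwise, returning 0 when the vCPU array was never grown that far. A sketch of the same macro trick with simplified fields:

    struct kvm_event_stats { unsigned long long time, count, max, min; };

    struct kvm_event {
        int max_vcpu;
        struct kvm_event_stats total;
        struct kvm_event_stats *vcpu;
    };

    /* Generate get_event_time(), get_event_count(), ... from one template. */
    #define GET_EVENT_KEY(func, field)                                  \
    static unsigned long long get_event_##func(struct kvm_event *event, \
                                               int vcpu)                \
    {                                                                   \
        if (vcpu == -1)                                                 \
            return event->total.field;                                  \
        if (vcpu >= event->max_vcpu)                                    \
            return 0;                                                   \
        return event->vcpu[vcpu].field;                                 \
    }

    GET_EVENT_KEY(time, time)
    GET_EVENT_KEY(count, count)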
476 static void insert_to_result(struct rb_root *result, struct kvm_event *event, in insert_to_result() argument
487 if (bigger(event, p, vcpu)) in insert_to_result()
493 rb_link_node(&event->rb, parent, rb); in insert_to_result()
494 rb_insert_color(&event->rb, result); in insert_to_result()
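insert_to_result (lines 476-494) is a standard red-black tree insertion with the kernel's rbtree API: walk down comparing against the configured sort key, remember the parent and the link slot, then rb_link_node plus rb_insert_color to rebalance. A sketch assuming the tools/include rbtree helpers, with the configurable comparator simplified to a fixed count field:

    #include <linux/rbtree.h>        /* assumes the tools/include copy perf builds with */

    struct kvm_event {
        struct rb_node rb;
        unsigned long long count;    /* stand-in for the chosen sort key */
    };

    static int bigger(struct kvm_event *a, struct kvm_event *b)
    {
        return a->count > b->count;
    }

    static void insert_sorted(struct rb_root *result, struct kvm_event *event)
    {
        struct rb_node **rb = &result->rb_node;
        struct rb_node *parent = NULL;

        while (*rb) {
            struct kvm_event *p = rb_entry(*rb, struct kvm_event, rb);

            parent = *rb;
            if (bigger(event, p))
                rb = &(*rb)->rb_left; /* bigger sorts toward rb_first() */
            else
                rb = &(*rb)->rb_right;
        }

        rb_link_node(&event->rb, parent, rb);
        rb_insert_color(&event->rb, result);
    }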
498 update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) in update_total_count() argument
502 kvm->total_count += get_event_count(event, vcpu); in update_total_count()
503 kvm->total_time += get_event_time(event, vcpu); in update_total_count()
506 static bool event_is_valid(struct kvm_event *event, int vcpu) in event_is_valid() argument
508 return !!get_event_count(event, vcpu); in event_is_valid()
515 struct kvm_event *event; in sort_result() local
518 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { in sort_result()
519 if (event_is_valid(event, vcpu)) { in sort_result()
520 update_total_count(kvm, event); in sort_result()
521 insert_to_result(&kvm->result, event, in sort_result()
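sort_result (lines 515-521) bridges the two data structures: it walks every hash bucket, skips events that were never hit (event_is_valid, line 508, is just a nonzero-count test), accumulates the grand totals (lines 498-503), and inserts the rest into the result tree. A sketch with the bucket walk simplified to an array of chains; the accessors and the cache size are assumptions carried over from the sketches above:

    #define EVENTS_CACHE_SIZE 32     /* assumption: real size is a build constant */

    static void sort_events(struct kvm_event *buckets[], int vcpu,
                            unsigned long long *total_count,
                            unsigned long long *total_time)
    {
        int i;

        for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
            struct kvm_event *event;

            for (event = buckets[i]; event; event = event->next) {
                if (!get_event_count(event, vcpu))
                    continue;        /* never hit: leave it out */

                *total_count += get_event_count(event, vcpu);
                *total_time  += get_event_time(event, vcpu);
                /* insert_sorted(result, event); -- see the rbtree sketch */
            }
        }
    }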
578 struct kvm_event *event; in print_result() local
598 while ((event = pop_from_result(&kvm->result))) { in print_result()
601 ecount = get_event_count(event, vcpu); in print_result()
602 etime = get_event_time(event, vcpu); in print_result()
603 max = get_event_max(event, vcpu); in print_result()
604 min = get_event_min(event, vcpu); in print_result()
606 kvm->events_ops->decode_key(kvm, &event->key, decode); in print_result()
614 kvm_event_rel_stddev(vcpu, event)); in print_result()
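print_result (lines 578-614) drains the tree in sorted order: pop_from_result plausibly takes rb_first, erases it, and converts back to the kvm_event, after which each row prints the decoded key, count, time, min, max, and the relative stddev. A sketch of the pop helper under the same rbtree assumptions as above:

    static struct kvm_event *pop_from_result(struct rb_root *result)
    {
        struct rb_node *node = rb_first(result);

        if (!node)
            return NULL;             /* tree drained: the print loop stops */

        rb_erase(node, result);
        return rb_entry(node, struct kvm_event, rb);
    }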
627 union perf_event *event __maybe_unused, in process_lost_event()
648 union perf_event *event, in process_sample_event() argument
664 event->header.type); in process_sample_event()
721 union perf_event *event; in perf_kvm__mmap_read_idx() local
727 while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) { in perf_kvm__mmap_read_idx()
728 err = perf_evlist__parse_sample(kvm->evlist, event, &sample); in perf_kvm__mmap_read_idx()
735 err = perf_session__queue_event(kvm->session, event, &sample, 0); in perf_kvm__mmap_read_idx()
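The live-mode reader at lines 721-735 pulls raw events from one mmap ring, parses each sample, and hands it to the session's ordered-event queue so begin/end pairs from different CPUs are replayed in timestamp order before the stats are updated. A condensed sketch of that loop shape; every identifier is taken from the excerpt itself, and only the wrapper name and the error path details are invented here:

    static int read_one_ring(struct perf_kvm_stat *kvm, int idx)
    {
        union perf_event *event;
        struct perf_sample sample;
        int err;

        while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
            err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
            if (err)
                return -1;           /* sample could not be decoded */

            /* Queue by timestamp so cross-CPU begin/end pairs stay
             * ordered when the session processes them. */
            err = perf_session__queue_event(kvm->session, event, &sample, 0);
            if (err)
                return -1;
        }
        return 0;
    }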