#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
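
/*
 * Recalculate the column widths from scratch, considering only the
 * first max_rows unfiltered entries, i.e. the rows that will actually
 * be displayed.
 */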
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
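
/*
 * Age an entry: he_stat__decay() above multiplies the period by 7/8,
 * i.e. an exponential decay, and here the difference is subtracted
 * from the hists totals. Returns true once the period hits zero, at
 * which point the caller (e.g. 'perf top' via hists__decay_entries())
 * can drop the entry.
 */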
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
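
/*
 * Allocate a new hist_entry from a template. When callchains are in use
 * the callchain_root lives in flexible space appended to the entry
 * itself, hence the sizeof(*he) + callchain_size allocation below.
 */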
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * The branch info was allocated as part of
			 * sample__resolve_bstack() and will be freed after
			 * the new entries have been added, so we need to
			 * save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
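
/*
 * Insert the entry into the current input rbtree. If an entry that
 * compares equal under the sort keys already exists, merge the new
 * period/weight into it instead of allocating a new node.
 */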
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
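
/*
 * The hist_entry_iter machinery below splits "add the hist entries for
 * one sample" into five callbacks so that the normal, branch stack, mem
 * and cumulative (--children) cases can share a single driver,
 * hist_entry_iter__add():
 *
 *	prepare_entry		resolve/allocate per-sample state
 *	add_single_entry	add the entry for the sample itself
 *	next_entry		advance to the next sub-entry (branch,
 *				callchain node, ...), 0 when exhausted
 *	add_next_entry		add a hist entry for that sub-entry
 *	finish_entry		free the per-sample state
 */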
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
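
/*
 * The cumulative iterator implements --children: besides the entry for
 * the sample itself, one entry is added for each caller found in the
 * callchain, with the period accumulated into stat_acc rather than
 * stat. The he_cache array remembers the entries already touched by
 * this sample so cycles and recursion are only accumulated once.
 */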
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once to prevent entries exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
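
/*
 * Add one entry per callchain node. The cursor snapshot taken before
 * advancing is what gets appended to the new entry's callchain, and
 * sample_self=false means the period is only accounted in stat_acc.
 */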
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain;
	 * it may contain cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
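
/*
 * Drive the ops above for a single sample. A caller, e.g. 'perf
 * report', picks an ops table and feeds every sample through it,
 * roughly (a sketch; my_callback/my_arg stand in for caller state):
 *
 *	struct hist_entry_iter iter = {
 *		.ops		= &hist_iter_normal,
 *		.add_entry_cb	= my_callback,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   PERF_MAX_STACK_DEPTH, my_arg);
 */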
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
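
/*
 * Swap the two input trees in entries_in_array under the lock and hand
 * back the one that was being filled, so new samples can keep flowing
 * into the other tree while the caller collapses this one (this is
 * what lets, e.g., 'perf top' keep collecting while it resorts).
 */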
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
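
/*
 * Clear one filter bit on an entry. Only when no filter bits remain
 * set does the entry become visible again and get counted back into
 * the non-filtered stats and column widths.
 */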
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
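
/*
 * Add a zeroed entry (period == 0, nr_events == 0) so that an entry
 * present only in 'other' still gets a counterpart in the leader;
 * hists__link() below uses this when pairing hists, e.g. when
 * comparing events or sessions.
 */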
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, just add a dummy entry on the leader hists,
 * with period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
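
/*
 * "relative" computes percentages against the filtered total,
 * "absolute" against the full total (see hists__total_period() above).
 * Besides the command line, this can be set in the perf config file,
 * handled by perf_hist_config() below, e.g.:
 *
 *	[hist]
 *		percentage = relative
 */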
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}