kernel/trace/trace_selftest.c

DEFINITIONS

This source file includes the following definitions.
  1. trace_valid_entry
  2. trace_test_buffer_cpu
  3. trace_test_buffer
  4. warn_failed_init_tracer
  5. trace_selftest_test_probe1_func
  6. trace_selftest_test_probe2_func
  7. trace_selftest_test_probe3_func
  8. trace_selftest_test_global_func
  9. trace_selftest_test_dyn_func
  10. print_counts
  11. reset_counts
  12. trace_selftest_ops
  13. trace_selftest_startup_dynamic_tracing
  14. trace_selftest_test_recursion_func
  15. trace_selftest_test_recursion_safe_func
  16. trace_selftest_function_recursion
  17. trace_selftest_test_regs_func
  18. trace_selftest_function_regs
  19. trace_selftest_startup_function
  20. trace_graph_entry_watchdog
  21. trace_selftest_startup_function_graph
  22. trace_selftest_startup_irqsoff
  23. trace_selftest_startup_preemptoff
  24. trace_selftest_startup_preemptirqsoff
  25. trace_selftest_startup_nop
  26. trace_wakeup_test_thread
  27. trace_selftest_startup_wakeup
  28. trace_selftest_startup_branch

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Include in trace.c */
   3 
   4 #include <uapi/linux/sched/types.h>
   5 #include <linux/stringify.h>
   6 #include <linux/kthread.h>
   7 #include <linux/delay.h>
   8 #include <linux/slab.h>
   9 
  10 static inline int trace_valid_entry(struct trace_entry *entry)
  11 {
  12         switch (entry->type) {
  13         case TRACE_FN:
  14         case TRACE_CTX:
  15         case TRACE_WAKE:
  16         case TRACE_STACK:
  17         case TRACE_PRINT:
  18         case TRACE_BRANCH:
  19         case TRACE_GRAPH_ENT:
  20         case TRACE_GRAPH_RET:
  21                 return 1;
  22         }
  23         return 0;
  24 }
  25 
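     /*
      * Consume every event queued on the given CPU's ring buffer and
      * check that each entry carries a type the selftests know how to
      * produce.
      */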
  26 static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  27 {
  28         struct ring_buffer_event *event;
  29         struct trace_entry *entry;
  30         unsigned int loops = 0;
  31 
  32         while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33                 entry = ring_buffer_event_data(event);
  34 
  35                 /*
  36                  * The ring buffer holds at most trace_buf_size entries;
  37                  * if we loop more times than that, something is wrong
  38                  * with the ring buffer.
  39                  */
  40                 if (loops++ > trace_buf_size) {
  41                         printk(KERN_CONT ".. bad ring buffer ");
  42                         goto failed;
  43                 }
  44                 if (!trace_valid_entry(entry)) {
  45                         printk(KERN_CONT ".. invalid entry %d ",
  46                                 entry->type);
  47                         goto failed;
  48                 }
  49         }
  50         return 0;
  51 
  52  failed:
  53         /* disable tracing */
  54         tracing_disabled = 1;
  55         printk(KERN_CONT ".. corrupted trace buffer .. ");
  56         return -1;
  57 }
  58 
  59 /*
  60  * Test the trace buffer to see if all the elements
  61  * are still sane.
  62  */
  63 static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
  64 {
  65         unsigned long flags, cnt = 0;
  66         int cpu, ret = 0;
  67 
  68         /* Don't allow flipping of max traces now */
  69         local_irq_save(flags);
  70         arch_spin_lock(&buf->tr->max_lock);
  71 
  72         cnt = ring_buffer_entries(buf->buffer);
  73 
  74         /*
  75          * trace_test_buffer_cpu() runs a while loop to consume all data.
  76          * If the calling tracer is broken, and is constantly filling
  77          * the buffer, this will run forever, and hard lock the box.
  78          * We disable the ring buffer while we do this test to prevent
  79          * a hard lock up.
  80          */
  81         tracing_off();
  82         for_each_possible_cpu(cpu) {
  83                 ret = trace_test_buffer_cpu(buf, cpu);
  84                 if (ret)
  85                         break;
  86         }
  87         tracing_on();
  88         arch_spin_unlock(&buf->tr->max_lock);
  89         local_irq_restore(flags);
  90 
  91         if (count)
  92                 *count = cnt;
  93 
  94         return ret;
  95 }
  96 
  97 static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98 {
  99         printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100                 trace->name, init_ret);
 101 }
 102 #ifdef CONFIG_FUNCTION_TRACER
 103 
 104 #ifdef CONFIG_DYNAMIC_FTRACE
 105 
 106 static int trace_selftest_test_probe1_cnt;
 107 static void trace_selftest_test_probe1_func(unsigned long ip,
 108                                             unsigned long pip,
 109                                             struct ftrace_ops *op,
 110                                             struct pt_regs *pt_regs)
 111 {
 112         trace_selftest_test_probe1_cnt++;
 113 }
 114 
 115 static int trace_selftest_test_probe2_cnt;
 116 static void trace_selftest_test_probe2_func(unsigned long ip,
 117                                             unsigned long pip,
 118                                             struct ftrace_ops *op,
 119                                             struct pt_regs *pt_regs)
 120 {
 121         trace_selftest_test_probe2_cnt++;
 122 }
 123 
 124 static int trace_selftest_test_probe3_cnt;
 125 static void trace_selftest_test_probe3_func(unsigned long ip,
 126                                             unsigned long pip,
 127                                             struct ftrace_ops *op,
 128                                             struct pt_regs *pt_regs)
 129 {
 130         trace_selftest_test_probe3_cnt++;
 131 }
 132 
 133 static int trace_selftest_test_global_cnt;
 134 static void trace_selftest_test_global_func(unsigned long ip,
 135                                             unsigned long pip,
 136                                             struct ftrace_ops *op,
 137                                             struct pt_regs *pt_regs)
 138 {
 139         trace_selftest_test_global_cnt++;
 140 }
 141 
 142 static int trace_selftest_test_dyn_cnt;
 143 static void trace_selftest_test_dyn_func(unsigned long ip,
 144                                          unsigned long pip,
 145                                          struct ftrace_ops *op,
 146                                          struct pt_regs *pt_regs)
 147 {
 148         trace_selftest_test_dyn_cnt++;
 149 }
 150 
 151 static struct ftrace_ops test_probe1 = {
 152         .func                   = trace_selftest_test_probe1_func,
 153         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 154 };
 155 
 156 static struct ftrace_ops test_probe2 = {
 157         .func                   = trace_selftest_test_probe2_func,
 158         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 159 };
 160 
 161 static struct ftrace_ops test_probe3 = {
 162         .func                   = trace_selftest_test_probe3_func,
 163         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 164 };
 165 
 166 static void print_counts(void)
 167 {
 168         printk("(%d %d %d %d %d) ",
 169                trace_selftest_test_probe1_cnt,
 170                trace_selftest_test_probe2_cnt,
 171                trace_selftest_test_probe3_cnt,
 172                trace_selftest_test_global_cnt,
 173                trace_selftest_test_dyn_cnt);
 174 }
 175 
 176 static void reset_counts(void)
 177 {
 178         trace_selftest_test_probe1_cnt = 0;
 179         trace_selftest_test_probe2_cnt = 0;
 180         trace_selftest_test_probe3_cnt = 0;
 181         trace_selftest_test_global_cnt = 0;
 182         trace_selftest_test_dyn_cnt = 0;
 183 }
 184 
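     /*
      * Exercise several ftrace_ops at once: three statically filtered
      * probes, optionally the trace_array's own ops (when cnt > 1), and
      * one dynamically allocated ops. DYN_FTRACE_TEST_NAME() and
      * DYN_FTRACE_TEST_NAME2() are called after each step and the
      * per-probe counters are checked against the expected hit counts.
      */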
 185 static int trace_selftest_ops(struct trace_array *tr, int cnt)
 186 {
 187         int save_ftrace_enabled = ftrace_enabled;
 188         struct ftrace_ops *dyn_ops;
 189         char *func1_name;
 190         char *func2_name;
 191         int len1;
 192         int len2;
 193         int ret = -1;
 194 
 195         printk(KERN_CONT "PASSED\n");
 196         pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 197 
 198         ftrace_enabled = 1;
 199         reset_counts();
 200 
 201         /* Handle PPC64 '.' name */
 202         func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 203         func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 204         len1 = strlen(func1_name);
 205         len2 = strlen(func2_name);
 206 
 207         /*
 208          * Probe 1 will trace function 1.
 209          * Probe 2 will trace function 2.
 210          * Probe 3 will trace functions 1 and 2.
 211          */
 212         ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 213         ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 214         ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 215         ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 216 
 217         register_ftrace_function(&test_probe1);
 218         register_ftrace_function(&test_probe2);
 219         register_ftrace_function(&test_probe3);
 220         /* The first time we are running with the main function tracer */
 221         if (cnt > 1) {
 222                 ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 223                 register_ftrace_function(tr->ops);
 224         }
 225 
 226         DYN_FTRACE_TEST_NAME();
 227 
 228         print_counts();
 229 
 230         if (trace_selftest_test_probe1_cnt != 1)
 231                 goto out;
 232         if (trace_selftest_test_probe2_cnt != 0)
 233                 goto out;
 234         if (trace_selftest_test_probe3_cnt != 1)
 235                 goto out;
 236         if (cnt > 1) {
 237                 if (trace_selftest_test_global_cnt == 0)
 238                         goto out;
 239         }
 240 
 241         DYN_FTRACE_TEST_NAME2();
 242 
 243         print_counts();
 244 
 245         if (trace_selftest_test_probe1_cnt != 1)
 246                 goto out;
 247         if (trace_selftest_test_probe2_cnt != 1)
 248                 goto out;
 249         if (trace_selftest_test_probe3_cnt != 2)
 250                 goto out;
 251 
 252         /* Add a dynamic probe */
 253         dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 254         if (!dyn_ops) {
 255                 printk("MEMORY ERROR ");
 256                 goto out;
 257         }
 258 
 259         dyn_ops->func = trace_selftest_test_dyn_func;
 260 
 261         register_ftrace_function(dyn_ops);
 262 
 263         trace_selftest_test_global_cnt = 0;
 264 
 265         DYN_FTRACE_TEST_NAME();
 266 
 267         print_counts();
 268 
 269         if (trace_selftest_test_probe1_cnt != 2)
 270                 goto out_free;
 271         if (trace_selftest_test_probe2_cnt != 1)
 272                 goto out_free;
 273         if (trace_selftest_test_probe3_cnt != 3)
 274                 goto out_free;
 275         if (cnt > 1) {
 276                 if (trace_selftest_test_global_cnt == 0)
 277                         goto out_free;
 278         }
 279         if (trace_selftest_test_dyn_cnt == 0)
 280                 goto out_free;
 281 
 282         DYN_FTRACE_TEST_NAME2();
 283 
 284         print_counts();
 285 
 286         if (trace_selftest_test_probe1_cnt != 2)
 287                 goto out_free;
 288         if (trace_selftest_test_probe2_cnt != 2)
 289                 goto out_free;
 290         if (trace_selftest_test_probe3_cnt != 4)
 291                 goto out_free;
 292 
 293         ret = 0;
 294  out_free:
 295         unregister_ftrace_function(dyn_ops);
 296         kfree(dyn_ops);
 297 
 298  out:
 299         /* Purposely unregister in the same order as registration */
 300         unregister_ftrace_function(&test_probe1);
 301         unregister_ftrace_function(&test_probe2);
 302         unregister_ftrace_function(&test_probe3);
 303         if (cnt > 1)
 304                 unregister_ftrace_function(tr->ops);
 305         ftrace_reset_array_ops(tr);
 306 
 307         /* Make sure everything is off */
 308         reset_counts();
 309         DYN_FTRACE_TEST_NAME();
 310         DYN_FTRACE_TEST_NAME();
 311 
 312         if (trace_selftest_test_probe1_cnt ||
 313             trace_selftest_test_probe2_cnt ||
 314             trace_selftest_test_probe3_cnt ||
 315             trace_selftest_test_global_cnt ||
 316             trace_selftest_test_dyn_cnt)
 317                 ret = -1;
 318 
 319         ftrace_enabled = save_ftrace_enabled;
 320 
 321         return ret;
 322 }
 323 
 324 /* Test dynamic code modification and ftrace filters */
 325 static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 326                                                   struct trace_array *tr,
 327                                                   int (*func)(void))
 328 {
 329         int save_ftrace_enabled = ftrace_enabled;
 330         unsigned long count;
 331         char *func_name;
 332         int ret;
 333 
 334         /* The ftrace test PASSED */
 335         printk(KERN_CONT "PASSED\n");
 336         pr_info("Testing dynamic ftrace: ");
 337 
 338         /* enable tracing, and record the filter function */
 339         ftrace_enabled = 1;
 340 
 341         /* passed in by parameter to keep gcc from optimizing it away */
 342         func();
 343 
 344         /*
 345          * Some archs *cough*PowerPC*cough* add characters to the
 346          * start of the function names. We simply put a '*' to
 347          * accommodate them.
 348          */
 349         func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 350 
 351         /* filter only on our function */
 352         ftrace_set_global_filter(func_name, strlen(func_name), 1);
 353 
 354         /* enable tracing */
 355         ret = tracer_init(trace, tr);
 356         if (ret) {
 357                 warn_failed_init_tracer(trace, ret);
 358                 goto out;
 359         }
 360 
 361         /* Sleep for 1/10 of a second */
 362         msleep(100);
 363 
 364         /* we should have nothing in the buffer */
 365         ret = trace_test_buffer(&tr->trace_buffer, &count);
 366         if (ret)
 367                 goto out;
 368 
 369         if (count) {
 370                 ret = -1;
 371                 printk(KERN_CONT ".. filter did not filter .. ");
 372                 goto out;
 373         }
 374 
 375         /* call our function again */
 376         func();
 377 
 378         /* sleep again */
 379         msleep(100);
 380 
 381         /* stop the tracing. */
 382         tracing_stop();
 383         ftrace_enabled = 0;
 384 
 385         /* check the trace buffer */
 386         ret = trace_test_buffer(&tr->trace_buffer, &count);
 387 
 388         ftrace_enabled = 1;
 389         tracing_start();
 390 
 391         /* we should only have one item */
 392         if (!ret && count != 1) {
 393                 trace->reset(tr);
 394                 printk(KERN_CONT ".. filter failed count=%ld ..", count);
 395                 ret = -1;
 396                 goto out;
 397         }
 398 
 399         /* Test the ops with global tracing running */
 400         ret = trace_selftest_ops(tr, 1);
 401         trace->reset(tr);
 402 
 403  out:
 404         ftrace_enabled = save_ftrace_enabled;
 405 
 406         /* Enable tracing on all functions again */
 407         ftrace_set_global_filter(NULL, 0, 1);
 408 
 409         /* Test the ops with global tracing off */
 410         if (!ret)
 411                 ret = trace_selftest_ops(tr, 2);
 412 
 413         return ret;
 414 }
 415 
 416 static int trace_selftest_recursion_cnt;
 417 static void trace_selftest_test_recursion_func(unsigned long ip,
 418                                                unsigned long pip,
 419                                                struct ftrace_ops *op,
 420                                                struct pt_regs *pt_regs)
 421 {
 422         /*
 423          * This function is registered without the recursion safe flag.
 424          * The ftrace infrastructure should provide the recursion
 425          * protection. If not, this will crash the kernel!
 426          */
 427         if (trace_selftest_recursion_cnt++ > 10)
 428                 return;
 429         DYN_FTRACE_TEST_NAME();
 430 }
 431 
 432 static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 433                                                     unsigned long pip,
 434                                                     struct ftrace_ops *op,
 435                                                     struct pt_regs *pt_regs)
 436 {
 437         /*
 438          * We said we would provide our own recursion protection. By
 439          * calling this function again, we should recurse back into this
 440          * function and count again. But this only happens if the arch
 441          * supports all of the ftrace features and nothing else is using
 442          * the function tracing utility.
 443          */
 444         if (trace_selftest_recursion_cnt++)
 445                 return;
 446         DYN_FTRACE_TEST_NAME();
 447 }
 448 
 449 static struct ftrace_ops test_rec_probe = {
 450         .func                   = trace_selftest_test_recursion_func,
 451 };
 452 
 453 static struct ftrace_ops test_recsafe_probe = {
 454         .func                   = trace_selftest_test_recursion_safe_func,
 455         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 456 };
 457 
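     /*
      * Run the unprotected probe and then the recursion-safe probe on
      * DYN_FTRACE_TEST_NAME() and verify that the shared counter ends up
      * at the value each pass expects (1, then 2).
      */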
 458 static int
 459 trace_selftest_function_recursion(void)
 460 {
 461         int save_ftrace_enabled = ftrace_enabled;
 462         char *func_name;
 463         int len;
 464         int ret;
 465 
 466         /* The previous test PASSED */
 467         pr_cont("PASSED\n");
 468         pr_info("Testing ftrace recursion: ");
 469 
 470 
 471         /* enable tracing, and record the filter function */
 472         ftrace_enabled = 1;
 473 
 474         /* Handle PPC64 '.' name */
 475         func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 476         len = strlen(func_name);
 477 
 478         ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 479         if (ret) {
 480                 pr_cont("*Could not set filter* ");
 481                 goto out;
 482         }
 483 
 484         ret = register_ftrace_function(&test_rec_probe);
 485         if (ret) {
 486                 pr_cont("*could not register callback* ");
 487                 goto out;
 488         }
 489 
 490         DYN_FTRACE_TEST_NAME();
 491 
 492         unregister_ftrace_function(&test_rec_probe);
 493 
 494         ret = -1;
 495         if (trace_selftest_recursion_cnt != 1) {
 496                 pr_cont("*callback not called once (%d)* ",
 497                         trace_selftest_recursion_cnt);
 498                 goto out;
 499         }
 500 
 501         trace_selftest_recursion_cnt = 1;
 502 
 503         pr_cont("PASSED\n");
 504         pr_info("Testing ftrace recursion safe: ");
 505 
 506         ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 507         if (ret) {
 508                 pr_cont("*Could not set filter* ");
 509                 goto out;
 510         }
 511 
 512         ret = register_ftrace_function(&test_recsafe_probe);
 513         if (ret) {
 514                 pr_cont("*could not register callback* ");
 515                 goto out;
 516         }
 517 
 518         DYN_FTRACE_TEST_NAME();
 519 
 520         unregister_ftrace_function(&test_recsafe_probe);
 521 
 522         ret = -1;
 523         if (trace_selftest_recursion_cnt != 2) {
 524                 pr_cont("*callback not called expected 2 times (%d)* ",
 525                         trace_selftest_recursion_cnt);
 526                 goto out;
 527         }
 528 
 529         ret = 0;
 530 out:
 531         ftrace_enabled = save_ftrace_enabled;
 532 
 533         return ret;
 534 }
 535 #else
 536 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 537 # define trace_selftest_function_recursion() ({ 0; })
 538 #endif /* CONFIG_DYNAMIC_FTRACE */
 539 
 540 static enum {
 541         TRACE_SELFTEST_REGS_START,
 542         TRACE_SELFTEST_REGS_FOUND,
 543         TRACE_SELFTEST_REGS_NOT_FOUND,
 544 } trace_selftest_regs_stat;
 545 
 546 static void trace_selftest_test_regs_func(unsigned long ip,
 547                                           unsigned long pip,
 548                                           struct ftrace_ops *op,
 549                                           struct pt_regs *pt_regs)
 550 {
 551         if (pt_regs)
 552                 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 553         else
 554                 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 555 }
 556 
 557 static struct ftrace_ops test_regs_probe = {
 558         .func           = trace_selftest_test_regs_func,
 559         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
 560 };
 561 
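     /*
      * Register a SAVE_REGS probe on DYN_FTRACE_TEST_NAME(). If the arch
      * cannot save registers, fall back to SAVE_REGS_IF_SUPPORTED, then
      * check whether pt_regs was (or was not) handed to the callback as
      * expected.
      */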
 562 static int
 563 trace_selftest_function_regs(void)
 564 {
 565         int save_ftrace_enabled = ftrace_enabled;
 566         char *func_name;
 567         int len;
 568         int ret;
 569         int supported = 0;
 570 
 571 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 572         supported = 1;
 573 #endif
 574 
 575         /* The previous test PASSED */
 576         pr_cont("PASSED\n");
 577         pr_info("Testing ftrace regs%s: ",
 578                 !supported ? "(no arch support)" : "");
 579 
 580         /* enable tracing, and record the filter function */
 581         ftrace_enabled = 1;
 582 
 583         /* Handle PPC64 '.' name */
 584         func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 585         len = strlen(func_name);
 586 
 587         ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 588         /*
 589          * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 590          * This test really doesn't care.
 591          */
 592         if (ret && ret != -ENODEV) {
 593                 pr_cont("*Could not set filter* ");
 594                 goto out;
 595         }
 596 
 597         ret = register_ftrace_function(&test_regs_probe);
 598         /*
 599          * Now if the arch does not support passing regs, then this should
 600          * have failed.
 601          */
 602         if (!supported) {
 603                 if (!ret) {
 604                         pr_cont("*registered save-regs without arch support* ");
 605                         goto out;
 606                 }
 607                 test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 608                 ret = register_ftrace_function(&test_regs_probe);
 609         }
 610         if (ret) {
 611                 pr_cont("*could not register callback* ");
 612                 goto out;
 613         }
 614 
 615 
 616         DYN_FTRACE_TEST_NAME();
 617 
 618         unregister_ftrace_function(&test_regs_probe);
 619 
 620         ret = -1;
 621 
 622         switch (trace_selftest_regs_stat) {
 623         case TRACE_SELFTEST_REGS_START:
 624                 pr_cont("*callback never called* ");
 625                 goto out;
 626 
 627         case TRACE_SELFTEST_REGS_FOUND:
 628                 if (supported)
 629                         break;
 630                 pr_cont("*callback received regs without arch support* ");
 631                 goto out;
 632 
 633         case TRACE_SELFTEST_REGS_NOT_FOUND:
 634                 if (!supported)
 635                         break;
 636                 pr_cont("*callback received NULL regs* ");
 637                 goto out;
 638         }
 639 
 640         ret = 0;
 641 out:
 642         ftrace_enabled = save_ftrace_enabled;
 643 
 644         return ret;
 645 }
 646 
 647 /*
 648  * Simple verification test of ftrace function tracer.
 649  * Enable ftrace, sleep 1/10 second, and then read the trace
 650  * buffer to see if all is in order.
 651  */
 652 __init int
 653 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 654 {
 655         int save_ftrace_enabled = ftrace_enabled;
 656         unsigned long count;
 657         int ret;
 658 
 659 #ifdef CONFIG_DYNAMIC_FTRACE
 660         if (ftrace_filter_param) {
 661                 printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 662                 return 0;
 663         }
 664 #endif
 665 
 666         /* make sure msleep has been recorded */
 667         msleep(1);
 668 
 669         /* start the tracing */
 670         ftrace_enabled = 1;
 671 
 672         ret = tracer_init(trace, tr);
 673         if (ret) {
 674                 warn_failed_init_tracer(trace, ret);
 675                 goto out;
 676         }
 677 
 678         /* Sleep for 1/10 of a second */
 679         msleep(100);
 680         /* stop the tracing. */
 681         tracing_stop();
 682         ftrace_enabled = 0;
 683 
 684         /* check the trace buffer */
 685         ret = trace_test_buffer(&tr->trace_buffer, &count);
 686 
 687         ftrace_enabled = 1;
 688         trace->reset(tr);
 689         tracing_start();
 690 
 691         if (!ret && !count) {
 692                 printk(KERN_CONT ".. no entries found ..");
 693                 ret = -1;
 694                 goto out;
 695         }
 696 
 697         ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 698                                                      DYN_FTRACE_TEST_NAME);
 699         if (ret)
 700                 goto out;
 701 
 702         ret = trace_selftest_function_recursion();
 703         if (ret)
 704                 goto out;
 705 
 706         ret = trace_selftest_function_regs();
 707  out:
 708         ftrace_enabled = save_ftrace_enabled;
 709 
 710         /* kill ftrace totally if we failed */
 711         if (ret)
 712                 ftrace_kill();
 713 
 714         return ret;
 715 }
 716 #endif /* CONFIG_FUNCTION_TRACER */
 717 
 718 
 719 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 720 
 721 /* Maximum number of functions to trace before diagnosing a hang */
 722 #define GRAPH_MAX_FUNC_TEST     100000000
 723 
 724 static unsigned int graph_hang_thresh;
 725 
 726 /* Wrap the real function entry probe to avoid possible hanging */
 727 static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 728 {
 729         /* This is harmlessly racy, we want to approximately detect a hang */
 730         if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 731                 ftrace_graph_stop();
 732                 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 733                 if (ftrace_dump_on_oops) {
 734                         ftrace_dump(DUMP_ALL);
 735                         /* ftrace_dump() disables tracing */
 736                         tracing_on();
 737                 }
 738                 return 0;
 739         }
 740 
 741         return trace_graph_entry(trace);
 742 }
 743 
 744 static struct fgraph_ops fgraph_ops __initdata  = {
 745         .entryfunc              = &trace_graph_entry_watchdog,
 746         .retfunc                = &trace_graph_return,
 747 };
 748 
 749 /*
 750  * Pretty much the same as for the function tracer, from which this selftest
 751  * has been borrowed.
 752  */
 753 __init int
 754 trace_selftest_startup_function_graph(struct tracer *trace,
 755                                         struct trace_array *tr)
 756 {
 757         int ret;
 758         unsigned long count;
 759 
 760 #ifdef CONFIG_DYNAMIC_FTRACE
 761         if (ftrace_filter_param) {
 762                 printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 763                 return 0;
 764         }
 765 #endif
 766 
 767         /*
 768          * Simulate the init() callback but we attach a watchdog callback
 769          * to detect and recover from possible hangs
 770          */
 771         tracing_reset_online_cpus(&tr->trace_buffer);
 772         set_graph_array(tr);
 773         ret = register_ftrace_graph(&fgraph_ops);
 774         if (ret) {
 775                 warn_failed_init_tracer(trace, ret);
 776                 goto out;
 777         }
 778         tracing_start_cmdline_record();
 779 
 780         /* Sleep for 1/10 of a second */
 781         msleep(100);
 782 
 783         /* Have we just recovered from a hang? */
 784         if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 785                 tracing_selftest_disabled = true;
 786                 ret = -1;
 787                 goto out;
 788         }
 789 
 790         tracing_stop();
 791 
 792         /* check the trace buffer */
 793         ret = trace_test_buffer(&tr->trace_buffer, &count);
 794 
 795         /* Need to also simulate the tr->reset to remove this fgraph_ops */
 796         tracing_stop_cmdline_record();
 797         unregister_ftrace_graph(&fgraph_ops);
 798 
 799         tracing_start();
 800 
 801         if (!ret && !count) {
 802                 printk(KERN_CONT ".. no entries found ..");
 803                 ret = -1;
 804                 goto out;
 805         }
 806 
 807         /* Don't test dynamic tracing, the function tracer already did */
 808 
 809 out:
 810         /* Stop it if we failed */
 811         if (ret)
 812                 ftrace_graph_stop();
 813 
 814         return ret;
 815 }
 816 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 817 
 818 
 819 #ifdef CONFIG_IRQSOFF_TRACER
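     /*
      * Run the irqsoff tracer while interrupts are disabled for ~100us,
      * then make sure both buffers are sane and that the max-latency
      * buffer actually recorded entries.
      */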
 820 int
 821 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 822 {
 823         unsigned long save_max = tr->max_latency;
 824         unsigned long count;
 825         int ret;
 826 
 827         /* start the tracing */
 828         ret = tracer_init(trace, tr);
 829         if (ret) {
 830                 warn_failed_init_tracer(trace, ret);
 831                 return ret;
 832         }
 833 
 834         /* reset the max latency */
 835         tr->max_latency = 0;
 836         /* disable interrupts for a bit */
 837         local_irq_disable();
 838         udelay(100);
 839         local_irq_enable();
 840 
 841         /*
 842          * Stop the tracer to avoid a warning that would follow a failed
 843          * buffer swap: tracing_stop() disables the tr and max buffers,
 844          * making the swap impossible when a max irqs-off latency is
 845          * being recorded in parallel.
 846          */
 847         trace->stop(tr);
 848         /* stop the tracing. */
 849         tracing_stop();
 850         /* check both trace buffers */
 851         ret = trace_test_buffer(&tr->trace_buffer, NULL);
 852         if (!ret)
 853                 ret = trace_test_buffer(&tr->max_buffer, &count);
 854         trace->reset(tr);
 855         tracing_start();
 856 
 857         if (!ret && !count) {
 858                 printk(KERN_CONT ".. no entries found ..");
 859                 ret = -1;
 860         }
 861 
 862         tr->max_latency = save_max;
 863 
 864         return ret;
 865 }
 866 #endif /* CONFIG_IRQSOFF_TRACER */
 867 
 868 #ifdef CONFIG_PREEMPT_TRACER
 869 int
 870 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 871 {
 872         unsigned long save_max = tr->max_latency;
 873         unsigned long count;
 874         int ret;
 875 
 876         /*
 877          * Now that the big kernel lock is no longer preemptible,
 878          * and this is called with the BKL held, it will always
 879          * fail. If preemption is already disabled, simply
 880          * pass the test. When the BKL is removed, or becomes
 881          * preemptible again, we will once again test this,
 882          * so keep it in.
 883          */
 884         if (preempt_count()) {
 885                 printk(KERN_CONT "can not test ... force ");
 886                 return 0;
 887         }
 888 
 889         /* start the tracing */
 890         ret = tracer_init(trace, tr);
 891         if (ret) {
 892                 warn_failed_init_tracer(trace, ret);
 893                 return ret;
 894         }
 895 
 896         /* reset the max latency */
 897         tr->max_latency = 0;
 898         /* disable preemption for a bit */
 899         preempt_disable();
 900         udelay(100);
 901         preempt_enable();
 902 
 903         /*
 904          * Stop the tracer to avoid a warning that would follow a failed
 905          * buffer swap: tracing_stop() disables the tr and max buffers,
 906          * making the swap impossible when a max preempt-off latency is
 907          * being recorded in parallel.
 908          */
 909         trace->stop(tr);
 910         /* stop the tracing. */
 911         tracing_stop();
 912         /* check both trace buffers */
 913         ret = trace_test_buffer(&tr->trace_buffer, NULL);
 914         if (!ret)
 915                 ret = trace_test_buffer(&tr->max_buffer, &count);
 916         trace->reset(tr);
 917         tracing_start();
 918 
 919         if (!ret && !count) {
 920                 printk(KERN_CONT ".. no entries found ..");
 921                 ret = -1;
 922         }
 923 
 924         tr->max_latency = save_max;
 925 
 926         return ret;
 927 }
 928 #endif /* CONFIG_PREEMPT_TRACER */
 929 
 930 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 931 int
 932 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 933 {
 934         unsigned long save_max = tr->max_latency;
 935         unsigned long count;
 936         int ret;
 937 
 938         /*
 939          * Now that the big kernel lock is no longer preemptible,
 940          * and this is called with the BKL held, it will always
 941          * fail. If preemption is already disabled, simply
 942          * pass the test. When the BKL is removed, or becomes
 943          * preemptible again, we will once again test this,
 944          * so keep it in.
 945          */
 946         if (preempt_count()) {
 947                 printk(KERN_CONT "can not test ... force ");
 948                 return 0;
 949         }
 950 
 951         /* start the tracing */
 952         ret = tracer_init(trace, tr);
 953         if (ret) {
 954                 warn_failed_init_tracer(trace, ret);
 955                 goto out_no_start;
 956         }
 957 
 958         /* reset the max latency */
 959         tr->max_latency = 0;
 960 
 961         /* disable preemption and interrupts for a bit */
 962         preempt_disable();
 963         local_irq_disable();
 964         udelay(100);
 965         preempt_enable();
 966         /* reverse the order of preempt vs irqs */
 967         local_irq_enable();
 968 
 969         /*
 970          * Stop the tracer to avoid a warning that would follow a failed
 971          * buffer swap: tracing_stop() disables the tr and max buffers,
 972          * making the swap impossible when a max irqs/preempt-off latency
 973          * is being recorded in parallel.
 974          */
 975         trace->stop(tr);
 976         /* stop the tracing. */
 977         tracing_stop();
 978         /* check both trace buffers */
 979         ret = trace_test_buffer(&tr->trace_buffer, NULL);
 980         if (ret)
 981                 goto out;
 982 
 983         ret = trace_test_buffer(&tr->max_buffer, &count);
 984         if (ret)
 985                 goto out;
 986 
 987         if (!ret && !count) {
 988                 printk(KERN_CONT ".. no entries found ..");
 989                 ret = -1;
 990                 goto out;
 991         }
 992 
 993         /* do the test by disabling interrupts first this time */
 994         tr->max_latency = 0;
 995         tracing_start();
 996         trace->start(tr);
 997 
 998         preempt_disable();
 999         local_irq_disable();
1000         udelay(100);
1001         preempt_enable();
1002         /* reverse the order of preempt vs irqs */
1003         local_irq_enable();
1004 
1005         trace->stop(tr);
1006         /* stop the tracing. */
1007         tracing_stop();
1008         /* check both trace buffers */
1009         ret = trace_test_buffer(&tr->trace_buffer, NULL);
1010         if (ret)
1011                 goto out;
1012 
1013         ret = trace_test_buffer(&tr->max_buffer, &count);
1014 
1015         if (!ret && !count) {
1016                 printk(KERN_CONT ".. no entries found ..");
1017                 ret = -1;
1018                 goto out;
1019         }
1020 
1021 out:
1022         tracing_start();
1023 out_no_start:
1024         trace->reset(tr);
1025         tr->max_latency = save_max;
1026 
1027         return ret;
1028 }
1029 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1030 
1031 #ifdef CONFIG_NOP_TRACER
1032 int
1033 trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1034 {
1035         /* What could possibly go wrong? */
1036         return 0;
1037 }
1038 #endif
1039 
1040 #ifdef CONFIG_SCHED_TRACER
1041 
1042 struct wakeup_test_data {
1043         struct completion       is_ready;
1044         int                     go;
1045 };
1046 
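     /*
      * Kthread used by the wakeup tracer selftest: it switches itself to
      * SCHED_DEADLINE, signals the test, sleeps until the test wakes it,
      * and finally waits to be stopped.
      */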
1047 static int trace_wakeup_test_thread(void *data)
1048 {
1049         /* Make this a -deadline thread */
1050         static const struct sched_attr attr = {
1051                 .sched_policy = SCHED_DEADLINE,
1052                 .sched_runtime = 100000ULL,
1053                 .sched_deadline = 10000000ULL,
1054                 .sched_period = 10000000ULL
1055         };
1056         struct wakeup_test_data *x = data;
1057 
1058         sched_setattr(current, &attr);
1059 
1060         /* Let the test know we have our new prio */
1061         complete(&x->is_ready);
1062 
1063         /* now go to sleep and let the test wake us up */
1064         set_current_state(TASK_INTERRUPTIBLE);
1065         while (!x->go) {
1066                 schedule();
1067                 set_current_state(TASK_INTERRUPTIBLE);
1068         }
1069 
1070         complete(&x->is_ready);
1071 
1072         set_current_state(TASK_INTERRUPTIBLE);
1073 
1074         /* we are awake, now wait to disappear */
1075         while (!kthread_should_stop()) {
1076                 schedule();
1077                 set_current_state(TASK_INTERRUPTIBLE);
1078         }
1079 
1080         __set_current_state(TASK_RUNNING);
1081 
1082         return 0;
1083 }
1084 int
1085 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1086 {
1087         unsigned long save_max = tr->max_latency;
1088         struct task_struct *p;
1089         struct wakeup_test_data data;
1090         unsigned long count;
1091         int ret;
1092 
1093         memset(&data, 0, sizeof(data));
1094 
1095         init_completion(&data.is_ready);
1096 
1097         /* create a -deadline thread */
1098         p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1099         if (IS_ERR(p)) {
1100                 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1101                 return -1;
1102         }
1103 
1104         /* make sure the thread is running at -deadline policy */
1105         wait_for_completion(&data.is_ready);
1106 
1107         /* start the tracing */
1108         ret = tracer_init(trace, tr);
1109         if (ret) {
1110                 warn_failed_init_tracer(trace, ret);
1111                 return ret;
1112         }
1113 
1114         /* reset the max latency */
1115         tr->max_latency = 0;
1116 
1117         while (p->on_rq) {
1118                 /*
1119                  * Sleep to make sure the -deadline thread is asleep too.
1120                  * On virtual machines we can't rely on timings,
1121                  * but we want to make sure this test still works.
1122                  */
1123                 msleep(100);
1124         }
1125 
1126         init_completion(&data.is_ready);
1127 
1128         data.go = 1;
1129         /* memory barrier is in the wake_up_process() */
1130 
1131         wake_up_process(p);
1132 
1133         /* Wait for the task to wake up */
1134         wait_for_completion(&data.is_ready);
1135 
1136         /* stop the tracing. */
1137         tracing_stop();
1138         /* check both trace buffers */
1139         ret = trace_test_buffer(&tr->trace_buffer, NULL);
1140         if (!ret)
1141                 ret = trace_test_buffer(&tr->max_buffer, &count);
1142 
1143 
1144         trace->reset(tr);
1145         tracing_start();
1146 
1147         tr->max_latency = save_max;
1148 
1149         /* kill the thread */
1150         kthread_stop(p);
1151 
1152         if (!ret && !count) {
1153                 printk(KERN_CONT ".. no entries found ..");
1154                 ret = -1;
1155         }
1156 
1157         return ret;
1158 }
1159 #endif /* CONFIG_SCHED_TRACER */
1160 
1161 #ifdef CONFIG_BRANCH_TRACER
1162 int
1163 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1164 {
1165         unsigned long count;
1166         int ret;
1167 
1168         /* start the tracing */
1169         ret = tracer_init(trace, tr);
1170         if (ret) {
1171                 warn_failed_init_tracer(trace, ret);
1172                 return ret;
1173         }
1174 
1175         /* Sleep for 1/10 of a second */
1176         msleep(100);
1177         /* stop the tracing. */
1178         tracing_stop();
1179         /* check the trace buffer */
1180         ret = trace_test_buffer(&tr->trace_buffer, &count);
1181         trace->reset(tr);
1182         tracing_start();
1183 
1184         if (!ret && !count) {
1185                 printk(KERN_CONT ".. no entries found ..");
1186                 ret = -1;
1187         }
1188 
1189         return ret;
1190 }
1191 #endif /* CONFIG_BRANCH_TRACER */
1192 
