This source file includes the following definitions:
- func_prolog_preempt_disable
- wakeup_display_graph
- wakeup_graph_entry
- wakeup_graph_return
- wakeup_trace_open
- wakeup_trace_close
- wakeup_print_line
- wakeup_print_header
- wakeup_tracer_call
- register_wakeup_function
- unregister_wakeup_function
- wakeup_function_set
- register_wakeup_function
- unregister_wakeup_function
- wakeup_function_set
- wakeup_print_line
- wakeup_trace_open
- wakeup_trace_close
- wakeup_print_header
- __trace_function
- wakeup_flag_changed
- start_func_tracer
- stop_func_tracer
- report_latency
- probe_wakeup_migrate_task
- tracing_sched_switch_trace
- tracing_sched_wakeup_trace
- probe_wakeup_sched_switch
- __wakeup_reset
- wakeup_reset
- probe_wakeup
- start_wakeup_tracer
- stop_wakeup_tracer
- __wakeup_tracer_init
- wakeup_tracer_init
- wakeup_rt_tracer_init
- wakeup_dl_tracer_init
- wakeup_tracer_reset
- wakeup_tracer_start
- wakeup_tracer_stop
- init_wakeup_tracer
// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
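
/*
 * Typical usage, via tracefs:
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/tracing_max_latency
 *
 * (On older systems tracefs may be mounted at /sys/kernel/debug/tracing.)
 */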
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array       *wakeup_trace;
static int __read_mostly        tracer_enabled;

static struct task_struct       *wakeup_task;
static int                      wakeup_cpu;
static int                      wakeup_current_cpu;
static unsigned                 wakeup_prio = -1;
static int                      wakeup_rt;
static int                      wakeup_dl;
static int                      tracing_dl = 0;

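/*
 * Serializes updates to wakeup_task and the recorded max latency. A raw
 * arch_spinlock_t is used because this lock is taken from scheduler
 * tracepoints with interrupts disabled, where a normal, lockdep-tracked
 * spinlock could itself be traced and recurse into this code.
 */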
static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, with preemption
 * disabled and data->disabled incremented; 0 if the
 * trace is to be ignored, with preemption not disabled
 * and data->disabled left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * non-functional without the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
                            struct trace_array_cpu **data,
                            int *pc)
{
        long disabled;
        int cpu;

        if (likely(!wakeup_task))
                return 0;

        *pc = preempt_count();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        return 1;

out:
        atomic_dec(&(*data)->disabled);

out_enable:
        preempt_enable_notrace();
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

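/*
 * Called when the "display-graph" option is flipped: tear down the old
 * function (or graph) tracer, throw away the trace and max latency so
 * samples from the two modes are not mixed, then start the new one.
 */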
static int wakeup_display_graph(struct trace_array *tr, int set)
{
        if (!(is_graph(tr) ^ set))
                return 0;

        stop_func_tracer(tr, !set);

        wakeup_reset(wakeup_trace);
        tr->max_latency = 0;

        return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc, ret = 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return 0;

        local_save_flags(flags);
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
        preempt_enable_notrace();

        return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_save_flags(flags);
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        preempt_enable_notrace();
        return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
        .entryfunc = &wakeup_graph_entry,
        .retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_CPU |  \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION | \
                            TRACE_GRAPH_PRINT_OVERHEAD | \
                            TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
        if (is_graph(wakeup_trace))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
                   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_wakeup_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_wakeup_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_wakeup_function(tr, is_graph(tr), 1);
        else
                unregister_wakeup_function(tr, is_graph(tr));
        return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (wakeup_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return wakeup_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

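/*
 * tracer_enabled gates all the probes below; it is only set once the
 * function (or function-graph) tracer has registered successfully and
 * tracing is globally enabled.
 */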
static int start_func_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_wakeup_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

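/*
 * Follow the traced task around: wakeup_current_cpu is what the
 * function-trace prologue checks so that it only records on the CPU
 * the task is actually running on.
 */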
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
        if (task != wakeup_task)
                return;

        wakeup_current_cpu = cpu;
}

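/*
 * Write a TRACE_CTX (context-switch) entry into the ring buffer.
 */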
static void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = task_state_index(prev);
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = task_state_index(next);
        entry->next_cpu                 = task_cpu(next);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

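/*
 * Write a TRACE_WAKE entry; it reuses ctx_switch_entry, with "prev"
 * describing the waker (curr) and "next" the wakee.
 */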
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = task_state_index(curr);
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = task_state_index(wakee);
        entry->next_cpu                 = task_cpu(wakee);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

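/*
 * Fires on every context switch, but bails out quickly unless the task
 * being switched in is the one whose wakeup is being timed. The latency
 * is the delta between the wakeup's preempt_timestamp and now.
 */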
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
                          struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        u64 T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see the tracer_enabled = 1
         * and the wakeup_task with an older task, that might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        if (!report_latency(wakeup_trace, delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                wakeup_trace->max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

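/*
 * Drop the reference to the currently traced task and re-arm for the
 * next wakeup. Callers must hold wakeup_lock with interrupts disabled.
 */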
static void __wakeup_reset(struct trace_array *tr)
{
        wakeup_cpu = -1;
        wakeup_prio = -1;
        tracing_dl = 0;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        tracing_reset_online_cpus(&tr->trace_buffer);

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
}

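/*
 * Fires when a task is woken: decide whether it should become the new
 * wakeup target, based on the tracer flavour (wakeup, wakeup_rt,
 * wakeup_dl) and the task's priority relative to the current target.
 */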
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        /*
         * Semantic is like this:
         *  - wakeup tracer handles all tasks in the system, independently
         *    from their scheduling class;
         *  - wakeup_rt tracer handles tasks belonging to sched_dl and
         *    sched_rt class;
         *  - wakeup_dl handles tasks belonging to sched_dl class only.
         */
        if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
            (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
            (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        arch_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || tracing_dl ||
            (!dl_task(p) && p->prio >= wakeup_prio))
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        /*
         * Once you start tracing a -deadline task, don't bother tracing
         * another task until the first one wakes up.
         */
        if (dl_task(p))
                tracing_dl = 1;
        else
                tracing_dl = 0;

        wakeup_task = get_task_struct(p);

        local_save_flags(flags);

        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        /*
         * We must be careful in using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is)
         * it should be safe to use it here.
         */
        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        arch_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return;
        }

        ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
                goto fail_deprobe_sched_switch;
        }

        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is irrelevant for performance purposes.
         */
        smp_wmb();

        if (start_func_tracer(tr, is_graph(tr)))
                printk(KERN_ERR "failed to start wakeup tracer\n");

        return;
fail_deprobe_sched_switch:
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        stop_func_tracer(tr, is_graph(tr));
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
        unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

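/* Only one wakeup-family tracer instance may be active at a time. */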
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        wakeup_trace = tr;
        ftrace_init_array_ops(tr, wakeup_tracer_call);
        start_wakeup_tracer(tr);

        wakeup_busy = true;
        return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 1;
        return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 1;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);
        wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
        wakeup_reset(tr);
        tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
        .name           = "wakeup",
        .init           = wakeup_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
        .name           = "wakeup_rt",
        .init           = wakeup_rt_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
        .name           = "wakeup_dl",
        .init           = wakeup_dl_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_rt_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_dl_tracer);
        if (ret)
                return ret;

        return 0;
}
core_initcall(init_wakeup_tracer);