Lines matching refs:trace_buffer
328 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) in buffer_ftrace_now()
344 return buffer_ftrace_now(&global_trace.trace_buffer, cpu); in ftrace_now()
479 if (tr->trace_buffer.buffer) in tracer_tracing_on()
480 ring_buffer_record_on(tr->trace_buffer.buffer); in tracer_tracing_on()
532 buffer = global_trace.trace_buffer.buffer; in __trace_puts()
580 buffer = global_trace.trace_buffer.buffer; in __trace_bputs()
644 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
645 struct trace_buffer *size_buf, int cpu_id);
646 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
656 &tr->trace_buffer, RING_BUFFER_ALL_CPUS); in alloc_snapshot()
745 if (tr->trace_buffer.buffer) in tracer_tracing_off()
746 ring_buffer_record_off(tr->trace_buffer.buffer); in tracer_tracing_off()
788 if (tr->trace_buffer.buffer) in tracer_tracing_is_on()
789 return ring_buffer_record_is_on(tr->trace_buffer.buffer); in tracer_tracing_is_on()
1019 struct trace_buffer *trace_buf = &tr->trace_buffer; in __update_max_tr()
1020 struct trace_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1077 buf = tr->trace_buffer.buffer; in update_max_tr()
1078 tr->trace_buffer.buffer = tr->max_buffer.buffer; in update_max_tr()
1110 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); in update_max_tr_single()
1136 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, in wait_on_pipe()
1157 tracing_reset_online_cpus(&tr->trace_buffer); in run_tracer_selftest()
1183 tracing_reset_online_cpus(&tr->trace_buffer); in run_tracer_selftest()
1281 void tracing_reset(struct trace_buffer *buf, int cpu) in tracing_reset()
1297 void tracing_reset_online_cpus(struct trace_buffer *buf) in tracing_reset_online_cpus()
1324 tracing_reset_online_cpus(&tr->trace_buffer); in tracing_reset_all_online_cpus()
1430 buffer = global_trace.trace_buffer.buffer; in tracing_start()
1469 buffer = tr->trace_buffer.buffer; in tracing_start_tr()
1495 buffer = global_trace.trace_buffer.buffer; in tracing_stop()
1524 buffer = tr->trace_buffer.buffer; in tracing_stop_tr()
1702 *current_rb = ftrace_file->tr->trace_buffer.buffer; in trace_event_buffer_lock_reserve()
1725 *current_rb = global_trace.trace_buffer.buffer; in trace_current_buffer_lock_reserve()
1764 struct ring_buffer *buffer = tr->trace_buffer.buffer; in trace_function()
1896 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL); in __trace_stack()
1917 __ftrace_trace_stack(global_trace.trace_buffer.buffer, in trace_dump_stack()
2103 if (global_trace.trace_buffer.buffer) in trace_printk_init_buffers()
2163 buffer = tr->trace_buffer.buffer; in trace_vbprintk()
2239 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
2297 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, in peek_next_entry()
2312 struct ring_buffer *buffer = iter->trace_buffer->buffer; in __find_next_entry()
2389 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, in trace_consume()
2427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
2441 if (ts >= iter->trace_buffer->time_start) in tracing_iter_reset()
2447 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
2532 get_total_entries(struct trace_buffer *buf, in get_total_entries()
2571 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) in print_event_info()
2582 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) in print_func_help_header()
2589 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) in print_func_help_header_irq()
2605 struct trace_buffer *buf = iter->trace_buffer; in print_trace_header()
2676 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
2805 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) in trace_empty()
2817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) in trace_empty()
2902 print_func_help_header_irq(iter->trace_buffer, m); in trace_default_header()
2904 print_func_help_header(iter->trace_buffer, m); in trace_default_header()
3061 iter->trace_buffer = &tr->max_buffer; in __tracing_open()
3064 iter->trace_buffer = &tr->trace_buffer; in __tracing_open()
3075 if (ring_buffer_overruns(iter->trace_buffer->buffer)) in __tracing_open()
3089 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); in __tracing_open()
3099 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); in __tracing_open()
3223 tracing_reset_online_cpus(&tr->trace_buffer); in tracing_open()
3225 tracing_reset(&tr->trace_buffer, cpu); in tracing_open()
3434 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
3435 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); in tracing_cpumask_write()
3439 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
3440 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); in tracing_cpumask_write()
3561 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); in set_tracer_flag()
4142 tracing_reset_online_cpus(&tr->trace_buffer); in tracer_init()
4146 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) in set_buffer_entries()
4156 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, in resize_buffer_duplicate_size()
4157 struct trace_buffer *size_buf, int cpu_id) in resize_buffer_duplicate_size()
4195 if (!tr->trace_buffer.buffer) in __tracing_resize_ring_buffer()
4198 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
4209 int r = resize_buffer_duplicate_size(&tr->trace_buffer, in __tracing_resize_ring_buffer()
4210 &tr->trace_buffer, cpu); in __tracing_resize_ring_buffer()
4241 set_buffer_entries(&tr->trace_buffer, size); in __tracing_resize_ring_buffer()
4243 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
4579 iter->trace_buffer = &tr->trace_buffer; in tracing_open_pipe()
4638 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, in trace_poll()
4955 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; in tracing_entries_read()
4956 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { in tracing_entries_read()
4972 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
5019 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
5127 buffer = tr->trace_buffer.buffer; in tracing_mark_write()
5197 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
5203 tracing_reset_online_cpus(&tr->trace_buffer); in tracing_set_clock()
5297 iter->trace_buffer = &tr->max_buffer; in tracing_snapshot_open()
5425 info->iter.trace_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
5541 info->iter.trace_buffer = &tr->trace_buffer; in tracing_buffers_open()
5586 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, in tracing_buffers_read()
5597 ret = ring_buffer_read_page(iter->trace_buffer->buffer, in tracing_buffers_read()
5647 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); in tracing_buffers_release()
5748 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
5761 ref->buffer = iter->trace_buffer->buffer; in tracing_buffers_splice_read()
5794 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
5836 struct trace_buffer *trace_buf = &tr->trace_buffer; in tracing_stats_read()
6395 struct ring_buffer *buffer = tr->trace_buffer.buffer; in rb_simple_write()
6436 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) in allocate_trace_buffer()
6455 set_buffer_entries(&tr->trace_buffer, in allocate_trace_buffer()
6456 ring_buffer_size(tr->trace_buffer.buffer, 0)); in allocate_trace_buffer()
6465 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); in allocate_trace_buffers()
6473 ring_buffer_free(tr->trace_buffer.buffer); in allocate_trace_buffers()
6474 free_percpu(tr->trace_buffer.data); in allocate_trace_buffers()
6488 static void free_trace_buffer(struct trace_buffer *buf) in free_trace_buffer()
6503 free_trace_buffer(&tr->trace_buffer); in free_trace_buffers()
6950 iter->trace_buffer = &global_trace.trace_buffer; in trace_init_global_iter()
6956 if (ring_buffer_overruns(iter->trace_buffer->buffer)) in trace_init_global_iter()
6995 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled); in ftrace_dump()
7067 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump()
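
For reference, the struct trace_buffer that these hits dereference is a thin wrapper pairing a ring buffer with its per-CPU bookkeeping. A minimal sketch of the definition, assuming a kernel of the same vintage as this listing (it lives in kernel/trace/trace.h; later kernels renamed it to struct array_buffer), with comments tied to the accessors visible above (->buffer, ->data, ->time_start):

struct trace_buffer {
	struct trace_array		*tr;		/* back-pointer to the owning trace_array */
	struct ring_buffer		*buffer;	/* the lockless ring buffer itself */
	struct trace_array_cpu __percpu	*data;		/* per-CPU state: disabled count, entries, skipped_entries, ... */
	cycle_t				time_start;	/* reset timestamp, compared in tracing_iter_reset() */
	int				cpu;
};

Each trace_array carries two of these, trace_buffer and (under CONFIG_TRACER_MAX_TRACE) max_buffer, which is why update_max_tr() can swap their ->buffer pointers wholesale and why the snapshot and open paths above can point iter->trace_buffer at either instance.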