Lines matching refs: iter (kernel/trace/trace.c): source line number, matching line, enclosing function.
1144 static int wait_on_pipe(struct trace_iterator *iter, bool full) in wait_on_pipe() argument
1147 if (trace_buffer_iter(iter, iter->cpu_file)) in wait_on_pipe()
1150 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, in wait_on_pipe()
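The three fragments above make up the whole wait helper; a reconstruction consistent with them (a sketch, not a verbatim copy of the source):

/* A static iterator (the plain "trace" file) never blocks; a consuming
 * reader sleeps inside the ring buffer until data arrives, or until a
 * full page is available when 'full' is set. */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
        /* Iterators are static; they do not need a wakeup. */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                full);
}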
2275 static void trace_iterator_increment(struct trace_iterator *iter) in trace_iterator_increment() argument
2277 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); in trace_iterator_increment()
2279 iter->idx++; in trace_iterator_increment()
2285 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, in peek_next_entry() argument
2289 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); in peek_next_entry()
2294 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, in peek_next_entry()
2298 iter->ent_size = ring_buffer_event_length(event); in peek_next_entry()
2301 iter->ent_size = 0; in peek_next_entry()
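The ent_size assignments above live in a peek helper; a hedged sketch of how the pieces connect (ring_buffer_iter_peek() and ring_buffer_event_data() come from the ring-buffer API and are not shown in the listing):

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
                unsigned long *lost_events)
{
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

        /* Prefer the per-CPU snapshot iterator when one exists
         * (non-consuming "trace" read), else peek the live buffer. */
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
                event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
                                         lost_events);

        if (event) {
                iter->ent_size = ring_buffer_event_length(event);
                return ring_buffer_event_data(event);
        }

        iter->ent_size = 0;
        return NULL;
}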
2306 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, in __find_next_entry() argument
2309 struct ring_buffer *buffer = iter->trace_buffer->buffer; in __find_next_entry()
2312 int cpu_file = iter->cpu_file; in __find_next_entry()
2325 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); in __find_next_entry()
2337 ent = peek_next_entry(iter, cpu, &ts, &lost_events); in __find_next_entry()
2347 next_size = iter->ent_size; in __find_next_entry()
2351 iter->ent_size = next_size; in __find_next_entry()
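The two peek_next_entry() calls above belong to a loop that merges the per-CPU buffers by timestamp; a condensed sketch of that selection (local names are illustrative, and 'buffer' is the local shown at line 2309):

        struct trace_entry *ent, *next = NULL;
        unsigned long lost_events = 0, next_lost = 0;
        u64 next_ts = 0, ts;
        int next_cpu = -1, next_size = 0, cpu;

        /* A per-CPU file peeks one buffer directly; otherwise scan every
         * CPU and keep the entry with the oldest timestamp so the merged
         * stream stays in time order. */
        for_each_tracing_cpu(cpu) {
                if (ring_buffer_empty_cpu(buffer, cpu))
                        continue;

                ent = peek_next_entry(iter, cpu, &ts, &lost_events);
                if (ent && (!next || ts < next_ts)) {
                        next = ent;
                        next_cpu = cpu;
                        next_ts = ts;
                        next_lost = lost_events;
                        next_size = iter->ent_size;
                }
        }

        iter->ent_size = next_size;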
2366 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, in trace_find_next_entry() argument
2369 return __find_next_entry(iter, ent_cpu, NULL, ent_ts); in trace_find_next_entry()
2373 void *trace_find_next_entry_inc(struct trace_iterator *iter) in trace_find_next_entry_inc() argument
2375 iter->ent = __find_next_entry(iter, &iter->cpu, in trace_find_next_entry_inc()
2376 &iter->lost_events, &iter->ts); in trace_find_next_entry_inc()
2378 if (iter->ent) in trace_find_next_entry_inc()
2379 trace_iterator_increment(iter); in trace_find_next_entry_inc()
2381 return iter->ent ? iter : NULL; in trace_find_next_entry_inc()
2384 static void trace_consume(struct trace_iterator *iter) in trace_consume() argument
2386 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, in trace_consume()
2387 &iter->lost_events); in trace_consume()
2392 struct trace_iterator *iter = m->private; in s_next() local
2396 WARN_ON_ONCE(iter->leftover); in s_next()
2401 if (iter->idx > i) in s_next()
2404 if (iter->idx < 0) in s_next()
2405 ent = trace_find_next_entry_inc(iter); in s_next()
2407 ent = iter; in s_next()
2409 while (ent && iter->idx < i) in s_next()
2410 ent = trace_find_next_entry_inc(iter); in s_next()
2412 iter->pos = *pos; in s_next()
2417 void tracing_iter_reset(struct trace_iterator *iter, int cpu) in tracing_iter_reset() argument
2424 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
2426 buf_iter = trace_buffer_iter(iter, cpu); in tracing_iter_reset()
2438 if (ts >= iter->trace_buffer->time_start) in tracing_iter_reset()
2444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
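A hedged sketch of the reset path around the lines above: the per-CPU skipped_entries counter is cleared, then any events stamped before the buffer's time_start are skipped and counted (ring_buffer_iter_reset(), ring_buffer_iter_peek() and ring_buffer_read() are assumed from the ring-buffer API of this era):

        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter;
        unsigned long entries = 0;
        u64 ts;

        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

        buf_iter = trace_buffer_iter(iter, cpu);
        if (!buf_iter)
                return;

        ring_buffer_iter_reset(buf_iter);

        /* Skip (and count) entries older than the buffer's start time,
         * e.g. stale data left behind by a latency-tracer snapshot. */
        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
                if (ts >= iter->trace_buffer->time_start)
                        break;
                entries++;
                ring_buffer_read(buf_iter, NULL);
        }

        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;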
2453 struct trace_iterator *iter = m->private; in s_start() local
2454 struct trace_array *tr = iter->tr; in s_start()
2455 int cpu_file = iter->cpu_file; in s_start()
2467 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) in s_start()
2468 *iter->trace = *tr->current_trace; in s_start()
2472 if (iter->snapshot && iter->trace->use_max_tr) in s_start()
2476 if (!iter->snapshot) in s_start()
2479 if (*pos != iter->pos) { in s_start()
2480 iter->ent = NULL; in s_start()
2481 iter->cpu = 0; in s_start()
2482 iter->idx = -1; in s_start()
2486 tracing_iter_reset(iter, cpu); in s_start()
2488 tracing_iter_reset(iter, cpu_file); in s_start()
2490 iter->leftover = 0; in s_start()
2491 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) in s_start()
2499 if (iter->leftover) in s_start()
2500 p = iter; in s_start()
2514 struct trace_iterator *iter = m->private; in s_stop() local
2517 if (iter->snapshot && iter->trace->use_max_tr) in s_stop()
2521 if (!iter->snapshot) in s_stop()
2524 trace_access_unlock(iter->cpu_file); in s_stop()
2599 print_trace_header(struct seq_file *m, struct trace_iterator *iter) in print_trace_header() argument
2602 struct trace_buffer *buf = iter->trace_buffer; in print_trace_header()
2604 struct tracer *type = iter->trace; in print_trace_header()
2649 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); in print_trace_header()
2650 trace_print_seq(m, &iter->seq); in print_trace_header()
2652 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); in print_trace_header()
2653 trace_print_seq(m, &iter->seq); in print_trace_header()
2660 static void test_cpu_buff_start(struct trace_iterator *iter) in test_cpu_buff_start() argument
2662 struct trace_seq *s = &iter->seq; in test_cpu_buff_start()
2663 struct trace_array *tr = iter->tr; in test_cpu_buff_start()
2668 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) in test_cpu_buff_start()
2671 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started)) in test_cpu_buff_start()
2674 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
2677 if (iter->started) in test_cpu_buff_start()
2678 cpumask_set_cpu(iter->cpu, iter->started); in test_cpu_buff_start()
2681 if (iter->idx > 1) in test_cpu_buff_start()
2683 iter->cpu); in test_cpu_buff_start()
2686 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) in print_trace_fmt() argument
2688 struct trace_array *tr = iter->tr; in print_trace_fmt()
2689 struct trace_seq *s = &iter->seq; in print_trace_fmt()
2694 entry = iter->ent; in print_trace_fmt()
2696 test_cpu_buff_start(iter); in print_trace_fmt()
2701 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in print_trace_fmt()
2702 trace_print_lat_context(iter); in print_trace_fmt()
2704 trace_print_context(iter); in print_trace_fmt()
2711 return event->funcs->trace(iter, sym_flags, event); in print_trace_fmt()
2718 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) in print_raw_fmt() argument
2720 struct trace_array *tr = iter->tr; in print_raw_fmt()
2721 struct trace_seq *s = &iter->seq; in print_raw_fmt()
2725 entry = iter->ent; in print_raw_fmt()
2729 entry->pid, iter->cpu, iter->ts); in print_raw_fmt()
2736 return event->funcs->raw(iter, 0, event); in print_raw_fmt()
2743 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) in print_hex_fmt() argument
2745 struct trace_array *tr = iter->tr; in print_hex_fmt()
2746 struct trace_seq *s = &iter->seq; in print_hex_fmt()
2751 entry = iter->ent; in print_hex_fmt()
2755 SEQ_PUT_HEX_FIELD(s, iter->cpu); in print_hex_fmt()
2756 SEQ_PUT_HEX_FIELD(s, iter->ts); in print_hex_fmt()
2763 enum print_line_t ret = event->funcs->hex(iter, 0, event); in print_hex_fmt()
2773 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) in print_bin_fmt() argument
2775 struct trace_array *tr = iter->tr; in print_bin_fmt()
2776 struct trace_seq *s = &iter->seq; in print_bin_fmt()
2780 entry = iter->ent; in print_bin_fmt()
2784 SEQ_PUT_FIELD(s, iter->cpu); in print_bin_fmt()
2785 SEQ_PUT_FIELD(s, iter->ts); in print_bin_fmt()
2791 return event ? event->funcs->binary(iter, 0, event) : in print_bin_fmt()
2795 int trace_empty(struct trace_iterator *iter) in trace_empty() argument
2801 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in trace_empty()
2802 cpu = iter->cpu_file; in trace_empty()
2803 buf_iter = trace_buffer_iter(iter, cpu); in trace_empty()
2808 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) in trace_empty()
2815 buf_iter = trace_buffer_iter(iter, cpu); in trace_empty()
2820 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) in trace_empty()
2829 enum print_line_t print_trace_line(struct trace_iterator *iter) in print_trace_line() argument
2831 struct trace_array *tr = iter->tr; in print_trace_line()
2835 if (iter->lost_events) { in print_trace_line()
2836 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", in print_trace_line()
2837 iter->cpu, iter->lost_events); in print_trace_line()
2838 if (trace_seq_has_overflowed(&iter->seq)) in print_trace_line()
2842 if (iter->trace && iter->trace->print_line) { in print_trace_line()
2843 ret = iter->trace->print_line(iter); in print_trace_line()
2848 if (iter->ent->type == TRACE_BPUTS && in print_trace_line()
2851 return trace_print_bputs_msg_only(iter); in print_trace_line()
2853 if (iter->ent->type == TRACE_BPRINT && in print_trace_line()
2856 return trace_print_bprintk_msg_only(iter); in print_trace_line()
2858 if (iter->ent->type == TRACE_PRINT && in print_trace_line()
2861 return trace_print_printk_msg_only(iter); in print_trace_line()
2864 return print_bin_fmt(iter); in print_trace_line()
2867 return print_hex_fmt(iter); in print_trace_line()
2870 return print_raw_fmt(iter); in print_trace_line()
2872 return print_trace_fmt(iter); in print_trace_line()
2877 struct trace_iterator *iter = m->private; in trace_latency_header() local
2878 struct trace_array *tr = iter->tr; in trace_latency_header()
2881 if (trace_empty(iter)) in trace_latency_header()
2884 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in trace_latency_header()
2885 print_trace_header(m, iter); in trace_latency_header()
2893 struct trace_iterator *iter = m->private; in trace_default_header() local
2894 struct trace_array *tr = iter->tr; in trace_default_header()
2900 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { in trace_default_header()
2902 if (trace_empty(iter)) in trace_default_header()
2904 print_trace_header(m, iter); in trace_default_header()
2910 print_func_help_header_irq(iter->trace_buffer, m); in trace_default_header()
2912 print_func_help_header(iter->trace_buffer, m); in trace_default_header()
2951 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) in print_snapshot_help() argument
2953 if (iter->tr->allocated_snapshot) in print_snapshot_help()
2959 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in print_snapshot_help()
2966 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } in print_snapshot_help() argument
2971 struct trace_iterator *iter = v; in s_show() local
2974 if (iter->ent == NULL) { in s_show()
2975 if (iter->tr) { in s_show()
2976 seq_printf(m, "# tracer: %s\n", iter->trace->name); in s_show()
2980 if (iter->snapshot && trace_empty(iter)) in s_show()
2981 print_snapshot_help(m, iter); in s_show()
2982 else if (iter->trace && iter->trace->print_header) in s_show()
2983 iter->trace->print_header(m); in s_show()
2987 } else if (iter->leftover) { in s_show()
2992 ret = trace_print_seq(m, &iter->seq); in s_show()
2995 iter->leftover = ret; in s_show()
2998 print_trace_line(iter); in s_show()
2999 ret = trace_print_seq(m, &iter->seq); in s_show()
3007 iter->leftover = ret; in s_show()
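s_start(), s_next(), s_show() and s_stop() above are the four halves of a seq_file iterator; the wiring implied by the __seq_open_private(&tracer_seq_ops) call in __tracing_open() below would look like this (the struct name is confirmed by that call, the rest is a sketch):

static const struct seq_operations tracer_seq_ops = {
        .start          = s_start,
        .next           = s_next,
        .stop           = s_stop,
        .show           = s_show,
};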
3035 struct trace_iterator *iter; in __tracing_open() local
3041 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); in __tracing_open()
3042 if (!iter) in __tracing_open()
3045 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), in __tracing_open()
3047 if (!iter->buffer_iter) in __tracing_open()
3055 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); in __tracing_open()
3056 if (!iter->trace) in __tracing_open()
3059 *iter->trace = *tr->current_trace; in __tracing_open()
3061 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) in __tracing_open()
3064 iter->tr = tr; in __tracing_open()
3069 iter->trace_buffer = &tr->max_buffer; in __tracing_open()
3072 iter->trace_buffer = &tr->trace_buffer; in __tracing_open()
3073 iter->snapshot = snapshot; in __tracing_open()
3074 iter->pos = -1; in __tracing_open()
3075 iter->cpu_file = tracing_get_cpu(inode); in __tracing_open()
3076 mutex_init(&iter->mutex); in __tracing_open()
3079 if (iter->trace && iter->trace->open) in __tracing_open()
3080 iter->trace->open(iter); in __tracing_open()
3083 if (ring_buffer_overruns(iter->trace_buffer->buffer)) in __tracing_open()
3084 iter->iter_flags |= TRACE_FILE_ANNOTATE; in __tracing_open()
3088 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in __tracing_open()
3091 if (!iter->snapshot) in __tracing_open()
3094 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { in __tracing_open()
3096 iter->buffer_iter[cpu] = in __tracing_open()
3097 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); in __tracing_open()
3101 ring_buffer_read_start(iter->buffer_iter[cpu]); in __tracing_open()
3102 tracing_iter_reset(iter, cpu); in __tracing_open()
3105 cpu = iter->cpu_file; in __tracing_open()
3106 iter->buffer_iter[cpu] = in __tracing_open()
3107 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); in __tracing_open()
3109 ring_buffer_read_start(iter->buffer_iter[cpu]); in __tracing_open()
3110 tracing_iter_reset(iter, cpu); in __tracing_open()
3115 return iter; in __tracing_open()
3119 kfree(iter->trace); in __tracing_open()
3120 kfree(iter->buffer_iter); in __tracing_open()
3163 struct trace_iterator *iter; in tracing_release() local
3172 iter = m->private; in tracing_release()
3176 if (iter->buffer_iter[cpu]) in tracing_release()
3177 ring_buffer_read_finish(iter->buffer_iter[cpu]); in tracing_release()
3180 if (iter->trace && iter->trace->close) in tracing_release()
3181 iter->trace->close(iter); in tracing_release()
3183 if (!iter->snapshot) in tracing_release()
3191 mutex_destroy(&iter->mutex); in tracing_release()
3192 free_cpumask_var(iter->started); in tracing_release()
3193 kfree(iter->trace); in tracing_release()
3194 kfree(iter->buffer_iter); in tracing_release()
3220 struct trace_iterator *iter; in tracing_open() local
3237 iter = __tracing_open(inode, file, false); in tracing_open()
3238 if (IS_ERR(iter)) in tracing_open()
3239 ret = PTR_ERR(iter); in tracing_open()
3241 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open()
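__tracing_open(), tracing_release() and tracing_open() above back the static "trace" file in tracefs; a minimal user-space reader as a sketch, assuming the usual debugfs mount point (tracefs may also be mounted at /sys/kernel/tracing):

#include <stdio.h>

int main(void)
{
        char line[4096];
        FILE *f = fopen("/sys/kernel/debug/tracing/trace", "r");

        if (!f) {
                perror("open trace");
                return 1;
        }
        while (fgets(line, sizeof(line), f))    /* non-consuming read */
                fputs(line, stdout);
        fclose(f);
        return 0;
}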
4572 struct trace_iterator *iter; in tracing_open_pipe() local
4584 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in tracing_open_pipe()
4585 if (!iter) { in tracing_open_pipe()
4591 trace_seq_init(&iter->seq); in tracing_open_pipe()
4592 iter->trace = tr->current_trace; in tracing_open_pipe()
4594 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { in tracing_open_pipe()
4600 cpumask_setall(iter->started); in tracing_open_pipe()
4603 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open_pipe()
4607 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in tracing_open_pipe()
4609 iter->tr = tr; in tracing_open_pipe()
4610 iter->trace_buffer = &tr->trace_buffer; in tracing_open_pipe()
4611 iter->cpu_file = tracing_get_cpu(inode); in tracing_open_pipe()
4612 mutex_init(&iter->mutex); in tracing_open_pipe()
4613 filp->private_data = iter; in tracing_open_pipe()
4615 if (iter->trace->pipe_open) in tracing_open_pipe()
4616 iter->trace->pipe_open(iter); in tracing_open_pipe()
4626 kfree(iter->trace); in tracing_open_pipe()
4627 kfree(iter); in tracing_open_pipe()
4635 struct trace_iterator *iter = file->private_data; in tracing_release_pipe() local
4642 if (iter->trace->pipe_close) in tracing_release_pipe()
4643 iter->trace->pipe_close(iter); in tracing_release_pipe()
4647 free_cpumask_var(iter->started); in tracing_release_pipe()
4648 mutex_destroy(&iter->mutex); in tracing_release_pipe()
4649 kfree(iter); in tracing_release_pipe()
4657 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) in trace_poll() argument
4659 struct trace_array *tr = iter->tr; in trace_poll()
4662 if (trace_buffer_iter(iter, iter->cpu_file)) in trace_poll()
4671 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, in trace_poll()
4678 struct trace_iterator *iter = filp->private_data; in tracing_poll_pipe() local
4680 return trace_poll(iter, filp, poll_table); in tracing_poll_pipe()
4686 struct trace_iterator *iter = filp->private_data; in tracing_wait_pipe() local
4689 while (trace_empty(iter)) { in tracing_wait_pipe()
4704 if (!tracing_is_on() && iter->pos) in tracing_wait_pipe()
4707 mutex_unlock(&iter->mutex); in tracing_wait_pipe()
4709 ret = wait_on_pipe(iter, false); in tracing_wait_pipe()
4711 mutex_lock(&iter->mutex); in tracing_wait_pipe()
4727 struct trace_iterator *iter = filp->private_data; in tracing_read_pipe() local
4731 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
4735 trace_seq_init(&iter->seq); in tracing_read_pipe()
4742 mutex_lock(&iter->mutex); in tracing_read_pipe()
4743 if (iter->trace->read) { in tracing_read_pipe()
4744 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); in tracing_read_pipe()
4755 if (trace_empty(iter)) { in tracing_read_pipe()
4764 memset(&iter->seq, 0, in tracing_read_pipe()
4767 cpumask_clear(iter->started); in tracing_read_pipe()
4768 iter->pos = -1; in tracing_read_pipe()
4771 trace_access_lock(iter->cpu_file); in tracing_read_pipe()
4772 while (trace_find_next_entry_inc(iter) != NULL) { in tracing_read_pipe()
4774 int save_len = iter->seq.seq.len; in tracing_read_pipe()
4776 ret = print_trace_line(iter); in tracing_read_pipe()
4779 iter->seq.seq.len = save_len; in tracing_read_pipe()
4783 trace_consume(iter); in tracing_read_pipe()
4785 if (trace_seq_used(&iter->seq) >= cnt) in tracing_read_pipe()
4793 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", in tracing_read_pipe()
4794 iter->ent->type); in tracing_read_pipe()
4796 trace_access_unlock(iter->cpu_file); in tracing_read_pipe()
4800 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
4801 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) in tracing_read_pipe()
4802 trace_seq_init(&iter->seq); in tracing_read_pipe()
4812 mutex_unlock(&iter->mutex); in tracing_read_pipe()
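The pipe handlers above (tracing_open_pipe() through tracing_read_pipe()) implement the consuming, blocking "trace_pipe" interface; a small user-space sketch that exercises the poll and read paths (the tracefs path is an assumption):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct pollfd pfd;
        ssize_t n;

        pfd.fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        pfd.events = POLLIN;
        if (pfd.fd < 0) {
                perror("open trace_pipe");
                return 1;
        }

        for (;;) {
                /* tracing_poll_pipe() -> trace_poll() decides readiness. */
                if (poll(&pfd, 1, -1) < 0)
                        break;
                /* tracing_read_pipe() formats and consumes entries. */
                n = read(pfd.fd, buf, sizeof(buf));
                if (n <= 0)
                        break;
                write(STDOUT_FILENO, buf, n);
        }
        close(pfd.fd);
        return 0;
}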
4832 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) in tracing_fill_pipe_page() argument
4840 save_len = iter->seq.seq.len; in tracing_fill_pipe_page()
4841 ret = print_trace_line(iter); in tracing_fill_pipe_page()
4843 if (trace_seq_has_overflowed(&iter->seq)) { in tracing_fill_pipe_page()
4844 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
4854 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
4858 count = trace_seq_used(&iter->seq) - save_len; in tracing_fill_pipe_page()
4861 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
4866 trace_consume(iter); in tracing_fill_pipe_page()
4868 if (!trace_find_next_entry_inc(iter)) { in tracing_fill_pipe_page()
4870 iter->ent = NULL; in tracing_fill_pipe_page()
4886 struct trace_iterator *iter = filp->private_data; in tracing_splice_read_pipe() local
4903 mutex_lock(&iter->mutex); in tracing_splice_read_pipe()
4905 if (iter->trace->splice_read) { in tracing_splice_read_pipe()
4906 ret = iter->trace->splice_read(iter, filp, in tracing_splice_read_pipe()
4916 if (!iter->ent && !trace_find_next_entry_inc(iter)) { in tracing_splice_read_pipe()
4922 trace_access_lock(iter->cpu_file); in tracing_splice_read_pipe()
4930 rem = tracing_fill_pipe_page(rem, iter); in tracing_splice_read_pipe()
4933 ret = trace_seq_to_buffer(&iter->seq, in tracing_splice_read_pipe()
4935 trace_seq_used(&iter->seq)); in tracing_splice_read_pipe()
4941 spd.partial[i].len = trace_seq_used(&iter->seq); in tracing_splice_read_pipe()
4943 trace_seq_init(&iter->seq); in tracing_splice_read_pipe()
4946 trace_access_unlock(iter->cpu_file); in tracing_splice_read_pipe()
4948 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
4961 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
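tracing_splice_read_pipe() lets the formatted pipe output be moved page by page without an extra copy; a user-space sketch using splice(2) through an anonymous pipe (output file name and byte count are arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pfd[2];
        int in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        int out = open("trace.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        ssize_t n;

        if (in < 0 || out < 0 || pipe(pfd) < 0) {
                perror("setup");
                return 1;
        }

        /* Pull one burst of formatted trace data into the pipe ... */
        n = splice(in, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
        /* ... and push it on to the output file. */
        if (n > 0)
                n = splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE);

        close(in);
        close(out);
        return n < 0;
}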
5296 struct trace_iterator iter; member
5305 struct trace_iterator *iter; in tracing_snapshot_open() local
5313 iter = __tracing_open(inode, file, true); in tracing_snapshot_open()
5314 if (IS_ERR(iter)) in tracing_snapshot_open()
5315 ret = PTR_ERR(iter); in tracing_snapshot_open()
5322 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in tracing_snapshot_open()
5323 if (!iter) { in tracing_snapshot_open()
5329 iter->tr = tr; in tracing_snapshot_open()
5330 iter->trace_buffer = &tr->max_buffer; in tracing_snapshot_open()
5331 iter->cpu_file = tracing_get_cpu(inode); in tracing_snapshot_open()
5332 m->private = iter; in tracing_snapshot_open()
5347 struct trace_iterator *iter = m->private; in tracing_snapshot_write() local
5348 struct trace_array *tr = iter->tr; in tracing_snapshot_write()
5369 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
5379 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
5391 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
5394 update_max_tr_single(tr, current, iter->cpu_file); in tracing_snapshot_write()
5399 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
5402 tracing_reset(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
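tracing_snapshot_open() and tracing_snapshot_write() above back the "snapshot" file, where a written value of 0 frees the spare buffer, 1 takes a snapshot and 2 clears it; a user-space sketch that takes and prints a snapshot (path assumed as usual):

#include <stdio.h>

int main(void)
{
        char line[4096];
        FILE *snap = fopen("/sys/kernel/debug/tracing/snapshot", "r+");

        if (!snap) {
                perror("open snapshot");
                return 1;
        }

        /* "1" swaps the live buffer with the spare (take a snapshot). */
        fputs("1\n", snap);
        fflush(snap);
        rewind(snap);

        while (fgets(line, sizeof(line), snap))
                fputs(line, stdout);
        fclose(snap);
        return 0;
}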
5452 if (info->iter.trace->use_max_tr) { in snapshot_raw_open()
5457 info->iter.snapshot = true; in snapshot_raw_open()
5458 info->iter.trace_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
5573 info->iter.tr = tr; in tracing_buffers_open()
5574 info->iter.cpu_file = tracing_get_cpu(inode); in tracing_buffers_open()
5575 info->iter.trace = tr->current_trace; in tracing_buffers_open()
5576 info->iter.trace_buffer = &tr->trace_buffer; in tracing_buffers_open()
5598 struct trace_iterator *iter = &info->iter; in tracing_buffers_poll() local
5600 return trace_poll(iter, filp, poll_table); in tracing_buffers_poll()
5608 struct trace_iterator *iter = &info->iter; in tracing_buffers_read() local
5616 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
5621 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, in tracing_buffers_read()
5622 iter->cpu_file); in tracing_buffers_read()
5631 trace_access_lock(iter->cpu_file); in tracing_buffers_read()
5632 ret = ring_buffer_read_page(iter->trace_buffer->buffer, in tracing_buffers_read()
5635 iter->cpu_file, 0); in tracing_buffers_read()
5636 trace_access_unlock(iter->cpu_file); in tracing_buffers_read()
5639 if (trace_empty(iter)) { in tracing_buffers_read()
5643 ret = wait_on_pipe(iter, false); in tracing_buffers_read()
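tracing_buffers_open() and tracing_buffers_read() above serve the binary per_cpu/cpuN/trace_pipe_raw files, handing out whole ring-buffer pages; a user-space sketch that drains raw pages for CPU 0 (path and page size are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* One ring-buffer page per read; 4096 matches the common page size. */
        unsigned char page[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
                      O_RDONLY);

        if (fd < 0) {
                perror("open trace_pipe_raw");
                return 1;
        }

        while ((n = read(fd, page, sizeof(page))) > 0) {
                /* Raw binary page: parse with the ring-buffer page format,
                 * or feed it to a tool such as trace-cmd. */
                fprintf(stderr, "got %zd raw bytes\n", n);
        }

        close(fd);
        return 0;
}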
5673 struct trace_iterator *iter = &info->iter; in tracing_buffers_release() local
5677 iter->tr->current_trace->ref--; in tracing_buffers_release()
5679 __trace_array_put(iter->tr); in tracing_buffers_release()
5682 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); in tracing_buffers_release()
5749 struct trace_iterator *iter = &info->iter; in tracing_buffers_splice_read() local
5765 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
5782 trace_access_lock(iter->cpu_file); in tracing_buffers_splice_read()
5783 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
5796 ref->buffer = iter->trace_buffer->buffer; in tracing_buffers_splice_read()
5797 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); in tracing_buffers_splice_read()
5805 len, iter->cpu_file, 1); in tracing_buffers_splice_read()
5829 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
5832 trace_access_unlock(iter->cpu_file); in tracing_buffers_splice_read()
5843 ret = wait_on_pipe(iter, true); in tracing_buffers_splice_read()
7070 void trace_init_global_iter(struct trace_iterator *iter) in trace_init_global_iter() argument
7072 iter->tr = &global_trace; in trace_init_global_iter()
7073 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
7074 iter->cpu_file = RING_BUFFER_ALL_CPUS; in trace_init_global_iter()
7075 iter->trace_buffer = &global_trace.trace_buffer; in trace_init_global_iter()
7077 if (iter->trace && iter->trace->open) in trace_init_global_iter()
7078 iter->trace->open(iter); in trace_init_global_iter()
7081 if (ring_buffer_overruns(iter->trace_buffer->buffer)) in trace_init_global_iter()
7082 iter->iter_flags |= TRACE_FILE_ANNOTATE; in trace_init_global_iter()
7085 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
7086 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in trace_init_global_iter()
7092 static struct trace_iterator iter; in ftrace_dump() local
7118 trace_init_global_iter(&iter); in ftrace_dump()
7121 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump()
7131 iter.cpu_file = RING_BUFFER_ALL_CPUS; in ftrace_dump()
7134 iter.cpu_file = raw_smp_processor_id(); in ftrace_dump()
7140 iter.cpu_file = RING_BUFFER_ALL_CPUS; in ftrace_dump()
7158 while (!trace_empty(&iter)) { in ftrace_dump()
7166 memset(&iter.seq, 0, in ftrace_dump()
7169 iter.iter_flags |= TRACE_FILE_LAT_FMT; in ftrace_dump()
7170 iter.pos = -1; in ftrace_dump()
7172 if (trace_find_next_entry_inc(&iter) != NULL) { in ftrace_dump()
7175 ret = print_trace_line(&iter); in ftrace_dump()
7177 trace_consume(&iter); in ftrace_dump()
7181 trace_printk_seq(&iter.seq); in ftrace_dump()
7193 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump()
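A hedged sketch of the dump loop the ftrace_dump() lines above come from: recording is disabled on every CPU, the static iterator is reset, and entries are replayed one at a time straight to the console (for_each_tracing_cpu() and the exact memset bounds are assumptions; the rest follows the lines shown):

        /* Keep new events out of the buffers while we dump them. */
        for_each_tracing_cpu(cpu)
                atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);

        while (!trace_empty(&iter)) {
                /* Start each entry with a clean seq buffer, latency format. */
                memset(&iter.seq, 0, sizeof(struct trace_seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        print_trace_line(&iter);
                        trace_consume(&iter);
                }

                trace_printk_seq(&iter.seq);
        }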