Lines Matching refs:ring_buffer_per_cpu (kernel/trace/ring_buffer.c)
433 struct ring_buffer_per_cpu { struct
480 struct ring_buffer_per_cpu **buffers; argument
491 struct ring_buffer_per_cpu *cpu_buffer;
528 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); in ring_buffer_wait()
633 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait()
674 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
675 struct ring_buffer_per_cpu *__b = \
812 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page()
842 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head()
855 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate()
880 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate()
891 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set()
914 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update()
923 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head()
932 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal()
941 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page()
950 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page()
1007 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update()
1074 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage()
1088 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list()
1105 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages()
1179 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages()
1204 static struct ring_buffer_per_cpu *
1207 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer()
1265 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer()
1420 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1433 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages()
1543 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages()
1619 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages()
1635 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler()
1636 struct ring_buffer_per_cpu, update_pages_work); in update_pages_handler()
1654 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize()
1853 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event()
1877 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index()
1892 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
1917 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page()
2068 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail()
2142 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail()
2267 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2282 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event()
2352 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard()
2391 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit()
2398 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write()
2450 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit()
2496 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit()
2510 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_write_stamp()
2537 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit()
2546 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups()
2611 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock()
2636 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock()
2653 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit()
2671 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_timestamp()
2687 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next()
2743 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event()
2834 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve()
2881 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry()
2936 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit()
2988 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write()
3039 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty()
3152 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu()
3172 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu()
3189 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries()
3203 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts()
3235 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu()
3255 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu()
3274 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu()
3297 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu()
3319 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu()
3340 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu()
3359 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries()
3382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns()
3398 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
3422 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset()
3442 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty()
3452 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp()
3514 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page()
3646 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader()
3671 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter()
3711 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events()
3717 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek()
3786 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek()
3862 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock()
3887 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock()
3908 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
3942 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
3972 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume()
4030 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare()
4079 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start()
4105 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
4135 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read()
4175 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu()
4223 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
4273 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty()
4303 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu()
4336 struct ring_buffer_per_cpu *cpu_buffer_a; in ring_buffer_swap_cpu()
4337 struct ring_buffer_per_cpu *cpu_buffer_b; in ring_buffer_swap_cpu()
4480 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
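
For orientation, the first entry above (line 433) is the definition of the structure every other reference points at. A condensed sketch of that definition follows; the field names match the kernel source from roughly the era these line numbers correspond to, but the exact field set and layout vary between kernel versions, so treat this as an orientation aid, not the authoritative definition in your tree.

	/* Condensed sketch of the per-CPU ring buffer state in
	 * kernel/trace/ring_buffer.c; fields abbreviated, see line 433. */
	struct ring_buffer_per_cpu {
		int			cpu;		/* CPU this buffer belongs to */
		atomic_t		record_disabled;
		struct ring_buffer	*buffer;	/* owning top-level buffer */
		raw_spinlock_t		reader_lock;	/* serializes readers */
		arch_spinlock_t		lock;
		unsigned long		nr_pages;
		struct list_head	*pages;		/* list of buffer_page data pages */
		struct buffer_page	*head_page;	/* oldest data: read from here */
		struct buffer_page	*tail_page;	/* newest data: write to here */
		struct buffer_page	*commit_page;	/* last fully committed page */
		struct buffer_page	*reader_page;	/* page swapped out to the reader */
		local_t			entries;	/* entry/overrun/commit accounting */
		local_t			overrun;
		local_t			commit_overrun;
		local_t			dropped_events;
		local_t			committing;
		local_t			commits;
		u64			write_stamp;	/* timestamps for delta encoding */
		u64			read_stamp;
		long			nr_pages_to_update;	/* resize bookkeeping */
		struct list_head	new_pages;
		struct work_struct	update_pages_work;
		struct completion	update_done;
		struct rb_irq_work	irq_work;	/* reader wakeups */
	};

The references listed above fall into the same groups visible in the sketch: head/tail/commit page manipulation (rb_set_head_page, rb_tail_page_update, rb_set_commit_to_write), reserve/commit of events (rb_reserve_next_event, rb_commit), reader-side page swapping (rb_get_reader_page, rb_advance_reader), accounting queries (ring_buffer_entries_cpu, ring_buffer_overrun_cpu), and resize handling (rb_update_pages, update_pages_handler).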