Lines matching refs:event in kernel/trace/ring_buffer.c
198 #define skip_time_extend(event) \ argument
199 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
201 static inline int rb_null_event(struct ring_buffer_event *event) in rb_null_event() argument
203 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
206 static void rb_event_set_padding(struct ring_buffer_event *event) in rb_event_set_padding() argument
209 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
210 event->time_delta = 0; in rb_event_set_padding()
214 rb_event_data_length(struct ring_buffer_event *event) in rb_event_data_length() argument
218 if (event->type_len) in rb_event_data_length()
219 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
221 length = event->array[0]; in rb_event_data_length()
231 rb_event_length(struct ring_buffer_event *event) in rb_event_length() argument
233 switch (event->type_len) { in rb_event_length()
235 if (rb_null_event(event)) in rb_event_length()
238 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
247 return rb_event_data_length(event); in rb_event_length()
260 rb_event_ts_length(struct ring_buffer_event *event) in rb_event_ts_length() argument
264 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { in rb_event_ts_length()
267 event = skip_time_extend(event); in rb_event_ts_length()
269 return len + rb_event_length(event); in rb_event_ts_length()
282 unsigned ring_buffer_event_length(struct ring_buffer_event *event) in ring_buffer_event_length() argument
286 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) in ring_buffer_event_length()
287 event = skip_time_extend(event); in ring_buffer_event_length()
289 length = rb_event_length(event); in ring_buffer_event_length()
290 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
293 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
294 length -= sizeof(event->array[0]); in ring_buffer_event_length()
301 rb_event_data(struct ring_buffer_event *event) in rb_event_data() argument
303 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) in rb_event_data()
304 event = skip_time_extend(event); in rb_event_data()
305 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
307 if (event->type_len) in rb_event_data()
308 return (void *)&event->array[0]; in rb_event_data()
310 return (void *)&event->array[1]; in rb_event_data()
317 void *ring_buffer_event_data(struct ring_buffer_event *event) in ring_buffer_event_data() argument
319 return rb_event_data(event); in ring_buffer_event_data()
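A minimal usage sketch for the two exported accessors above, ring_buffer_event_length() and ring_buffer_event_data(); the payload struct and the pr_info() reporting are illustrative assumptions, not part of ring_buffer.c:

#include <linux/ring_buffer.h>
#include <linux/printk.h>

struct my_payload {                     /* hypothetical event payload */
        int value;
};

static void inspect_event(struct ring_buffer_event *event)
{
        /* size of the data load for a data event, header excluded */
        unsigned len = ring_buffer_event_length(event);
        /* pointer to the payload, past the header and any time extend */
        struct my_payload *p = ring_buffer_event_data(event);

        pr_info("event: %u bytes, value=%d\n", len, p->value);
}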
1907 rb_event_index(struct ring_buffer_event *event) in rb_event_index() argument
1909 unsigned long addr = (unsigned long)event; in rb_event_index()
1916 struct ring_buffer_event *event) in rb_event_is_commit() argument
1918 unsigned long addr = (unsigned long)event; in rb_event_is_commit()
1921 index = rb_event_index(event); in rb_event_is_commit()
2008 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) in rb_add_time_stamp() argument
2010 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2013 if (rb_event_index(event)) { in rb_add_time_stamp()
2014 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2015 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2018 event->time_delta = 0; in rb_add_time_stamp()
2019 event->array[0] = 0; in rb_add_time_stamp()
2022 return skip_time_extend(event); in rb_add_time_stamp()
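For orientation, a sketch (not taken from ring_buffer.c) of how the split delta written by rb_add_time_stamp() above is put back together on the read side, as rb_update_write_stamp() and rb_update_read_stamp() later in this listing do; TS_SHIFT mirrors the file-local 27-bit split constant:

#include <linux/ring_buffer.h>
#include <linux/types.h>

#define TS_SHIFT 27     /* assumption: mirrors the file-local constant */

static u64 time_extend_delta(struct ring_buffer_event *event)
{
        u64 delta = event->array[0];    /* upper bits written by rb_add_time_stamp() */

        delta <<= TS_SHIFT;
        delta += event->time_delta;     /* lower TS_SHIFT bits */
        return delta;
}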
2038 struct ring_buffer_event *event, unsigned length, in rb_update_event() argument
2042 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) in rb_update_event()
2050 event = rb_add_time_stamp(event, delta); in rb_update_event()
2055 event->time_delta = delta; in rb_update_event()
2058 event->type_len = 0; in rb_update_event()
2059 event->array[0] = length; in rb_update_event()
2061 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
2224 struct ring_buffer_event event; /* Used only for sizeof array */ in rb_calculate_event_length() local
2231 length += sizeof(event.array[0]); in rb_calculate_event_length()
2244 struct ring_buffer_event *event; in rb_reset_tail() local
2263 event = __rb_page_index(tail_page, tail); in rb_reset_tail()
2264 kmemcheck_annotate_bitfield(event, bitfield); in rb_reset_tail()
2291 rb_event_set_padding(event); in rb_reset_tail()
2299 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2300 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2302 event->time_delta = 1; in rb_reset_tail()
2424 struct ring_buffer_event *event; in __rb_reserve_next() local
2456 event = __rb_page_index(tail_page, tail); in __rb_reserve_next()
2457 kmemcheck_annotate_bitfield(event, bitfield); in __rb_reserve_next()
2458 rb_update_event(cpu_buffer, event, length, add_timestamp, delta); in __rb_reserve_next()
2472 return event; in __rb_reserve_next()
2477 struct ring_buffer_event *event) in rb_try_to_discard() argument
2484 new_index = rb_event_index(event); in rb_try_to_discard()
2485 old_index = new_index + rb_event_ts_length(event); in rb_try_to_discard()
2486 addr = (unsigned long)event; in rb_try_to_discard()
2494 unsigned long event_length = rb_event_length(event); in rb_try_to_discard()
2558 struct ring_buffer_event *event; in rb_reserve_next_event() local
2625 event = __rb_reserve_next(cpu_buffer, length, ts, in rb_reserve_next_event()
2627 if (unlikely(PTR_ERR(event) == -EAGAIN)) in rb_reserve_next_event()
2630 if (!event) in rb_reserve_next_event()
2633 return event; in rb_reserve_next_event()
2737 struct ring_buffer_event *event; in ring_buffer_lock_reserve() local
2765 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2766 if (!event) in ring_buffer_lock_reserve()
2769 return event; in ring_buffer_lock_reserve()
2781 struct ring_buffer_event *event) in rb_update_write_stamp() argument
2789 if (rb_event_is_commit(cpu_buffer, event)) { in rb_update_write_stamp()
2794 if (!rb_event_index(event)) in rb_update_write_stamp()
2797 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { in rb_update_write_stamp()
2798 delta = event->array[0]; in rb_update_write_stamp()
2800 delta += event->time_delta; in rb_update_write_stamp()
2803 cpu_buffer->write_stamp += event->time_delta; in rb_update_write_stamp()
2808 struct ring_buffer_event *event) in rb_commit() argument
2811 rb_update_write_stamp(cpu_buffer, event); in rb_commit()
2852 struct ring_buffer_event *event) in ring_buffer_unlock_commit() argument
2859 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
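The reserve/commit pair above is the two-step producer interface; a minimal sketch of its use, modeled on rb_write_something() near the end of this listing (the rb_item layout and the error handling are illustrative assumptions):

#include <linux/ring_buffer.h>
#include <linux/string.h>
#include <linux/errno.h>

struct rb_item {                /* payload layout assumed for illustration */
        int size;
        char str[];
};

static int write_one(struct ring_buffer *buffer, int len)
{
        struct ring_buffer_event *event;
        struct rb_item *item;

        event = ring_buffer_lock_reserve(buffer, sizeof(*item) + len);
        if (!event)
                return -EBUSY;  /* buffer full, or recording is off */

        item = ring_buffer_event_data(event);
        item->size = len;
        memset(item->str, 'x', len);

        return ring_buffer_unlock_commit(buffer, event);
}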
2871 static inline void rb_event_discard(struct ring_buffer_event *event) in rb_event_discard() argument
2873 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) in rb_event_discard()
2874 event = skip_time_extend(event); in rb_event_discard()
2877 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
2878 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
2880 if (!event->time_delta) in rb_event_discard()
2881 event->time_delta = 1; in rb_event_discard()
2892 struct ring_buffer_event *event) in rb_decrement_entry() argument
2894 unsigned long addr = (unsigned long)event; in rb_decrement_entry()
2944 struct ring_buffer_event *event) in ring_buffer_discard_commit() argument
2950 rb_event_discard(event); in ring_buffer_discard_commit()
2962 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
2963 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
2970 rb_update_write_stamp(cpu_buffer, event); in ring_buffer_discard_commit()
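ring_buffer_discard_commit() above is the escape hatch for a reserved event that turns out not to be wanted; a short sketch, with the keep/drop condition purely illustrative:

#include <linux/ring_buffer.h>
#include <linux/types.h>

static void commit_or_drop(struct ring_buffer *buffer,
                           struct ring_buffer_event *event, bool keep)
{
        if (keep)
                ring_buffer_unlock_commit(buffer, event);
        else
                /* never becomes visible to readers; still pairs with the reserve */
                ring_buffer_discard_commit(buffer, event);
}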
2999 struct ring_buffer_event *event; in ring_buffer_write() local
3025 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3026 if (!event) in ring_buffer_write()
3029 body = rb_event_data(event); in ring_buffer_write()
3033 rb_commit(cpu_buffer, event); in ring_buffer_write()
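ring_buffer_write() above folds the same reserve, copy, and commit steps into a single call; a tiny sketch, with the payload an assumption:

#include <linux/ring_buffer.h>
#include <linux/types.h>

static int write_sample(struct ring_buffer *buffer)
{
        u64 sample = 42;        /* illustrative payload */

        /* reserves, copies the payload in, and commits in one call */
        return ring_buffer_write(buffer, sizeof(sample), &sample);
}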
3459 struct ring_buffer_event *event) in rb_update_read_stamp() argument
3463 switch (event->type_len) { in rb_update_read_stamp()
3468 delta = event->array[0]; in rb_update_read_stamp()
3470 delta += event->time_delta; in rb_update_read_stamp()
3479 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
3490 struct ring_buffer_event *event) in rb_update_iter_read_stamp() argument
3494 switch (event->type_len) { in rb_update_iter_read_stamp()
3499 delta = event->array[0]; in rb_update_iter_read_stamp()
3501 delta += event->time_delta; in rb_update_iter_read_stamp()
3510 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
3650 struct ring_buffer_event *event; in rb_advance_reader() local
3660 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
3662 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
3665 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
3667 length = rb_event_length(event); in rb_advance_reader()
3674 struct ring_buffer_event *event; in rb_advance_iter() local
3690 event = rb_iter_head_event(iter); in rb_advance_iter()
3692 length = rb_event_length(event); in rb_advance_iter()
3703 rb_update_iter_read_stamp(iter, event); in rb_advance_iter()
3722 struct ring_buffer_event *event; in rb_buffer_peek() local
3740 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
3742 switch (event->type_len) { in rb_buffer_peek()
3744 if (rb_null_event(event)) in rb_buffer_peek()
3754 return event; in rb_buffer_peek()
3768 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
3774 return event; in rb_buffer_peek()
3789 struct ring_buffer_event *event; in rb_iter_peek() local
3827 event = rb_iter_head_event(iter); in rb_iter_peek()
3829 switch (event->type_len) { in rb_iter_peek()
3831 if (rb_null_event(event)) { in rb_iter_peek()
3836 return event; in rb_iter_peek()
3850 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
3854 return event; in rb_iter_peek()
3894 struct ring_buffer_event *event; in ring_buffer_peek() local
3906 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
3907 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
3913 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
3916 return event; in ring_buffer_peek()
3931 struct ring_buffer_event *event; in ring_buffer_iter_peek() local
3936 event = rb_iter_peek(iter, ts); in ring_buffer_iter_peek()
3939 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
3942 return event; in ring_buffer_iter_peek()
3961 struct ring_buffer_event *event = NULL; in ring_buffer_consume() local
3979 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
3980 if (event) { in ring_buffer_consume()
3992 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
3995 return event; in ring_buffer_consume()
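On the read side, ring_buffer_consume() above hands back one event per call and removes it from the buffer; a sketch of a per-CPU drain loop in the spirit of the test_ringbuffer() loop at the end of this listing (function and variable names here are assumptions):

#include <linux/ring_buffer.h>
#include <linux/types.h>

static u64 drain_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost = 0;
        u64 total = 0;

        /* NULL timestamp pointer: the caller does not need the event's time */
        while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost)))
                total += ring_buffer_event_length(event);

        return total;
}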
4126 struct ring_buffer_event *event; in ring_buffer_read() local
4132 event = rb_iter_peek(iter, ts); in ring_buffer_read()
4133 if (!event) in ring_buffer_read()
4136 if (event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_read()
4143 return event; in ring_buffer_read()
4484 struct ring_buffer_event *event; in ring_buffer_read_page() local
4519 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4548 size = rb_event_ts_length(event); in ring_buffer_read_page()
4564 size = rb_event_length(event); in ring_buffer_read_page()
4576 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4578 size = rb_event_ts_length(event); in ring_buffer_read_page()
4756 struct ring_buffer_event *event; in rb_write_something() local
4776 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
4777 if (!event) { in rb_write_something()
4788 event_len = ring_buffer_event_length(event); in rb_write_something()
4793 item = ring_buffer_event_data(event); in rb_write_something()
4816 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
4932 struct ring_buffer_event *event; in test_ringbuffer() local
4970 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
4972 item = ring_buffer_event_data(event); in test_ringbuffer()
4973 total_len += ring_buffer_event_length(event); in test_ringbuffer()