Lines matching refs: buffer
Cross-reference listing for the identifier "buffer" in the kernel's trace ring buffer, kernel/trace/ring_buffer.c. Each entry gives the source line number, the matching code, the enclosing function, and how "buffer" is used there (argument, member, or local).
271 #define for_each_buffer_cpu(buffer, cpu) \ argument
272 for_each_cpu(cpu, buffer->cpumask)
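The macro above walks exactly the CPUs that have a per-CPU buffer allocated; the rest of the file uses it for setup, teardown and statistics loops. A minimal sketch, mirroring the teardown loop in ring_buffer_free() further down:

	int cpu;

	/* free every per-CPU buffer owned by "buffer" */
	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);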
436 struct ring_buffer *buffer; member
526 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full) in ring_buffer_wait() argument
539 work = &buffer->irq_work; in ring_buffer_wait()
543 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
545 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
586 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
590 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
630 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
637 work = &buffer->irq_work; in ring_buffer_poll_wait()
639 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
642 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
663 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
664 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
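ring_buffer_wait() blocks until data is available (on one CPU, or anywhere with RING_BUFFER_ALL_CPUS), while ring_buffer_poll_wait() hooks the same irq_work waitqueue into poll(). A hedged sketch of a blocking reader, assuming a valid struct ring_buffer *rb:

	/* Sleep until any CPU has at least one event; "false" asks for a
	 * wakeup on the first event rather than on a full page.  Returns
	 * 0 when woken, negative errno on an invalid CPU or a signal.
	 */
	int ret = ring_buffer_wait(rb, RING_BUFFER_ALL_CPUS, false);
	if (ret)
		return ret;
	/* data is available somewhere; go consume it */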
677 atomic_inc(&__b->buffer->record_disabled); \
688 static inline u64 rb_time_stamp(struct ring_buffer *buffer) in rb_time_stamp() argument
691 return buffer->clock() << DEBUG_SHIFT; in rb_time_stamp()
694 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) in ring_buffer_time_stamp() argument
699 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
706 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
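rb_time_stamp() shifts the raw clock left by DEBUG_SHIFT, so readers must undo that before exposing timestamps; that is what the exported pair above is for. A short sketch, assuming rb and cpu are valid:

	/* Take a buffer timestamp and normalize it back into the clock's
	 * own units before reporting it.
	 */
	u64 ts = ring_buffer_time_stamp(rb, cpu);

	ring_buffer_normalize_time_stamp(rb, cpu, &ts);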
1205 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1218 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1220 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1304 struct ring_buffer *buffer; in __ring_buffer_alloc() local
1310 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1312 if (!buffer) in __ring_buffer_alloc()
1315 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1319 buffer->flags = flags; in __ring_buffer_alloc()
1320 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1321 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1323 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1324 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1337 cpumask_copy(buffer->cpumask, cpu_online_mask); in __ring_buffer_alloc()
1339 cpumask_copy(buffer->cpumask, cpu_possible_mask); in __ring_buffer_alloc()
1341 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1344 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1346 if (!buffer->buffers) in __ring_buffer_alloc()
1349 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1350 buffer->buffers[cpu] = in __ring_buffer_alloc()
1351 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1352 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1357 buffer->cpu_notify.notifier_call = rb_cpu_notify; in __ring_buffer_alloc()
1358 buffer->cpu_notify.priority = 0; in __ring_buffer_alloc()
1359 __register_cpu_notifier(&buffer->cpu_notify); in __ring_buffer_alloc()
1363 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1365 return buffer; in __ring_buffer_alloc()
1368 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1369 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1370 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1372 kfree(buffer->buffers); in __ring_buffer_alloc()
1375 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1381 kfree(buffer); in __ring_buffer_alloc()
1391 ring_buffer_free(struct ring_buffer *buffer) in ring_buffer_free() argument
1397 __unregister_cpu_notifier(&buffer->cpu_notify); in ring_buffer_free()
1400 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1401 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1407 kfree(buffer->buffers); in ring_buffer_free()
1408 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1410 kfree(buffer); in ring_buffer_free()
1414 void ring_buffer_set_clock(struct ring_buffer *buffer, in ring_buffer_set_clock() argument
1417 buffer->clock = clock; in ring_buffer_set_clock()
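Taken together, the allocation, clock and free entries above describe the buffer's lifetime as a caller sees it. A hedged sketch (the size and flag are illustrative; my_clock() is a hypothetical u64 (*)(void) matching buffer->clock, and ring_buffer_alloc() is the usual wrapper around __ring_buffer_alloc()):

	struct ring_buffer *rb;

	/* one overwriting buffer of ~1 MB per CPU */
	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* replace the default trace_clock_local with a caller clock */
	ring_buffer_set_clock(rb, my_clock);

	/* ... produce and consume events ... */

	ring_buffer_free(rb);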
1651 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
1661 if (!buffer) in ring_buffer_resize()
1666 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
1682 if (atomic_read(&buffer->resize_disabled)) in ring_buffer_resize()
1686 mutex_lock(&buffer->mutex); in ring_buffer_resize()
1690 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1691 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1719 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1720 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1735 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1736 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1748 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
1751 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1790 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
1791 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
1799 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1800 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1803 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
1806 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
1810 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
1813 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1825 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
1830 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
1832 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
1834 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
1836 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
1837 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
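ring_buffer_resize() takes a byte count and either one CPU or RING_BUFFER_ALL_CPUS, and bails out while resize_disabled is held by a reader; ring_buffer_change_overwrite() just toggles RB_FL_OVERWRITE under the buffer mutex. A sketch:

	/* Grow every per-CPU buffer to 2 MB, then switch the buffer into
	 * producer/consumer mode (writers fail instead of overwriting).
	 */
	int err = ring_buffer_resize(rb, 2 * 1024 * 1024, RING_BUFFER_ALL_CPUS);
	if (err)
		return err;

	ring_buffer_change_overwrite(rb, 0);	/* clear RB_FL_OVERWRITE */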
2147 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2191 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2230 ts = rb_time_stamp(buffer); in rb_move_tail()
2546 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2550 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
2551 buffer->irq_work.waiters_pending = false; in rb_wakeups()
2553 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
2650 int ring_buffer_unlock_commit(struct ring_buffer *buffer, in ring_buffer_unlock_commit() argument
2656 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2660 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2742 rb_reserve_next_event(struct ring_buffer *buffer, in rb_reserve_next_event() argument
2761 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2785 info.ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2832 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
2841 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
2846 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
2849 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2860 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2933 void ring_buffer_discard_commit(struct ring_buffer *buffer, in ring_buffer_discard_commit() argument
2943 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
2950 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
2984 int ring_buffer_write(struct ring_buffer *buffer, in ring_buffer_write() argument
2996 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3001 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3004 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3015 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3025 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
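Two write paths are referenced above: the reserve/commit pair for building an event in place, and ring_buffer_write() for copying a finished blob in one call. A hedged sketch (ring_buffer_event_data() is the standard payload accessor; it is assumed here, not shown in this listing):

	struct ring_buffer_event *event;
	char msg[] = "hello";
	void *payload;

	event = ring_buffer_lock_reserve(rb, sizeof(msg));
	if (!event)
		return -EBUSY;	/* disabled, or full in non-overwrite mode */

	payload = ring_buffer_event_data(event);
	memcpy(payload, msg, sizeof(msg));

	/* commit; ring_buffer_discard_commit() would drop it instead */
	ring_buffer_unlock_commit(rb, event);

	/* or: one-shot copy of an existing buffer */
	ring_buffer_write(rb, sizeof(msg), msg);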
3064 void ring_buffer_record_disable(struct ring_buffer *buffer) in ring_buffer_record_disable() argument
3066 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3077 void ring_buffer_record_enable(struct ring_buffer *buffer) in ring_buffer_record_enable() argument
3079 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
3094 void ring_buffer_record_off(struct ring_buffer *buffer) in ring_buffer_record_off() argument
3100 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3102 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
3117 void ring_buffer_record_on(struct ring_buffer *buffer) in ring_buffer_record_on() argument
3123 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
3125 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
3135 int ring_buffer_record_is_on(struct ring_buffer *buffer) in ring_buffer_record_is_on() argument
3137 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
3150 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
3154 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
3157 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3170 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
3174 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
3177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
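record_disable()/record_enable() (and their _cpu variants) nest through the atomic counter, while the cmpxchg loops in record_off()/record_on() set and clear a dedicated flag bit so a buffer switched off stays off until record_on(), regardless of the counter; the selftest at the bottom of this listing parks its producers that way. A sketch of the nesting pair:

	/* Stop all writers while inspecting the buffer, then resume. */
	ring_buffer_record_disable(rb);

	if (ring_buffer_record_is_on(rb))
		pr_warn("ring buffer writers unexpectedly enabled\n");

	/* ... walk or snapshot the buffer safely ... */

	ring_buffer_record_enable(rb);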
3200 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
3207 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
3210 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3233 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
3238 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
3241 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3253 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
3257 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
3260 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3272 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
3277 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
3280 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3295 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
3300 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
3303 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3317 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
3322 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
3325 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3338 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
3342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
3345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3357 unsigned long ring_buffer_entries(struct ring_buffer *buffer) in ring_buffer_entries() argument
3364 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
3365 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3380 unsigned long ring_buffer_overruns(struct ring_buffer *buffer) in ring_buffer_overruns() argument
3387 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
3388 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
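All of the counters above are per-CPU except ring_buffer_entries() and ring_buffer_overruns(), which sum across CPUs. A sketch that dumps them (the online-CPU loop is illustrative; callers do not see the file-local for_each_buffer_cpu() macro):

	int cpu;

	pr_info("entries=%lu overruns=%lu\n",
		ring_buffer_entries(rb), ring_buffer_overruns(rb));

	for_each_online_cpu(cpu)
		pr_info("cpu%d: bytes=%lu entries=%lu overrun=%lu read=%lu\n",
			cpu,
			ring_buffer_bytes_cpu(rb, cpu),
			ring_buffer_entries_cpu(rb, cpu),
			ring_buffer_overrun_cpu(rb, cpu),
			ring_buffer_read_events_cpu(rb, cpu));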
3767 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3785 struct ring_buffer *buffer; in rb_iter_peek() local
3791 buffer = cpu_buffer->buffer; in rb_iter_peek()
3849 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
3905 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
3908 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
3913 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
3969 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
3981 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
3984 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4028 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) in ring_buffer_read_prepare() argument
4033 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
4040 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4044 atomic_inc(&buffer->resize_disabled); in ring_buffer_read_prepare()
4119 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
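ring_buffer_consume() is the consuming-read entry point (the selftest at the bottom of this listing drains its buffer with it), while read_prepare()/read_finish() bracket the non-consuming iterator and hold resize_disabled so pages cannot be resized away under the reader. A hedged sketch of a consuming drain on one CPU (ring_buffer_event_length() is assumed, not shown in this listing):

	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(rb, cpu, &ts, &lost)) != NULL) {
		unsigned int len = ring_buffer_event_length(event);

		/* the payload lives at ring_buffer_event_data(event) */
		pr_info("cpu%d: ts=%llu len=%u lost=%lu\n",
			cpu, (unsigned long long)ts, len, lost);
	}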
4159 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) in ring_buffer_size() argument
4167 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
4170 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
4221 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
4223 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
4226 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
4229 atomic_inc(&buffer->resize_disabled); in ring_buffer_reset_cpu()
4250 atomic_dec(&buffer->resize_disabled); in ring_buffer_reset_cpu()
4258 void ring_buffer_reset(struct ring_buffer *buffer) in ring_buffer_reset() argument
4262 for_each_buffer_cpu(buffer, cpu) in ring_buffer_reset()
4263 ring_buffer_reset_cpu(buffer, cpu); in ring_buffer_reset()
4271 bool ring_buffer_empty(struct ring_buffer *buffer) in ring_buffer_empty() argument
4280 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
4281 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4301 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
4308 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
4311 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
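ring_buffer_reset_cpu() wipes one CPU's pages under resize_disabled; ring_buffer_reset() simply loops it over every CPU, and the empty checks above are what the waiters use. The usual "drop everything" pattern is a one-liner:

	/* discard all pending data before reusing the buffer */
	if (!ring_buffer_empty(rb))
		ring_buffer_reset(rb);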
4383 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
4384 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
4413 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
4438 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) in ring_buffer_free_read_page() argument
4477 int ring_buffer_read_page(struct ring_buffer *buffer, in ring_buffer_read_page() argument
4480 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
4491 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
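The page-level reader: a caller-owned page comes from alloc_read_page(), ring_buffer_read_page() swaps or copies a whole buffer page into it, and free_read_page() returns it. Only the three names and their (buffer, cpu) parameters are confirmed by this listing; the len/full arguments below follow the usual signature and should be read as an assumption:

	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(rb, cpu);
	if (!page)
		return -ENOMEM;

	/* a non-negative return means a page of data was transferred */
	ret = ring_buffer_read_page(rb, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/* "page" now holds a buffer_data_page of committed events */
	}

	ring_buffer_free_read_page(rb, page);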
4645 struct ring_buffer *buffer = in rb_cpu_notify() local
4655 if (cpumask_test_cpu(cpu, buffer->cpumask)) in rb_cpu_notify()
4661 for_each_buffer_cpu(buffer, cpu_i) { in rb_cpu_notify()
4664 nr_pages = buffer->buffers[cpu_i]->nr_pages; in rb_cpu_notify()
4665 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in rb_cpu_notify()
4673 buffer->buffers[cpu] = in rb_cpu_notify()
4674 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in rb_cpu_notify()
4675 if (!buffer->buffers[cpu]) { in rb_cpu_notify()
4681 cpumask_set_cpu(cpu, buffer->cpumask); in rb_cpu_notify()
4717 struct ring_buffer *buffer; member
4773 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
4787 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
4813 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
4859 struct ring_buffer *buffer; in test_ringbuffer() local
4865 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
4866 if (WARN_ON(!buffer)) in test_ringbuffer()
4870 ring_buffer_record_off(buffer); in test_ringbuffer()
4873 rb_data[cpu].buffer = buffer; in test_ringbuffer()
4896 ring_buffer_record_on(buffer); in test_ringbuffer()
4922 ring_buffer_free(buffer); in test_ringbuffer()
4962 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
4967 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
4976 RB_WARN_ON(buffer, 1); in test_ringbuffer()
4996 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
5000 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
5008 ring_buffer_free(buffer); in test_ringbuffer()
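The selftest entries above tie the whole API together: test_ringbuffer() allocates with RB_FL_OVERWRITE, keeps recording off while the per-CPU producer threads are set up (each rb_data[cpu].buffer pointing at the shared buffer), turns recording on, lets rb_write_something() reserve and commit events, then drains everything with ring_buffer_consume(), cross-checks the totals with RB_WARN_ON(), and frees the buffer.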