Lines Matching refs:rb
3449 struct ring_buffer *rb);
3597 if (event->rb) { in _free_event()
3917 struct ring_buffer *rb; in perf_poll() local
3930 rb = event->rb; in perf_poll()
3931 if (rb) in perf_poll()
3932 events = atomic_xchg(&rb->poll, 0); in perf_poll()
4241 struct ring_buffer *rb; in perf_event_init_userpage() local
4244 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
4245 if (!rb) in perf_event_init_userpage()
4248 userpg = rb->user_page; in perf_event_init_userpage()
4254 userpg->data_size = perf_data_size(rb); in perf_event_init_userpage()
4273 struct ring_buffer *rb; in perf_event_update_userpage() local
4277 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
4278 if (!rb) in perf_event_update_userpage()
4292 userpg = rb->user_page; in perf_event_update_userpage()
4323 struct ring_buffer *rb; in perf_mmap_fault() local
4333 rb = rcu_dereference(event->rb); in perf_mmap_fault()
4334 if (!rb) in perf_mmap_fault()
4340 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); in perf_mmap_fault()
4356 struct ring_buffer *rb) in ring_buffer_attach() argument
4361 if (event->rb) { in ring_buffer_attach()
4368 old_rb = event->rb; in ring_buffer_attach()
4377 if (rb) { in ring_buffer_attach()
4383 spin_lock_irqsave(&rb->event_lock, flags); in ring_buffer_attach()
4384 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4385 spin_unlock_irqrestore(&rb->event_lock, flags); in ring_buffer_attach()
4388 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
4403 struct ring_buffer *rb; in ring_buffer_wakeup() local
4406 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
4407 if (rb) { in ring_buffer_wakeup()
4408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4416 struct ring_buffer *rb; in ring_buffer_get() local
4419 rb = rcu_dereference(event->rb); in ring_buffer_get()
4420 if (rb) { in ring_buffer_get()
4421 if (!atomic_inc_not_zero(&rb->refcount)) in ring_buffer_get()
4422 rb = NULL; in ring_buffer_get()
4426 return rb; in ring_buffer_get()
4429 void ring_buffer_put(struct ring_buffer *rb) in ring_buffer_put() argument
4431 if (!atomic_dec_and_test(&rb->refcount)) in ring_buffer_put()
4434 WARN_ON_ONCE(!list_empty(&rb->event_list)); in ring_buffer_put()
4436 call_rcu(&rb->rcu_head, rb_free_rcu); in ring_buffer_put()
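
The ring_buffer_get()/ring_buffer_put() hits above outline how a reference on event->rb is taken and dropped: the pointer is sampled under RCU, kept only if atomic_inc_not_zero() still found a live refcount, and the final put defers the actual free through call_rcu(). A condensed kernel-context sketch of that pattern, reconstructed from the lines above (not standalone code; only the fields visible in the listing are shown):

struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);	/* pointer may be detached concurrently */
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;		/* lost the race against the final put */
	}
	rcu_read_unlock();

	return rb;
}

void ring_buffer_put(struct ring_buffer *rb)
{
	if (!atomic_dec_and_test(&rb->refcount))
		return;

	WARN_ON_ONCE(!list_empty(&rb->event_list));	/* every event must be detached by now */

	call_rcu(&rb->rcu_head, rb_free_rcu);	/* free only after RCU readers are done */
}

The same publish/lookup discipline shows up in ring_buffer_attach() (rcu_assign_pointer on event->rb, list_add_rcu onto rb->event_list) and in the rcu_dereference() calls in perf_poll(), perf_event_update_userpage() and perf_mmap_fault() listed above.
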
4444 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
4447 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
4465 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close() local
4466 struct user_struct *mmap_user = rb->mmap_user; in perf_mmap_close()
4467 int mmap_locked = rb->mmap_locked; in perf_mmap_close()
4468 unsigned long size = perf_data_size(rb); in perf_mmap_close()
4478 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
4479 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
4480 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); in perf_mmap_close()
4481 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; in perf_mmap_close()
4483 rb_free_aux(rb); in perf_mmap_close()
4487 atomic_dec(&rb->mmap_count); in perf_mmap_close()
4496 if (atomic_read(&rb->mmap_count)) in perf_mmap_close()
4506 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
4527 if (event->rb == rb) in perf_mmap_close()
4555 ring_buffer_put(rb); /* could be last */ in perf_mmap_close()
4571 struct ring_buffer *rb = NULL; in perf_mmap() local
4600 if (!event->rb) in perf_mmap()
4608 rb = event->rb; in perf_mmap()
4609 if (!rb) in perf_mmap()
4612 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); in perf_mmap()
4613 aux_size = ACCESS_ONCE(rb->user_page->aux_size); in perf_mmap()
4615 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) in perf_mmap()
4622 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) in perf_mmap()
4629 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) in perf_mmap()
4635 if (!atomic_inc_not_zero(&rb->mmap_count)) in perf_mmap()
4638 if (rb_has_aux(rb)) { in perf_mmap()
4639 atomic_inc(&rb->aux_mmap_count); in perf_mmap()
4644 atomic_set(&rb->aux_mmap_count, 1); in perf_mmap()
4663 if (event->rb) { in perf_mmap()
4664 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
4669 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
4707 WARN_ON(!rb && event->rb); in perf_mmap()
4712 if (!rb) { in perf_mmap()
4713 rb = rb_alloc(nr_pages, in perf_mmap()
4717 if (!rb) { in perf_mmap()
4722 atomic_set(&rb->mmap_count, 1); in perf_mmap()
4723 rb->mmap_user = get_current_user(); in perf_mmap()
4724 rb->mmap_locked = extra; in perf_mmap()
4726 ring_buffer_attach(event, rb); in perf_mmap()
4731 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
4734 rb->aux_mmap_locked = extra; in perf_mmap()
4743 } else if (rb) { in perf_mmap()
4744 atomic_dec(&rb->mmap_count); in perf_mmap()
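
The perf_mmap() references are the kernel side of a plain mmap() on a perf event fd: one metadata page (rb->user_page) followed by a power-of-two number of data pages. A minimal, self-contained userspace sketch of that path (the event choice, page count and sample settings are arbitrary assumptions, error handling trimmed):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *up;	/* rb->user_page on the kernel side */
	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);	/* metadata page + 8 data pages */
	void *base;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;
	attr.wakeup_events = 1;		/* feeds the rb->events/rb->wakeup logic at 5301-5306 */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* This mmap() lands in perf_mmap(): rb_alloc(), ring_buffer_attach(), ... */
	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	up = base;
	printf("data_size %llu, data_head %llu\n",
	       (unsigned long long)up->data_size,
	       (unsigned long long)up->data_head);

	munmap(base, len);		/* drops back into perf_mmap_close() */
	close(fd);
	return 0;
}

The munmap() at the end is what drives the perf_mmap_close() accounting shown above; mapping the AUX area would use the same fd with vm_pgoff equal to the aux_offset published in the user page.
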
5301 struct ring_buffer *rb = handle->rb; in perf_output_sample() local
5302 int events = local_inc_return(&rb->events); in perf_output_sample()
5305 local_sub(wakeup_events, &rb->events); in perf_output_sample()
5306 local_inc(&rb->wakeup); in perf_output_sample()
7799 struct ring_buffer *rb = NULL; in perf_event_set_output() local
7842 rb = ring_buffer_get(output_event); in perf_event_set_output()
7843 if (!rb) in perf_event_set_output()
7847 ring_buffer_attach(event, rb); in perf_event_set_output()
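
The perf_event_set_output() references correspond to the PERF_EVENT_IOC_SET_OUTPUT ioctl: the kernel pins the output event's buffer with ring_buffer_get() and attaches the requesting event to it with ring_buffer_attach(). A minimal userspace snippet, reusing the idea of the sketch above (fd_other and fd_leader are placeholder descriptors, fd_leader's buffer already mmap()ed):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

	/* Redirect fd_other's samples into fd_leader's ring buffer. */
	if (ioctl(fd_other, PERF_EVENT_IOC_SET_OUTPUT, fd_leader) < 0)
		perror("PERF_EVENT_IOC_SET_OUTPUT");

Because ring_buffer_attach() adds the event to rb->event_list, ring_buffer_wakeup() (lines 4403-4408 above) later wakes every event sharing the buffer, so a poll() on either fd sees data written to the common ring buffer.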