Lines Matching refs:rb
3560 struct ring_buffer *rb);
3712 if (event->rb) { in _free_event()
4076 struct ring_buffer *rb; in perf_poll() local
4089 rb = event->rb; in perf_poll()
4090 if (rb) in perf_poll()
4091 events = atomic_xchg(&rb->poll, 0); in perf_poll()
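The perf_poll() references above (lines 4089-4091) show the pending-events word being drained with an atomic exchange, so a wakeup is reported to poll() at most once. A minimal user-space sketch of that read-and-clear pattern follows; the names are simplified stand-ins, not the kernel types.

/*
 * Illustrative sketch (not the kernel code): drain pending poll bits with a
 * single atomic exchange, like events = atomic_xchg(&rb->poll, 0).
 */
#include <stdatomic.h>
#include <stdio.h>

struct ring_buffer_sketch {
	atomic_int poll;			/* pending poll bits set by the writer side */
};

static int poll_drain(struct ring_buffer_sketch *rb)
{
	/* read and clear in one step, so the same bits are never reported twice */
	return atomic_exchange(&rb->poll, 0);
}

int main(void)
{
	struct ring_buffer_sketch rb = { .poll = 1 };	/* pretend EPOLLIN is pending */

	printf("first poll:  %d\n", poll_drain(&rb));	/* 1: bits consumed here */
	printf("second poll: %d\n", poll_drain(&rb));	/* 0: nothing left to report */
	return 0;
}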
4407 struct ring_buffer *rb; in perf_event_init_userpage() local
4410 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
4411 if (!rb) in perf_event_init_userpage()
4414 userpg = rb->user_page; in perf_event_init_userpage()
4420 userpg->data_size = perf_data_size(rb); in perf_event_init_userpage()
4439 struct ring_buffer *rb; in perf_event_update_userpage() local
4443 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
4444 if (!rb) in perf_event_update_userpage()
4458 userpg = rb->user_page; in perf_event_update_userpage()
4489 struct ring_buffer *rb; in perf_mmap_fault() local
4499 rb = rcu_dereference(event->rb); in perf_mmap_fault()
4500 if (!rb) in perf_mmap_fault()
4506 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); in perf_mmap_fault()
4522 struct ring_buffer *rb) in ring_buffer_attach() argument
4527 if (event->rb) { in ring_buffer_attach()
4534 old_rb = event->rb; in ring_buffer_attach()
4543 if (rb) { in ring_buffer_attach()
4549 spin_lock_irqsave(&rb->event_lock, flags); in ring_buffer_attach()
4550 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4551 spin_unlock_irqrestore(&rb->event_lock, flags); in ring_buffer_attach()
4554 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
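The ring_buffer_attach() lines (4549-4554) follow a common publish pattern: the event is linked into the buffer's event_list under rb->event_lock first, and only then is event->rb made visible through rcu_assign_pointer(). Below is a rough user-space analogue, assuming a mutex and a release store in place of the spinlock and the RCU publish; all names are simplified inventions.

/* Sketch only: list insertion under a lock, then pointer publication last. */
#include <pthread.h>
#include <stdatomic.h>

struct rb_sketch;

struct event_sketch {
	struct event_sketch *next;		/* stand-in for the rb_entry list linkage */
	struct rb_sketch *_Atomic rb;		/* published last, like rcu_assign_pointer() */
};

struct rb_sketch {
	pthread_mutex_t event_lock;		/* stand-in for the rb->event_lock spinlock */
	struct event_sketch *event_list;
};

static void rb_attach(struct event_sketch *event, struct rb_sketch *rb)
{
	/* link the event into the buffer's list under the buffer's lock ... */
	pthread_mutex_lock(&rb->event_lock);
	event->next = rb->event_list;
	rb->event_list = event;
	pthread_mutex_unlock(&rb->event_lock);

	/* ... then make the buffer visible through the event; the release store
	 * plays the role of rcu_assign_pointer()'s ordering guarantee */
	atomic_store_explicit(&event->rb, rb, memory_order_release);
}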
4569 struct ring_buffer *rb; in ring_buffer_wakeup() local
4572 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
4573 if (rb) { in ring_buffer_wakeup()
4574 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4582 struct ring_buffer *rb; in ring_buffer_get() local
4585 rb = rcu_dereference(event->rb); in ring_buffer_get()
4586 if (rb) { in ring_buffer_get()
4587 if (!atomic_inc_not_zero(&rb->refcount)) in ring_buffer_get()
4588 rb = NULL; in ring_buffer_get()
4592 return rb; in ring_buffer_get()
4595 void ring_buffer_put(struct ring_buffer *rb) in ring_buffer_put() argument
4597 if (!atomic_dec_and_test(&rb->refcount)) in ring_buffer_put()
4600 WARN_ON_ONCE(!list_empty(&rb->event_list)); in ring_buffer_put()
4602 call_rcu(&rb->rcu_head, rb_free_rcu); in ring_buffer_put()
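Together, ring_buffer_get() and ring_buffer_put() (lines 4585-4602) implement a look-up-then-reference lifecycle: a reference is taken only while the refcount is still non-zero, and the final put frees the buffer, deferred through call_rcu() so concurrent RCU readers stay safe. A minimal sketch of that inc-not-zero / dec-and-test pattern in plain C11 atomics, with simplified types and an immediate free standing in for the RCU-deferred one:

/* Sketch only: never resurrect a zero refcount; last reference frees. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct rb_obj {
	atomic_int refcount;
	/* ... pages, user_page, event_list ... */
};

/* like atomic_inc_not_zero(&rb->refcount) in ring_buffer_get() */
static bool rb_get(struct rb_obj *rb)
{
	int old = atomic_load(&rb->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&rb->refcount, &old, old + 1))
			return true;
	return false;			/* buffer is already being torn down */
}

/* like ring_buffer_put(): the final reference releases the buffer */
static void rb_put(struct rb_obj *rb)
{
	if (atomic_fetch_sub(&rb->refcount, 1) == 1)
		free(rb);		/* the kernel defers this via call_rcu(&rb->rcu_head, rb_free_rcu) */
}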
4610 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
4613 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
4631 struct ring_buffer *rb = ring_buffer_get(event); in perf_mmap_close() local
4632 struct user_struct *mmap_user = rb->mmap_user; in perf_mmap_close()
4633 int mmap_locked = rb->mmap_locked; in perf_mmap_close()
4634 unsigned long size = perf_data_size(rb); in perf_mmap_close()
4644 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
4645 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
4646 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); in perf_mmap_close()
4647 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; in perf_mmap_close()
4649 rb_free_aux(rb); in perf_mmap_close()
4653 atomic_dec(&rb->mmap_count); in perf_mmap_close()
4662 if (atomic_read(&rb->mmap_count)) in perf_mmap_close()
4672 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
4693 if (event->rb == rb) in perf_mmap_close()
4721 ring_buffer_put(rb); /* could be last */ in perf_mmap_close()
4737 struct ring_buffer *rb = NULL; in perf_mmap() local
4766 if (!event->rb) in perf_mmap()
4774 rb = event->rb; in perf_mmap()
4775 if (!rb) in perf_mmap()
4778 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); in perf_mmap()
4779 aux_size = ACCESS_ONCE(rb->user_page->aux_size); in perf_mmap()
4781 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) in perf_mmap()
4788 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) in perf_mmap()
4795 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) in perf_mmap()
4801 if (!atomic_inc_not_zero(&rb->mmap_count)) in perf_mmap()
4804 if (rb_has_aux(rb)) { in perf_mmap()
4805 atomic_inc(&rb->aux_mmap_count); in perf_mmap()
4810 atomic_set(&rb->aux_mmap_count, 1); in perf_mmap()
4829 if (event->rb) { in perf_mmap()
4830 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
4835 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
4873 WARN_ON(!rb && event->rb); in perf_mmap()
4878 if (!rb) { in perf_mmap()
4879 rb = rb_alloc(nr_pages, in perf_mmap()
4883 if (!rb) { in perf_mmap()
4888 atomic_set(&rb->mmap_count, 1); in perf_mmap()
4889 rb->mmap_user = get_current_user(); in perf_mmap()
4890 rb->mmap_locked = extra; in perf_mmap()
4892 ring_buffer_attach(event, rb); in perf_mmap()
4897 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
4900 rb->aux_mmap_locked = extra; in perf_mmap()
4909 } else if (rb) { in perf_mmap()
4910 atomic_dec(&rb->mmap_count); in perf_mmap()
5473 struct ring_buffer *rb = handle->rb; in perf_output_sample() local
5474 int events = local_inc_return(&rb->events); in perf_output_sample()
5477 local_sub(wakeup_events, &rb->events); in perf_output_sample()
5478 local_inc(&rb->wakeup); in perf_output_sample()
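The perf_output_sample() references (lines 5473-5478) show wakeup throttling: each record bumps rb->events, and only once every wakeup_events records is a wakeup requested. A small sketch of that counter dance, with plain integers standing in for the kernel's local_t per-CPU counters and with made-up names:

/* Sketch only: request a wakeup every 'wakeup_events' emitted records. */
struct rb_counters {
	long events;	/* records written since the last wakeup request */
	long wakeup;	/* pending wakeup requests */
};

static void account_sample(struct rb_counters *rb, long wakeup_events)
{
	if (wakeup_events && ++rb->events >= wakeup_events) {
		rb->events -= wakeup_events;	/* like local_sub(wakeup_events, &rb->events) */
		rb->wakeup++;			/* like local_inc(&rb->wakeup) */
	}
}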
8128 struct ring_buffer *rb = NULL; in perf_event_set_output() local
8171 rb = ring_buffer_get(output_event); in perf_event_set_output()
8172 if (!rb) in perf_event_set_output()
8176 ring_buffer_attach(event, rb); in perf_event_set_output()
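Finally, perf_event_set_output() (lines 8171-8176) redirects one event into another event's buffer: it takes a reference on the output event's ring buffer via ring_buffer_get() and, if the buffer is still live, attaches the redirected event to it. A hedged sketch of that redirect, reusing the inc-not-zero idea from the earlier sketch; the types and names here are simplified, hypothetical stand-ins.

/* Sketch only: share one buffer between two events by reference. */
#include <stdatomic.h>
#include <stdbool.h>

struct shared_rb {
	atomic_int refcount;
};

struct perf_event_sketch {
	struct shared_rb *_Atomic rb;
};

static bool redirect_output(struct perf_event_sketch *event,
			    struct perf_event_sketch *output_event)
{
	struct shared_rb *rb = atomic_load(&output_event->rb);
	int old;

	if (!rb)
		return false;

	/* take a reference only if the buffer is still live (inc-not-zero) */
	old = atomic_load(&rb->refcount);
	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&rb->refcount, &old, old + 1));

	/* attach: both events now publish the same buffer for their output */
	atomic_store(&event->rb, rb);
	return true;
}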