Lines matching refs: rb
22 atomic_set(&handle->rb->poll, POLLIN); in perf_output_wakeup()
38 struct ring_buffer *rb = handle->rb; in perf_output_get_handle() local
41 local_inc(&rb->nest); in perf_output_get_handle()
42 handle->wakeup = local_read(&rb->wakeup); in perf_output_get_handle()
47 struct ring_buffer *rb = handle->rb; in perf_output_put_handle() local
51 head = local_read(&rb->head); in perf_output_put_handle()
57 if (!local_dec_and_test(&rb->nest)) in perf_output_put_handle()
87 rb->user_page->data_head = head; in perf_output_put_handle()
93 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
94 local_inc(&rb->nest); in perf_output_put_handle()
98 if (handle->wakeup != local_read(&rb->wakeup)) in perf_output_put_handle()
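The get/put handle pairs above keep a per-buffer nesting count (rb->nest) so that only the outermost writer publishes the new head to user_page->data_head. Below is a minimal single-threaded sketch of that publish-at-the-outermost-level idea, assuming plain ints in place of the kernel's local_t and with illustrative names throughout (this is not the kernel API):

/* Only the outermost writer publishes the final head; "user_head"
 * stands in for user_page->data_head. */
#include <stdio.h>

static unsigned long head, user_head;
static int nest;

static void get_handle(void) { nest++; }

static void put_handle(void)
{
    if (--nest)             /* inner writer: leave publishing to the outer one */
        return;
    user_head = head;       /* outermost writer publishes the final head */
}

int main(void)
{
    get_handle(); head += 16;       /* outer record */
    get_handle(); head += 8;        /* nested record (e.g. from an NMI) */
    put_handle();                   /* inner: no publish */
    put_handle();                   /* outer: publish */
    printf("head=%lu user_head=%lu\n", head, user_head);
    return 0;
}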
108 struct ring_buffer *rb; in perf_output_begin() local
124 rb = rcu_dereference(event->rb); in perf_output_begin()
125 if (unlikely(!rb)) in perf_output_begin()
128 if (unlikely(!rb->nr_pages)) in perf_output_begin()
131 handle->rb = rb; in perf_output_begin()
134 have_lost = local_read(&rb->lost); in perf_output_begin()
144 tail = ACCESS_ONCE(rb->user_page->data_tail); in perf_output_begin()
145 offset = head = local_read(&rb->head); in perf_output_begin()
146 if (!rb->overwrite && in perf_output_begin()
147 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) in perf_output_begin()
163 } while (local_cmpxchg(&rb->head, offset, head) != offset); in perf_output_begin()
170 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in perf_output_begin()
171 local_add(rb->watermark, &rb->wakeup); in perf_output_begin()
173 page_shift = PAGE_SHIFT + page_order(rb); in perf_output_begin()
175 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in perf_output_begin()
177 handle->addr = rb->data_pages[handle->page] + offset; in perf_output_begin()
187 lost_event.lost = local_xchg(&rb->lost, 0); in perf_output_begin()
198 local_inc(&rb->lost); in perf_output_begin()
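perf_output_begin() reserves record space by advancing rb->head in a cmpxchg loop, but only if CIRC_SPACE() against the user-advanced data_tail is large enough; otherwise the record is accounted in rb->lost. A small userspace model of the space check, with CIRC_SPACE copied from include/linux/circ_buf.h and all values illustrative:

/* Does a "rec"-byte record fit between head and the reader's tail? */
#include <stdio.h>

#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
    unsigned long size = 4096;      /* perf_data_size(rb): power of two */
    unsigned long tail = 100;       /* user_page->data_tail, advanced by userspace */
    unsigned long head = 4000;      /* rb->head, advanced by the kernel */
    unsigned long rec  = 256;       /* record size being reserved */

    if (CIRC_SPACE(head, tail, size) < rec)
        printf("no room: record would be dropped (rb->lost incremented)\n");
    else
        printf("room for %lu bytes, new head %lu\n", rec, head + rec);
    return 0;
}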
227 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) in ring_buffer_init() argument
229 long max_size = perf_data_size(rb); in ring_buffer_init()
232 rb->watermark = min(max_size, watermark); in ring_buffer_init()
234 if (!rb->watermark) in ring_buffer_init()
235 rb->watermark = max_size / 2; in ring_buffer_init()
238 rb->overwrite = 0; in ring_buffer_init()
240 rb->overwrite = 1; in ring_buffer_init()
242 atomic_set(&rb->refcount, 1); in ring_buffer_init()
244 INIT_LIST_HEAD(&rb->event_list); in ring_buffer_init()
245 spin_lock_init(&rb->event_lock); in ring_buffer_init()
246 init_irq_work(&rb->irq_work, rb_irq_work); in ring_buffer_init()
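ring_buffer_init() clamps the wakeup watermark to the buffer size and falls back to half the buffer when none was requested. A tiny sketch of that defaulting, with made-up sizes:

/* Watermark selection: min(max_size, watermark), default max_size / 2. */
#include <stdio.h>

static long pick_watermark(long max_size, long watermark)
{
    long w = watermark < max_size ? watermark : max_size;   /* min(max_size, watermark) */

    if (!w)
        w = max_size / 2;                                    /* default: half the buffer */
    return w;
}

int main(void)
{
    printf("%ld %ld\n", pick_watermark(65536, 0), pick_watermark(65536, 1 << 20));
    return 0;
}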
249 static void ring_buffer_put_async(struct ring_buffer *rb) in ring_buffer_put_async() argument
251 if (!atomic_dec_and_test(&rb->refcount)) in ring_buffer_put_async()
254 rb->rcu_head.next = (void *)rb; in ring_buffer_put_async()
255 irq_work_queue(&rb->irq_work); in ring_buffer_put_async()
273 struct ring_buffer *rb; in perf_aux_output_begin() local
283 rb = ring_buffer_get(output_event); in perf_aux_output_begin()
284 if (!rb) in perf_aux_output_begin()
287 if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount)) in perf_aux_output_begin()
294 if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1))) in perf_aux_output_begin()
297 aux_head = local_read(&rb->aux_head); in perf_aux_output_begin()
299 handle->rb = rb; in perf_aux_output_begin()
309 if (!rb->aux_overwrite) { in perf_aux_output_begin()
310 aux_tail = ACCESS_ONCE(rb->user_page->aux_tail); in perf_aux_output_begin()
311 handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark; in perf_aux_output_begin()
312 if (aux_head - aux_tail < perf_aux_size(rb)) in perf_aux_output_begin()
313 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); in perf_aux_output_begin()
323 local_set(&rb->aux_nest, 0); in perf_aux_output_begin()
328 return handle->rb->aux_priv; in perf_aux_output_begin()
331 rb_free_aux(rb); in perf_aux_output_begin()
334 ring_buffer_put_async(rb); in perf_aux_output_begin()
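In the non-overwrite branch of perf_aux_output_begin(), the writable AUX space is computed against the user-advanced aux_tail, and the handle's wakeup point is the previous wakeup plus the watermark. A userspace model of just that arithmetic, with CIRC_SPACE again taken from include/linux/circ_buf.h and the remaining names standing in for the rb->aux_* fields:

/* Writable AUX space and the next wakeup point, non-overwrite mode. */
#include <stdio.h>

#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
    unsigned long aux_size  = 1 << 20;   /* perf_aux_size(rb) */
    unsigned long aux_head  = 300000;    /* rb->aux_head */
    unsigned long aux_tail  = 100000;    /* user_page->aux_tail */
    unsigned long watermark = 1 << 16;   /* rb->aux_watermark */
    unsigned long wakeup    = 262144;    /* rb->aux_wakeup */

    unsigned long space = 0;
    if (aux_head - aux_tail < aux_size)
        space = CIRC_SPACE(aux_head, aux_tail, aux_size);

    printf("writable AUX space: %lu, next wakeup at: %lu\n",
           space, wakeup + watermark);
    return 0;
}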
349 struct ring_buffer *rb = handle->rb; in perf_aux_output_end() local
358 if (rb->aux_overwrite) { in perf_aux_output_end()
362 local_set(&rb->aux_head, aux_head); in perf_aux_output_end()
364 aux_head = local_read(&rb->aux_head); in perf_aux_output_end()
365 local_add(size, &rb->aux_head); in perf_aux_output_end()
376 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head); in perf_aux_output_end()
378 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) { in perf_aux_output_end()
380 local_add(rb->aux_watermark, &rb->aux_wakeup); in perf_aux_output_end()
391 local_set(&rb->aux_nest, 0); in perf_aux_output_end()
392 rb_free_aux(rb); in perf_aux_output_end()
393 ring_buffer_put_async(rb); in perf_aux_output_end()
402 struct ring_buffer *rb = handle->rb; in perf_aux_output_skip() local
408 local_add(size, &rb->aux_head); in perf_aux_output_skip()
410 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head); in perf_aux_output_skip()
411 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) { in perf_aux_output_skip()
413 local_add(rb->aux_watermark, &rb->aux_wakeup); in perf_aux_output_skip()
414 handle->wakeup = local_read(&rb->aux_wakeup) + in perf_aux_output_skip()
415 rb->aux_watermark; in perf_aux_output_skip()
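perf_aux_output_end() and perf_aux_output_skip() share the same wakeup test: once aux_head has moved a full watermark past the last wakeup point, the wakeup point is advanced by one watermark. A sketch of that check with plain variables in place of the local_t fields:

/* Is a wakeup due after the AUX head advanced? */
#include <stdio.h>

int main(void)
{
    unsigned long aux_head   = 200000;   /* rb->aux_head after the transaction */
    unsigned long aux_wakeup = 131072;   /* rb->aux_wakeup */
    unsigned long watermark  = 65536;    /* rb->aux_watermark */

    if (aux_head - aux_wakeup >= watermark) {
        aux_wakeup += watermark;         /* local_add(rb->aux_watermark, &rb->aux_wakeup) */
        printf("wakeup due, next wakeup point: %lu\n", aux_wakeup);
    } else {
        printf("no wakeup yet\n");
    }
    return 0;
}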
430 return handle->rb->aux_priv; in perf_get_aux()
458 static void rb_free_aux_page(struct ring_buffer *rb, int idx) in rb_free_aux_page() argument
460 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page()
467 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, in rb_alloc_aux() argument
497 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); in rb_alloc_aux()
498 if (!rb->aux_pages) in rb_alloc_aux()
501 rb->free_aux = event->pmu->free_aux; in rb_alloc_aux()
502 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
506 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
511 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
512 last > rb->aux_nr_pages; rb->aux_nr_pages++) in rb_alloc_aux()
513 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); in rb_alloc_aux()
524 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux()
530 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, in rb_alloc_aux()
532 if (!rb->aux_priv) in rb_alloc_aux()
543 atomic_set(&rb->aux_refcount, 1); in rb_alloc_aux()
545 rb->aux_overwrite = overwrite; in rb_alloc_aux()
546 rb->aux_watermark = watermark; in rb_alloc_aux()
548 if (!rb->aux_watermark && !rb->aux_overwrite) in rb_alloc_aux()
549 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
553 rb->aux_pgoff = pgoff; in rb_alloc_aux()
555 rb_free_aux(rb); in rb_alloc_aux()
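The allocation loop in rb_alloc_aux() grabs the largest power-of-two chunk that still fits the remaining page count, capped by the PMU's maximum order, and then records one virtual address per page of the chunk. The following userspace model reproduces only the chunking arithmetic; ilog2 is approximated with a small helper and max_order is an assumed PMU limit:

/* Chunking: each pass takes 2^order pages, order = min(max_order, ilog2(left)). */
#include <stdio.h>

static int ilog2_ul(unsigned long v)       /* floor(log2(v)), v > 0 */
{
    int r = -1;
    while (v) { v >>= 1; r++; }
    return r;
}

int main(void)
{
    int nr_pages  = 21;         /* requested AUX pages */
    int max_order = 3;          /* assumed PMU limit on chunk order */
    int done      = 0;          /* plays the role of rb->aux_nr_pages */

    while (done < nr_pages) {
        int order = ilog2_ul(nr_pages - done);
        if (order > max_order)
            order = max_order;
        printf("chunk of %d page(s) starting at index %d\n", 1 << order, done);
        done += 1 << order;
    }
    return 0;
}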
560 static void __rb_free_aux(struct ring_buffer *rb) in __rb_free_aux() argument
564 if (rb->aux_priv) { in __rb_free_aux()
565 rb->free_aux(rb->aux_priv); in __rb_free_aux()
566 rb->free_aux = NULL; in __rb_free_aux()
567 rb->aux_priv = NULL; in __rb_free_aux()
570 if (rb->aux_nr_pages) { in __rb_free_aux()
571 for (pg = 0; pg < rb->aux_nr_pages; pg++) in __rb_free_aux()
572 rb_free_aux_page(rb, pg); in __rb_free_aux()
574 kfree(rb->aux_pages); in __rb_free_aux()
575 rb->aux_nr_pages = 0; in __rb_free_aux()
579 void rb_free_aux(struct ring_buffer *rb) in rb_free_aux() argument
581 if (atomic_dec_and_test(&rb->aux_refcount)) in rb_free_aux()
582 irq_work_queue(&rb->irq_work); in rb_free_aux()
587 struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); in rb_irq_work() local
589 if (!atomic_read(&rb->aux_refcount)) in rb_irq_work()
590 __rb_free_aux(rb); in rb_irq_work()
592 if (rb->rcu_head.next == (void *)rb) in rb_irq_work()
593 call_rcu(&rb->rcu_head, rb_free_rcu); in rb_irq_work()
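ring_buffer_put_async() and rb_free_aux() drop a reference but never free in place (they may run in NMI context); instead they queue rb->irq_work, and rb_irq_work() later does the actual teardown once the refcount has reached zero. A minimal model of that defer-the-free pattern, with plain ints and a direct call standing in for atomics and irq_work:

/* Last put marks the object and "queues work"; the work does the free. */
#include <stdio.h>

struct buf {
    int refcount;
    int queued;          /* stands in for irq_work_queue() */
};

static void work_fn(struct buf *b)
{
    if (!b->refcount)
        printf("work: freeing buffer\n");   /* __rb_free_aux()/call_rcu() here */
}

static void put_async(struct buf *b)
{
    if (--b->refcount)
        return;
    b->queued = 1;       /* defer: the real code queues rb->irq_work instead */
}

int main(void)
{
    struct buf b = { .refcount = 2 };

    put_async(&b);       /* still one user left: nothing queued */
    put_async(&b);       /* last put: queue the work */
    if (b.queued)
        work_fn(&b);     /* the "irq_work" runs later, outside NMI context */
    return 0;
}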
603 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
605 if (pgoff > rb->nr_pages) in __perf_mmap_to_page()
609 return virt_to_page(rb->user_page); in __perf_mmap_to_page()
611 return virt_to_page(rb->data_pages[pgoff - 1]); in __perf_mmap_to_page()
629 struct ring_buffer *rb; in rb_alloc() local
636 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
637 if (!rb) in rb_alloc()
640 rb->user_page = perf_mmap_alloc_page(cpu); in rb_alloc()
641 if (!rb->user_page) in rb_alloc()
645 rb->data_pages[i] = perf_mmap_alloc_page(cpu); in rb_alloc()
646 if (!rb->data_pages[i]) in rb_alloc()
650 rb->nr_pages = nr_pages; in rb_alloc()
652 ring_buffer_init(rb, watermark, flags); in rb_alloc()
654 return rb; in rb_alloc()
658 free_page((unsigned long)rb->data_pages[i]); in rb_alloc()
660 free_page((unsigned long)rb->user_page); in rb_alloc()
663 kfree(rb); in rb_alloc()
677 void rb_free(struct ring_buffer *rb) in rb_free() argument
681 perf_mmap_free_page((unsigned long)rb->user_page); in rb_free()
682 for (i = 0; i < rb->nr_pages; i++) in rb_free()
683 perf_mmap_free_page((unsigned long)rb->data_pages[i]); in rb_free()
684 kfree(rb); in rb_free()
688 static int data_page_nr(struct ring_buffer *rb) in data_page_nr() argument
690 return rb->nr_pages << page_order(rb); in data_page_nr()
694 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
697 if (pgoff > data_page_nr(rb)) in __perf_mmap_to_page()
700 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); in __perf_mmap_to_page()
712 struct ring_buffer *rb; in rb_free_work() local
716 rb = container_of(work, struct ring_buffer, work); in rb_free_work()
717 nr = data_page_nr(rb); in rb_free_work()
719 base = rb->user_page; in rb_free_work()
725 kfree(rb); in rb_free_work()
728 void rb_free(struct ring_buffer *rb) in rb_free() argument
730 schedule_work(&rb->work); in rb_free()
735 struct ring_buffer *rb; in rb_alloc() local
742 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
743 if (!rb) in rb_alloc()
746 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
752 rb->user_page = all_buf; in rb_alloc()
753 rb->data_pages[0] = all_buf + PAGE_SIZE; in rb_alloc()
754 rb->page_order = ilog2(nr_pages); in rb_alloc()
755 rb->nr_pages = !!nr_pages; in rb_alloc()
757 ring_buffer_init(rb, watermark, flags); in rb_alloc()
759 return rb; in rb_alloc()
762 kfree(rb); in rb_alloc()
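In this CONFIG_PERF_USE_VMALLOC variant the whole buffer is a single vmalloc area, so rb_alloc() records it as at most one "page" of order ilog2(nr_pages); data_page_nr() then recovers the real count as nr_pages << page_order. A tiny sketch of that bookkeeping, assuming a power-of-two page count as the perf mmap path requires:

/* vmalloc-variant bookkeeping: nr_pages collapses to 0/1, order keeps the count. */
#include <stdio.h>

static int ilog2_ul(unsigned long v)       /* floor(log2(v)), v > 0 */
{
    int r = -1;
    while (v) { v >>= 1; r++; }
    return r;
}

int main(void)
{
    unsigned long requested = 16;               /* data pages asked for */
    int nr_pages   = !!requested;               /* rb->nr_pages: 0 or 1 */
    int page_order = ilog2_ul(requested);       /* rb->page_order */

    printf("data_page_nr = %d\n", nr_pages << page_order);   /* back to 16 */
    return 0;
}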
771 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in perf_mmap_to_page() argument
773 if (rb->aux_nr_pages) { in perf_mmap_to_page()
775 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages) in perf_mmap_to_page()
779 if (pgoff >= rb->aux_pgoff) in perf_mmap_to_page()
780 return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]); in perf_mmap_to_page()
783 return __perf_mmap_to_page(rb, pgoff); in perf_mmap_to_page()
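The final dispatcher, perf_mmap_to_page(), routes an mmap page offset either into the AUX window at aux_pgoff or down to __perf_mmap_to_page() (user page at offset 0, data pages after it). A userspace model of just the offset dispatch, using a made-up layout and slightly cleaner bounds than the kernel's own checks:

/* Classify an mmap pgoff: AUX window, user page, data page, or out of range. */
#include <stdio.h>

static const char *classify(unsigned long pgoff, unsigned long nr_pages,
                            unsigned long aux_pgoff, unsigned long aux_nr_pages)
{
    if (aux_nr_pages && pgoff >= aux_pgoff && pgoff < aux_pgoff + aux_nr_pages)
        return "aux page";
    if (pgoff == 0)
        return "user page";
    if (pgoff <= nr_pages)
        return "data page";
    return "out of range";
}

int main(void)
{
    /* Example layout: 1 user page + 8 data pages, AUX window of 8 pages at pgoff 9. */
    unsigned long pgoffs[] = { 0, 3, 8, 9, 16, 17, 80 };
    for (unsigned i = 0; i < sizeof(pgoffs) / sizeof(pgoffs[0]); i++)
        printf("pgoff %lu -> %s\n", pgoffs[i], classify(pgoffs[i], 8, 9, 8));
    return 0;
}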