Lines Matching refs:cpu_buffer
(Cross-reference listing for the Linux trace ring buffer, kernel/trace/ring_buffer.c: the leading number on each line is the source line, and the trailing annotation names the enclosing function or marks the reference as a struct member, local, or argument.)

491 	struct ring_buffer_per_cpu	*cpu_buffer;  member
528 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); in ring_buffer_wait()
545 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
546 work = &cpu_buffer->irq_work; in ring_buffer_wait()
597 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
598 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
599 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
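
The references above are the blocking-reader path: ring_buffer_wait() compares reader_page against commit_page under reader_lock to decide whether a "wait for a full page" waiter should keep sleeping. A minimal consumer sketch, assuming this era's ring_buffer_wait(buffer, cpu, full), ring_buffer_consume() and ring_buffer_event_data() prototypes from <linux/ring_buffer.h>; the wrapper name drain_one_cpu() is hypothetical:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Hypothetical helper: block until this CPU has data, then drain it. */
static void drain_one_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        /* full=false: wake on any data, not only on a full page */
        if (ring_buffer_wait(buffer, cpu, false))
                return;         /* no data (e.g. interrupted by a signal) */

        while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
                pr_info("event at %llu: %p\n", (unsigned long long)ts,
                        ring_buffer_event_data(event));
}
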
633 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
642 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
643 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
812 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page() argument
842 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head() argument
855 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
859 head = cpu_buffer->head_page; in rb_head_page_activate()
866 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
880 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
885 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
887 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
891 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
914 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
919 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
923 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
928 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
932 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
937 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
941 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page() argument
950 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
957 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
961 list = cpu_buffer->pages; in rb_set_head_page()
962 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
965 page = head = cpu_buffer->head_page; in rb_set_head_page()
974 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { in rb_set_head_page()
975 cpu_buffer->head_page = page; in rb_set_head_page()
978 rb_inc_page(cpu_buffer, &page); in rb_set_head_page()
982 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
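
The rb_head_page_* and rb_set_head_page() references above all manipulate the same lockless trick: the current head page is marked by tagging the low bits of the list pointer that points at it, so the pointer must be masked (RB_FLAG_MASK, line 1079) before it can be followed. A rough sketch of that pointer tagging; rb_list_head() and RB_FLAG_MASK appear in the listing, while the RB_PAGE_* names and values here are illustrative assumptions:

#include <linux/list.h>

#define RB_PAGE_NORMAL  0UL     /* plain next/prev pointer               */
#define RB_PAGE_HEAD    1UL     /* this pointer designates the head page */
#define RB_PAGE_UPDATE  2UL     /* writer is in the middle of moving it  */
#define RB_FLAG_MASK    3UL

/* Strip the flag bits before the pointer can be followed. */
static struct list_head *rb_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~RB_FLAG_MASK);
}
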
1007 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1039 if (tail_page == cpu_buffer->tail_page) { in rb_tail_page_update()
1064 old_tail = cmpxchg(&cpu_buffer->tail_page, in rb_tail_page_update()
1074 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1079 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1088 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list() argument
1091 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1093 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1105 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1107 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1111 if (cpu_buffer->head_page) in rb_check_pages()
1112 rb_set_head_page(cpu_buffer); in rb_check_pages()
1114 rb_head_page_deactivate(cpu_buffer); in rb_check_pages()
1116 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1118 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1121 if (rb_check_list(cpu_buffer, head)) in rb_check_pages()
1125 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1128 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1131 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1135 rb_head_page_activate(cpu_buffer); in rb_check_pages()
1179 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1186 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1194 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1197 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1199 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1207 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1212 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1214 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1217 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1218 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1219 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1220 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1221 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1222 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1223 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1224 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1225 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1226 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1233 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1235 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1242 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1243 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1245 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1249 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1250 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1251 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1253 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1255 return cpu_buffer; in rb_allocate_cpu_buffer()
1258 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1261 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1265 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1267 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1270 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1272 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1283 kfree(cpu_buffer); in rb_free_cpu_buffer()
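
rb_allocate_cpu_buffer() and rb_free_cpu_buffer() are internal; callers only ever see the whole-buffer wrappers. A minimal allocation sketch, assuming the exported ring_buffer_alloc()/ring_buffer_free() interface and RB_FL_OVERWRITE flag from <linux/ring_buffer.h>; the module boilerplate and names are hypothetical:

#include <linux/module.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *rb;          /* hypothetical example buffer */

static int __init rb_example_init(void)
{
        /* Roughly 1 MB of data pages per CPU (the reader page is extra);
         * RB_FL_OVERWRITE lets the writer overwrite the oldest events. */
        rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
        return rb ? 0 : -ENOMEM;
}

static void __exit rb_example_exit(void)
{
        ring_buffer_free(rb);           /* frees every per-CPU cpu_buffer */
}

module_init(rb_example_init);
module_exit(rb_example_exit);
MODULE_LICENSE("GPL");
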
1420 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1433 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1444 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1445 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1455 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1461 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1487 cpu_buffer->pages = next_page; in rb_remove_pages()
1491 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1498 cpu_buffer->read = 0; in rb_remove_pages()
1501 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1502 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1504 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1513 rb_inc_page(cpu_buffer, &tmp_iter_page); in rb_remove_pages()
1524 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1525 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1537 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1543 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1545 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1548 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1570 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1604 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
1605 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1610 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1619 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
1623 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
1624 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
1626 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
1627 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
1630 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1635 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
1637 rb_update_pages(cpu_buffer); in update_pages_handler()
1638 complete(&cpu_buffer->update_done); in update_pages_handler()
1654 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
1691 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1693 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1694 cpu_buffer->nr_pages; in ring_buffer_resize()
1698 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
1704 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1705 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1706 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
1720 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1721 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1726 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1727 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1730 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1736 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1737 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1741 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1742 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1751 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1753 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
1756 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1757 cpu_buffer->nr_pages; in ring_buffer_resize()
1759 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1760 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
1761 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1762 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
1771 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1774 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1775 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1778 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1800 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1801 rb_check_pages(cpu_buffer); in ring_buffer_resize()
1813 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1814 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1816 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
1819 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
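
The resize references above allocate the new pages up front into cpu_buffer->new_pages and then let update_pages_handler() splice them in on the target CPU. From a caller's point of view it is a single call; a sketch, assuming this era's ring_buffer_resize(buffer, size, cpu_id) prototype and the RING_BUFFER_ALL_CPUS constant:

#include <linux/ring_buffer.h>

/* Hypothetical example: grow every per-CPU buffer to 4 MB, then shrink
 * only CPU 2 to 64 KB.  Sizes are in bytes and are rounded up to pages. */
static int resize_example(struct ring_buffer *buffer)
{
        int ret;

        ret = ring_buffer_resize(buffer, 4 << 20, RING_BUFFER_ALL_CPUS);
        if (ret)
                return ret;

        return ring_buffer_resize(buffer, 64 << 10, 2);
}
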
1853 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
1855 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
1856 cpu_buffer->reader_page->read); in rb_reader_event()
1877 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
1879 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
1892 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
1900 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
1901 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
1903 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter()
1917 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
1933 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
1954 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
1955 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
1986 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2005 rb_inc_page(cpu_buffer, &new_head); in rb_handle_head_page()
2007 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2024 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2043 if (cpu_buffer->tail_page != tail_page && in rb_handle_head_page()
2044 cpu_buffer->tail_page != next_page) in rb_handle_head_page()
2045 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2056 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2059 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2068 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2096 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2142 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2146 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2147 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2154 rb_inc_page(cpu_buffer, &next_page); in rb_move_tail()
2162 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2180 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { in rb_move_tail()
2186 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2192 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2196 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2214 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2215 cpu_buffer->tail_page) && in rb_move_tail()
2216 (cpu_buffer->commit_page == in rb_move_tail()
2217 cpu_buffer->reader_page))) { in rb_move_tail()
2218 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2224 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2236 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2243 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2267 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2282 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2290 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) in rb_update_event()
2352 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2365 bpage = cpu_buffer->tail_page; in rb_try_to_discard()
2382 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2391 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2393 local_inc(&cpu_buffer->committing); in rb_start_commit()
2394 local_inc(&cpu_buffer->commits); in rb_start_commit()
2398 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
2411 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2413 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { in rb_set_commit_to_write()
2414 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2416 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2417 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2419 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2420 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2421 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); in rb_set_commit_to_write()
2422 cpu_buffer->write_stamp = in rb_set_commit_to_write()
2423 cpu_buffer->commit_page->page->time_stamp; in rb_set_commit_to_write()
2427 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
2428 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2430 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2431 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2432 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2433 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2446 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) in rb_set_commit_to_write()
2450 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
2454 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
2455 !local_read(&cpu_buffer->committing))) in rb_end_commit()
2459 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
2462 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
2463 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
2465 local_dec(&cpu_buffer->committing); in rb_end_commit()
2475 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
2476 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
2477 local_inc(&cpu_buffer->committing); in rb_end_commit()
2496 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit() argument
2505 return cpu_buffer->commit_page->page == (void *)addr && in rb_event_is_commit()
2506 rb_commit_index(cpu_buffer) == index; in rb_event_is_commit()
2510 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_write_stamp() argument
2519 if (rb_event_is_commit(cpu_buffer, event)) { in rb_update_write_stamp()
2525 cpu_buffer->write_stamp = in rb_update_write_stamp()
2526 cpu_buffer->commit_page->page->time_stamp; in rb_update_write_stamp()
2531 cpu_buffer->write_stamp += delta; in rb_update_write_stamp()
2533 cpu_buffer->write_stamp += event->time_delta; in rb_update_write_stamp()
2537 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
2540 local_inc(&cpu_buffer->entries); in rb_commit()
2541 rb_update_write_stamp(cpu_buffer, event); in rb_commit()
2542 rb_end_commit(cpu_buffer); in rb_commit()
2546 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2556 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
2557 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
2559 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2562 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_wakeups()
2564 if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) { in rb_wakeups()
2565 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
2566 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
2568 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2611 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
2613 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
2630 cpu_buffer->current_context = val; in trace_recursive_lock()
2636 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
2638 cpu_buffer->current_context &= cpu_buffer->current_context - 1; in trace_recursive_unlock()
2653 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
2656 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2658 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
2660 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2662 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
2671 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_timestamp() argument
2678 (unsigned long long)cpu_buffer->write_stamp, in rb_handle_timestamp()
2687 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
2702 tail_page = info->tail_page = cpu_buffer->tail_page; in __rb_reserve_next()
2718 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
2724 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
2736 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
2743 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
2751 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
2761 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2762 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
2763 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
2782 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
2785 info.ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2786 diff = info.ts - cpu_buffer->write_stamp; in rb_reserve_next_event()
2792 if (likely(info.ts >= cpu_buffer->write_stamp)) { in rb_reserve_next_event()
2795 rb_handle_timestamp(cpu_buffer, &info); in rb_reserve_next_event()
2798 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
2812 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
2834 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
2849 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2851 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
2857 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
2860 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2867 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
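
ring_buffer_lock_reserve() and ring_buffer_unlock_commit() (lines 2653-2669 above) form the canonical producer path: reserve an event, fill its payload in place, then commit it so readers can see it. A minimal sketch, assuming ring_buffer_event_data() and this era's two-argument ring_buffer_unlock_commit(); the payload layout and function name are hypothetical:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

struct my_payload {                     /* hypothetical event layout */
        u32     id;
        u64     value;
};

static int log_value(struct ring_buffer *buffer, u32 id, u64 value)
{
        struct ring_buffer_event *event;
        struct my_payload *p;

        event = ring_buffer_lock_reserve(buffer, sizeof(*p));
        if (!event)
                return -EBUSY;          /* disabled, recursion, or no room */

        p = ring_buffer_event_data(event);
        p->id = id;
        p->value = value;

        return ring_buffer_unlock_commit(buffer, event);
}
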
2881 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
2885 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
2900 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2907 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2911 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
2936 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
2943 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
2950 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
2952 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
2953 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
2960 rb_update_write_stamp(cpu_buffer, event); in ring_buffer_discard_commit()
2962 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
2964 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
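
ring_buffer_discard_commit() is the counterpart for an event that was reserved but turns out to be unwanted: rb_try_to_discard() gives the space back if the event is still at the tail, otherwise the commit proceeds with the entry count decremented by rb_decrement_entry(). A sketch of the reserve-then-discard pattern; filter_out() is a hypothetical predicate:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/ring_buffer.h>

static bool filter_out(const void *data, size_t len);   /* hypothetical */

static void log_maybe(struct ring_buffer *buffer, const void *data, size_t len)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (!event)
                return;

        memcpy(ring_buffer_event_data(event), data, len);

        if (filter_out(ring_buffer_event_data(event), len))
                ring_buffer_discard_commit(buffer, event);  /* give space back */
        else
                ring_buffer_unlock_commit(buffer, event);   /* make it visible */
}
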
2988 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3004 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3006 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3012 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3015 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3023 rb_commit(cpu_buffer, event); in ring_buffer_write()
3025 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3030 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
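
ring_buffer_write() bundles the reserve, copy, and commit into one call for data that is already laid out in memory. A sketch, assuming this era's (buffer, length, data) prototype:

#include <linux/types.h>
#include <linux/ring_buffer.h>

/* One-shot variant of reserve + copy + commit; returns 0 on success,
 * -EBUSY if the buffer could not take the event. */
static int log_sample(struct ring_buffer *buffer, u64 sample)
{
        return ring_buffer_write(buffer, sizeof(sample), &sample);
}
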
3039 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3041 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3042 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3043 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3152 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
3157 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3158 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
3172 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
3177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3178 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
3189 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
3191 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
3192 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
3203 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
3210 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3211 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3216 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
3217 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3219 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3222 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3235 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
3241 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3242 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
3255 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
3260 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3262 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
3274 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
3280 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3281 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
3297 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
3303 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3304 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
3319 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
3325 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3326 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
3340 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
3345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3346 return cpu_buffer->read; in ring_buffer_read_events_cpu()
3359 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
3365 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3366 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
3382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
3388 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
3389 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
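
The accessors above expose the per-CPU counters kept in cpu_buffer (entries, overrun, commit_overrun, dropped_events, read, entries_bytes). A reporting sketch, assuming the unsigned long return types of this era's <linux/ring_buffer.h>; the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static void report_cpu(struct ring_buffer *buffer, int cpu)
{
        pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes, %lu read\n",
                cpu,
                ring_buffer_entries_cpu(buffer, cpu),
                ring_buffer_overrun_cpu(buffer, cpu),
                ring_buffer_bytes_cpu(buffer, cpu),
                ring_buffer_read_events_cpu(buffer, cpu));
}
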
3398 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
3401 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
3402 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
3405 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
3408 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
3422 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
3428 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
3430 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3432 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3442 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
3444 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
3446 return iter->head_page == cpu_buffer->commit_page && in ring_buffer_iter_empty()
3447 iter->head == rb_commit_index(cpu_buffer); in ring_buffer_iter_empty()
3452 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
3465 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
3473 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
3514 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
3523 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
3532 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
3537 reader = cpu_buffer->reader_page; in rb_get_reader_page()
3540 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
3544 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
3545 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
3550 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
3554 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
3560 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
3561 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
3562 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
3563 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
3569 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
3572 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
3573 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
3580 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
3583 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); in rb_get_reader_page()
3595 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
3608 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
3621 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
3622 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); in rb_get_reader_page()
3625 cpu_buffer->reader_page = reader; in rb_get_reader_page()
3626 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
3628 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
3629 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
3630 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
3638 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
3640 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
3646 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
3652 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
3655 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
3658 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
3661 cpu_buffer->read++; in rb_advance_reader()
3663 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
3666 cpu_buffer->reader_page->read += length; in rb_advance_reader()
3671 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
3675 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
3682 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
3696 if (RB_WARN_ON(cpu_buffer, in rb_advance_iter()
3697 (iter->head_page == cpu_buffer->commit_page) && in rb_advance_iter()
3698 (iter->head + length > rb_commit_index(cpu_buffer)))) in rb_advance_iter()
3707 (iter->head_page != cpu_buffer->commit_page)) in rb_advance_iter()
3711 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
3713 return cpu_buffer->lost_events; in rb_lost_events()
3717 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
3731 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
3734 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
3738 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
3743 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
3756 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3761 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3766 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
3767 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3768 cpu_buffer->cpu, ts); in rb_buffer_peek()
3771 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
3786 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
3790 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
3791 buffer = cpu_buffer->buffer; in rb_iter_peek()
3798 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
3799 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
3814 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) in rb_iter_peek()
3817 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
3850 cpu_buffer->cpu, ts); in rb_iter_peek()
3862 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
3865 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
3878 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
3882 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
3887 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
3890 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
3908 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
3918 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
3919 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
3921 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
3922 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
3942 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
3947 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
3949 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
3972 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
3984 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
3986 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
3988 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
3990 cpu_buffer->lost_events = 0; in ring_buffer_consume()
3991 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
3994 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
4030 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4040 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4042 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4045 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_read_prepare()
4079 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
4085 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
4087 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4088 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
4090 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
4091 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4105 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
4114 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4115 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
4116 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4118 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_read_finish()
4119 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
4135 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read() local
4138 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4149 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
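
The iterator references above (read_prepare, read_start, read, read_finish) give a non-consuming walk of one CPU's buffer with recording and resizing held off via record_disabled/resize_disabled. A usage sketch, assuming this era's prototypes; ring_buffer_read_prepare_sync() and ring_buffer_event_length() do not appear in the listing and are assumptions here:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static void walk_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_prepare(buffer, cpu);
        if (!iter)
                return;
        ring_buffer_read_prepare_sync();        /* let in-flight writers finish */
        ring_buffer_read_start(iter);

        while ((event = ring_buffer_read(iter, &ts)))
                pr_info("ts=%llu len=%u\n", (unsigned long long)ts,
                        ring_buffer_event_length(event));

        ring_buffer_read_finish(iter);          /* re-enables recording/resizing */
}
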
4175 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
4177 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
4179 cpu_buffer->head_page in rb_reset_cpu()
4180 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
4181 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
4182 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
4183 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
4185 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
4187 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
4188 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
4190 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
4191 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
4192 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
4193 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
4194 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
4195 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
4197 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
4198 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
4199 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
4200 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
4201 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
4202 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
4203 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
4204 cpu_buffer->read = 0; in rb_reset_cpu()
4205 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
4207 cpu_buffer->write_stamp = 0; in rb_reset_cpu()
4208 cpu_buffer->read_stamp = 0; in rb_reset_cpu()
4210 cpu_buffer->lost_events = 0; in rb_reset_cpu()
4211 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
4213 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
4223 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
4230 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4235 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4237 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in ring_buffer_reset_cpu()
4240 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4242 rb_reset_cpu(cpu_buffer); in ring_buffer_reset_cpu()
4244 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4247 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4249 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4273 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
4281 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4283 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
4284 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
4285 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
4303 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
4311 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
4313 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
4314 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
4315 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
4480 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
4510 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
4512 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
4516 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4522 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
4532 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
4533 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
4551 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
4566 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
4573 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4586 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
4587 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
4608 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
4634 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
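
ring_buffer_read_page() hands a whole page of events to the caller, either by copying out of the reader page or, when a full unused page is available, by swapping the caller's spare page into the ring; this is what the trace_pipe_raw/splice path builds on. A sketch using this era's alloc/read/free page prototypes (they changed in later kernels), with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/ring_buffer.h>

static void copy_one_page(struct ring_buffer *buffer, int cpu)
{
        void *spare;
        int ret;

        spare = ring_buffer_alloc_read_page(buffer, cpu);
        if (!spare)
                return;

        /* full=1: only succeed when a whole page's worth can be handed over */
        ret = ring_buffer_read_page(buffer, &spare, PAGE_SIZE, cpu, 1);
        if (ret >= 0) {
                /* "spare" now holds a raw page of events in the buffer's own
                 * binary layout; hand it to user space or a splice pipe here. */
        }

        ring_buffer_free_read_page(buffer, spare);
}
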