Lines Matching refs:cpu_buffer

515 	struct ring_buffer_per_cpu	*cpu_buffer;  member
552 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); in ring_buffer_wait()
569 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
570 work = &cpu_buffer->irq_work; in ring_buffer_wait()
621 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
622 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
623 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
657 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
666 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
667 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
836 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page() argument
866 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head() argument
879 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
883 head = cpu_buffer->head_page; in rb_head_page_activate()
890 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
904 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
909 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
911 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
915 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
938 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
943 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
947 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
952 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
956 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
961 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
965 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page() argument
974 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
981 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
985 list = cpu_buffer->pages; in rb_set_head_page()
986 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
989 page = head = cpu_buffer->head_page; in rb_set_head_page()
998 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { in rb_set_head_page()
999 cpu_buffer->head_page = page; in rb_set_head_page()
1002 rb_inc_page(cpu_buffer, &page); in rb_set_head_page()
1006 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1031 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1063 if (tail_page == cpu_buffer->tail_page) { in rb_tail_page_update()
1088 old_tail = cmpxchg(&cpu_buffer->tail_page, in rb_tail_page_update()
1098 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1103 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1112 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list() argument
1115 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1117 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1129 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1131 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1135 if (cpu_buffer->head_page) in rb_check_pages()
1136 rb_set_head_page(cpu_buffer); in rb_check_pages()
1138 rb_head_page_deactivate(cpu_buffer); in rb_check_pages()
1140 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1142 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1145 if (rb_check_list(cpu_buffer, head)) in rb_check_pages()
1149 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1152 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1155 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1159 rb_head_page_activate(cpu_buffer); in rb_check_pages()
1203 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1210 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1218 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1221 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1223 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1231 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1236 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1238 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1241 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1242 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1243 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1244 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1245 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1246 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1247 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1248 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1249 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1250 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1257 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1259 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1266 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1267 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1269 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1273 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1274 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1275 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1277 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1279 return cpu_buffer; in rb_allocate_cpu_buffer()
1282 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1285 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1289 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1291 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1294 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1296 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1307 kfree(cpu_buffer); in rb_free_cpu_buffer()
1444 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1457 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1468 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1469 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1479 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1485 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1511 cpu_buffer->pages = next_page; in rb_remove_pages()
1515 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1522 cpu_buffer->read = 0; in rb_remove_pages()
1525 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1526 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1528 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1537 rb_inc_page(cpu_buffer, &tmp_iter_page); in rb_remove_pages()
1548 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1549 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1561 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1567 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1569 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1572 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1594 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1628 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
1629 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1634 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1643 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
1647 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
1648 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
1650 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
1651 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
1654 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1659 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
1661 rb_update_pages(cpu_buffer); in update_pages_handler()
1662 complete(&cpu_buffer->update_done); in update_pages_handler()
1678 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
1715 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1717 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1718 cpu_buffer->nr_pages; in ring_buffer_resize()
1722 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
1728 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1729 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1730 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
1744 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1745 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1750 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1751 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1754 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1760 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1761 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1765 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1766 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1775 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1777 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
1780 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1781 cpu_buffer->nr_pages; in ring_buffer_resize()
1783 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1784 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
1785 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1786 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
1795 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1798 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1799 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1802 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1824 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1825 rb_check_pages(cpu_buffer); in ring_buffer_resize()
1837 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1838 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1840 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
1843 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
1877 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
1879 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
1880 cpu_buffer->reader_page->read); in rb_reader_event()
1901 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
1903 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
1915 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit() argument
1924 return cpu_buffer->commit_page->page == (void *)addr && in rb_event_is_commit()
1925 rb_commit_index(cpu_buffer) == index; in rb_event_is_commit()
1929 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
1942 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
1944 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { in rb_set_commit_to_write()
1945 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
1947 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
1948 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
1950 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
1951 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
1952 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); in rb_set_commit_to_write()
1953 cpu_buffer->write_stamp = in rb_set_commit_to_write()
1954 cpu_buffer->commit_page->page->time_stamp; in rb_set_commit_to_write()
1958 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
1959 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
1961 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
1962 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
1963 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
1964 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
1977 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) in rb_set_commit_to_write()
1981 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_reader_page() argument
1983 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_reset_reader_page()
1984 cpu_buffer->reader_page->read = 0; in rb_reset_reader_page()
1989 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
1997 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
1998 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2000 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter()
2037 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2042 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) in rb_update_event()
2072 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
2088 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2109 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2110 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2141 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2160 rb_inc_page(cpu_buffer, &new_head); in rb_handle_head_page()
2162 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2179 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2198 if (cpu_buffer->tail_page != tail_page && in rb_handle_head_page()
2199 cpu_buffer->tail_page != next_page) in rb_handle_head_page()
2200 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2211 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2214 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2240 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2267 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2313 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2317 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2318 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2324 rb_inc_page(cpu_buffer, &next_page); in rb_move_tail()
2332 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2350 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { in rb_move_tail()
2356 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2362 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2366 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2384 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2385 cpu_buffer->tail_page) && in rb_move_tail()
2386 (cpu_buffer->commit_page == in rb_move_tail()
2387 cpu_buffer->reader_page))) { in rb_move_tail()
2388 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2394 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2406 rb_reset_tail(cpu_buffer, tail_page, tail, length); in rb_move_tail()
2413 rb_reset_tail(cpu_buffer, tail_page, tail, length); in rb_move_tail()
2419 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
2435 tail_page = cpu_buffer->tail_page; in __rb_reserve_next()
2451 return rb_move_tail(cpu_buffer, length, tail, in __rb_reserve_next()
2458 rb_update_event(cpu_buffer, event, length, add_timestamp, delta); in __rb_reserve_next()
2470 local_add(length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
2476 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2489 bpage = cpu_buffer->tail_page; in rb_try_to_discard()
2506 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2515 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2517 local_inc(&cpu_buffer->committing); in rb_start_commit()
2518 local_inc(&cpu_buffer->commits); in rb_start_commit()
2521 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
2525 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
2526 !local_read(&cpu_buffer->committing))) in rb_end_commit()
2530 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
2533 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
2534 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
2536 local_dec(&cpu_buffer->committing); in rb_end_commit()
2546 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
2547 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
2548 local_inc(&cpu_buffer->committing); in rb_end_commit()
2555 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
2564 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
2574 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2575 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
2576 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
2595 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
2598 ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2599 diff = ts - cpu_buffer->write_stamp; in rb_reserve_next_event()
2605 if (likely(ts >= cpu_buffer->write_stamp)) { in rb_reserve_next_event()
2616 (unsigned long long)cpu_buffer->write_stamp, in rb_reserve_next_event()
2625 event = __rb_reserve_next(cpu_buffer, length, ts, in rb_reserve_next_event()
2636 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
2681 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
2683 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
2700 cpu_buffer->current_context = val; in trace_recursive_lock()
2706 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
2708 cpu_buffer->current_context &= cpu_buffer->current_context - 1; in trace_recursive_unlock()
2713 #define trace_recursive_lock(cpu_buffer) (0) argument
2714 #define trace_recursive_unlock(cpu_buffer) do { } while (0) argument
2736 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
2754 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2756 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
2762 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
2765 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2772 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
2780 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_write_stamp() argument
2789 if (rb_event_is_commit(cpu_buffer, event)) { in rb_update_write_stamp()
2795 cpu_buffer->write_stamp = in rb_update_write_stamp()
2796 cpu_buffer->commit_page->page->time_stamp; in rb_update_write_stamp()
2801 cpu_buffer->write_stamp += delta; in rb_update_write_stamp()
2803 cpu_buffer->write_stamp += event->time_delta; in rb_update_write_stamp()
2807 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
2810 local_inc(&cpu_buffer->entries); in rb_commit()
2811 rb_update_write_stamp(cpu_buffer, event); in rb_commit()
2812 rb_end_commit(cpu_buffer); in rb_commit()
2816 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2826 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
2827 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
2829 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2832 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_wakeups()
2834 if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) { in rb_wakeups()
2835 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
2836 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
2838 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2854 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
2857 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2859 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
2861 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2863 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
2891 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
2895 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
2910 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2917 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2921 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
2946 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
2953 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
2960 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
2962 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
2963 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
2970 rb_update_write_stamp(cpu_buffer, event); in ring_buffer_discard_commit()
2972 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
2974 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
2998 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3017 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3019 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3025 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3033 rb_commit(cpu_buffer, event); in ring_buffer_write()
3035 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3045 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3047 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3048 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3049 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3158 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
3163 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3164 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
3178 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
3183 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3184 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
3195 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
3197 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
3198 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
3209 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
3216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3217 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3222 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
3223 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3225 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3228 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3241 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
3247 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3248 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
3261 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
3266 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3268 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
3280 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
3286 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3287 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
3303 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
3309 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3310 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
3325 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
3331 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3332 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
3346 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
3351 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3352 return cpu_buffer->read; in ring_buffer_read_events_cpu()
3365 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
3371 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3372 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
3388 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
3394 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
3395 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
3404 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
3407 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
3408 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
3411 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
3414 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
3428 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
3434 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
3436 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3438 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3448 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
3450 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
3452 return iter->head_page == cpu_buffer->commit_page && in ring_buffer_iter_empty()
3453 iter->head == rb_commit_index(cpu_buffer); in ring_buffer_iter_empty()
3458 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
3471 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
3479 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
3520 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
3529 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
3538 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
3543 reader = cpu_buffer->reader_page; in rb_get_reader_page()
3546 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
3550 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
3551 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
3556 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
3560 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
3566 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
3567 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
3568 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
3569 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
3575 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
3578 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
3579 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
3586 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
3589 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); in rb_get_reader_page()
3601 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
3614 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
3627 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
3628 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); in rb_get_reader_page()
3631 cpu_buffer->reader_page = reader; in rb_get_reader_page()
3632 rb_reset_reader_page(cpu_buffer); in rb_get_reader_page()
3634 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
3635 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
3636 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
3642 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
3648 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
3654 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
3657 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
3660 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
3663 cpu_buffer->read++; in rb_advance_reader()
3665 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
3668 cpu_buffer->reader_page->read += length; in rb_advance_reader()
3673 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
3677 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
3684 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
3698 if (RB_WARN_ON(cpu_buffer, in rb_advance_iter()
3699 (iter->head_page == cpu_buffer->commit_page) && in rb_advance_iter()
3700 (iter->head + length > rb_commit_index(cpu_buffer)))) in rb_advance_iter()
3709 (iter->head_page != cpu_buffer->commit_page)) in rb_advance_iter()
3713 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
3715 return cpu_buffer->lost_events; in rb_lost_events()
3719 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
3733 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
3736 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
3740 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
3745 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
3758 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3763 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3768 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
3769 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3770 cpu_buffer->cpu, ts); in rb_buffer_peek()
3773 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
3788 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
3792 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
3793 buffer = cpu_buffer->buffer; in rb_iter_peek()
3800 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
3801 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
3816 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) in rb_iter_peek()
3819 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
3852 cpu_buffer->cpu, ts); in rb_iter_peek()
3893 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
3905 raw_spin_lock(&cpu_buffer->reader_lock); in ring_buffer_peek()
3906 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
3908 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
3910 raw_spin_unlock(&cpu_buffer->reader_lock); in ring_buffer_peek()
3930 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
3935 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
3937 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
3960 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
3974 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
3977 raw_spin_lock(&cpu_buffer->reader_lock); in ring_buffer_consume()
3979 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
3981 cpu_buffer->lost_events = 0; in ring_buffer_consume()
3982 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
3986 raw_spin_unlock(&cpu_buffer->reader_lock); in ring_buffer_consume()
4022 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4032 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4034 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4037 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_read_prepare()
4071 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
4077 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
4079 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4080 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
4082 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
4083 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4097 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
4106 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4107 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
4108 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4110 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_read_finish()
4111 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
4127 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read() local
4130 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4141 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4167 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
4169 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
4171 cpu_buffer->head_page in rb_reset_cpu()
4172 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
4173 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
4174 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
4175 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
4177 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
4179 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
4180 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
4182 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
4183 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
4184 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
4185 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
4186 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
4187 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
4189 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
4190 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
4191 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
4192 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
4193 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
4194 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
4195 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
4196 cpu_buffer->read = 0; in rb_reset_cpu()
4197 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
4199 cpu_buffer->write_stamp = 0; in rb_reset_cpu()
4200 cpu_buffer->read_stamp = 0; in rb_reset_cpu()
4202 cpu_buffer->lost_events = 0; in rb_reset_cpu()
4203 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
4205 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
4215 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
4222 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4227 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4229 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in ring_buffer_reset_cpu()
4232 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4234 rb_reset_cpu(cpu_buffer); in ring_buffer_reset_cpu()
4236 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4239 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4241 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4265 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
4275 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4278 raw_spin_lock(&cpu_buffer->reader_lock); in ring_buffer_empty()
4279 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
4281 raw_spin_unlock(&cpu_buffer->reader_lock); in ring_buffer_empty()
4299 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
4309 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
4312 raw_spin_lock(&cpu_buffer->reader_lock); in ring_buffer_empty_cpu()
4313 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
4315 raw_spin_unlock(&cpu_buffer->reader_lock); in ring_buffer_empty_cpu()
4483 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
4513 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
4515 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
4519 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4525 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
4535 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
4536 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
4554 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
4569 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
4576 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4589 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
4590 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
4611 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
4637 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
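
Most of the reader-side hits above follow the same access pattern: look the per-CPU buffer up through buffer->buffers[cpu] (e.g. lines 569, 3216, 3893), take cpu_buffer->reader_lock with interrupts disabled, inspect or advance the reader state, then unlock (e.g. lines 621-623, 3905-3910, 4513-4637). The fragment below is a minimal sketch of that pattern only; the helper name example_reader_page_busy() is invented for illustration, and the cpumask checks, RB_WARN_ON()s and full struct definitions from kernel/trace/ring_buffer.c are omitted.

/*
 * Illustrative only: mirrors the lookup at line 569 and the
 * reader_page/commit_page test at lines 621-623 of ring_buffer_wait()
 * in the matches above.  Not the actual kernel helper.
 */
static bool example_reader_page_busy(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	bool pagebusy;

	/* Per-CPU buffer lookup, as in ring_buffer_wait()/ring_buffer_peek(). */
	cpu_buffer = buffer->buffers[cpu];

	/* Reader state is only examined under reader_lock with IRQs off. */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return pagebusy;
}
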