cpu_buffer        266 drivers/usb/host/imx21-hcd.c 						etd->cpu_buffer, etd->len);
cpu_buffer        280 drivers/usb/host/imx21-hcd.c 				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
cpu_buffer        600 drivers/usb/host/imx21-hcd.c 		etd->cpu_buffer = td->cpu_buffer;
cpu_buffer        661 drivers/usb/host/imx21-hcd.c 			memcpy_fromio(etd->cpu_buffer,
cpu_buffer        840 drivers/usb/host/imx21-hcd.c 		td->cpu_buffer = urb->transfer_buffer + offset;
cpu_buffer        934 drivers/usb/host/imx21-hcd.c 			etd->cpu_buffer = urb->setup_packet;
cpu_buffer        951 drivers/usb/host/imx21-hcd.c 		etd->cpu_buffer = urb->transfer_buffer;
cpu_buffer       1058 drivers/usb/host/imx21-hcd.c 			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
cpu_buffer       1062 drivers/usb/host/imx21-hcd.c 			memcpy_fromio(etd->cpu_buffer,
cpu_buffer        342 drivers/usb/host/imx21-hcd.h 	void *cpu_buffer;
cpu_buffer        355 drivers/usb/host/imx21-hcd.h 	void *cpu_buffer;
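
The imx21-hcd hits above show cpu_buffer used as the CPU-visible side of a USB transfer: it is pointed at urb->transfer_buffer (plus a per-TD offset for isochronous transfers) or at urb->setup_packet, and on IN completion data is copied back into it either from a kmemdup()'d DMA bounce buffer or straight from ETD memory with memcpy_fromio. A minimal userspace sketch of that copy-back decision; the struct and function names here are illustrative, not the driver's:

#include <string.h>
#include <stddef.h>

/* Hypothetical model of an imx21 ETD's buffer bookkeeping. */
struct etd_model {
        void *cpu_buffer;     /* CPU-visible destination (URB memory) */
        void *bounce_buffer;  /* kmemdup()'d DMA-safe copy, or NULL */
        void *etd_mem;        /* device-local ETD buffer (modeled as plain memory) */
        size_t len;
};

/* On IN-transfer completion, land the data in cpu_buffer. */
static void etd_copy_back(struct etd_model *etd, size_t bytes_xfrd)
{
        if (etd->bounce_buffer)
                memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
        else
                /* the real driver uses memcpy_fromio() here */
                memcpy(etd->cpu_buffer, etd->etd_mem, bytes_xfrd);
}
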
cpu_buffer        506 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu	*cpu_buffer;
cpu_buffer        578 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
cpu_buffer        595 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer        596 kernel/trace/ring_buffer.c 		work = &cpu_buffer->irq_work;
cpu_buffer        649 kernel/trace/ring_buffer.c 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer        650 kernel/trace/ring_buffer.c 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
cpu_buffer        651 kernel/trace/ring_buffer.c 			nr_pages = cpu_buffer->nr_pages;
cpu_buffer        653 kernel/trace/ring_buffer.c 			if (!cpu_buffer->shortest_full ||
cpu_buffer        654 kernel/trace/ring_buffer.c 			    cpu_buffer->shortest_full < full)
cpu_buffer        655 kernel/trace/ring_buffer.c 				cpu_buffer->shortest_full = full;
cpu_buffer        656 kernel/trace/ring_buffer.c 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer        690 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer        699 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer        700 kernel/trace/ring_buffer.c 		work = &cpu_buffer->irq_work;
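
In the ring_buffer.c wait path (ring_buffer_wait and the poll path), a waiter that wants to sleep until the buffer reaches some fill percentage records its threshold in cpu_buffer->shortest_full under reader_lock. A sketch that mirrors the update condition exactly as listed above, with a mutex standing in for the raw spinlock:

#include <pthread.h>

struct percpu_model {
        pthread_mutex_t reader_lock;    /* stands in for the raw spinlock */
        int shortest_full;              /* recorded threshold; 0 = no waiter yet */
};

static void record_wait_threshold(struct percpu_model *cb, int full)
{
        pthread_mutex_lock(&cb->reader_lock);
        /* update the recorded threshold exactly as in the listing */
        if (!cb->shortest_full || cb->shortest_full < full)
                cb->shortest_full = full;
        pthread_mutex_unlock(&cb->reader_lock);
}
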
cpu_buffer        869 kernel/trace/ring_buffer.c rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        899 kernel/trace/ring_buffer.c static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        912 kernel/trace/ring_buffer.c static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer        916 kernel/trace/ring_buffer.c 	head = cpu_buffer->head_page;
cpu_buffer        923 kernel/trace/ring_buffer.c 	rb_set_list_to_head(cpu_buffer, head->list.prev);
cpu_buffer        937 kernel/trace/ring_buffer.c rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer        942 kernel/trace/ring_buffer.c 	rb_list_head_clear(cpu_buffer->pages);
cpu_buffer        944 kernel/trace/ring_buffer.c 	list_for_each(hd, cpu_buffer->pages)
cpu_buffer        948 kernel/trace/ring_buffer.c static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        971 kernel/trace/ring_buffer.c static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        976 kernel/trace/ring_buffer.c 	return rb_head_page_set(cpu_buffer, head, prev,
cpu_buffer        980 kernel/trace/ring_buffer.c static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        985 kernel/trace/ring_buffer.c 	return rb_head_page_set(cpu_buffer, head, prev,
cpu_buffer        989 kernel/trace/ring_buffer.c static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer        994 kernel/trace/ring_buffer.c 	return rb_head_page_set(cpu_buffer, head, prev,
cpu_buffer        998 kernel/trace/ring_buffer.c static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1007 kernel/trace/ring_buffer.c rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1014 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
cpu_buffer       1018 kernel/trace/ring_buffer.c 	list = cpu_buffer->pages;
cpu_buffer       1019 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
cpu_buffer       1022 kernel/trace/ring_buffer.c 	page = head = cpu_buffer->head_page;
cpu_buffer       1031 kernel/trace/ring_buffer.c 			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
cpu_buffer       1032 kernel/trace/ring_buffer.c 				cpu_buffer->head_page = page;
cpu_buffer       1035 kernel/trace/ring_buffer.c 			rb_inc_page(cpu_buffer, &page);
cpu_buffer       1039 kernel/trace/ring_buffer.c 	RB_WARN_ON(cpu_buffer, 1);
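
The rb_head_page_* and rb_set_head_page hits above belong to the ring buffer's lock-free head-page scheme: struct list_head pointers are at least 4-byte aligned, so the two low bits of ->next (masked by RB_FLAG_MASK) carry a per-page state (normal, head, update) that writers can flip atomically while readers walk the list. A self-contained model of the pointer tagging; the helper names mirror the kernel's style but the code is a sketch, not the original:

#include <stdint.h>
#include <stdio.h>

#define RB_FLAG_MASK 3UL
enum { RB_PAGE_NORMAL = 0, RB_PAGE_HEAD = 1, RB_PAGE_UPDATE = 2 };

struct list_head { struct list_head *next, *prev; };

/* strip the flag bits to recover the real pointer */
static struct list_head *rb_list_head(struct list_head *list)
{
        return (struct list_head *)((uintptr_t)list & ~RB_FLAG_MASK);
}

/* mark the page *after* 'prev' as the head page */
static void set_head_after(struct list_head *prev)
{
        prev->next = (struct list_head *)
                ((uintptr_t)rb_list_head(prev->next) | RB_PAGE_HEAD);
}

static int page_flags_after(struct list_head *prev)
{
        return (int)((uintptr_t)prev->next & RB_FLAG_MASK);
}

int main(void)
{
        struct list_head a, b;

        a.next = &b; b.prev = &a;
        b.next = &a; a.prev = &b;
        set_head_after(&a);
        printf("flags=%d real=%p (&b=%p)\n",
               page_flags_after(&a), (void *)rb_list_head(a.next), (void *)&b);
        return 0;
}
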
cpu_buffer       1062 kernel/trace/ring_buffer.c static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1081 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->pages_touched);
cpu_buffer       1093 kernel/trace/ring_buffer.c 	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
cpu_buffer       1119 kernel/trace/ring_buffer.c 		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
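
rb_tail_page_update publishes the new tail page with a compare-and-swap that succeeds only if tail_page still holds the page this writer observed, so a racing writer that already advanced it wins and the (void)cmpxchg result is deliberately ignored. The same idea in C11 atomics:

#include <stdatomic.h>

struct buffer_page;

struct tail_model {
        _Atomic(struct buffer_page *) tail_page;
};

static void tail_page_update(struct tail_model *cb,
                             struct buffer_page *tail_page,
                             struct buffer_page *next_page)
{
        struct buffer_page *expected = tail_page;

        /* Move the tail forward only if nobody moved it already;
         * on failure, keep the newer value and carry on. */
        (void)atomic_compare_exchange_strong(&cb->tail_page,
                                             &expected, next_page);
}
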
cpu_buffer       1123 kernel/trace/ring_buffer.c static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1128 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
cpu_buffer       1137 kernel/trace/ring_buffer.c static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1140 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
cpu_buffer       1142 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
cpu_buffer       1154 kernel/trace/ring_buffer.c static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1156 kernel/trace/ring_buffer.c 	struct list_head *head = cpu_buffer->pages;
cpu_buffer       1160 kernel/trace/ring_buffer.c 	if (cpu_buffer->head_page)
cpu_buffer       1161 kernel/trace/ring_buffer.c 		rb_set_head_page(cpu_buffer);
cpu_buffer       1163 kernel/trace/ring_buffer.c 	rb_head_page_deactivate(cpu_buffer);
cpu_buffer       1165 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
cpu_buffer       1167 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
cpu_buffer       1170 kernel/trace/ring_buffer.c 	if (rb_check_list(cpu_buffer, head))
cpu_buffer       1174 kernel/trace/ring_buffer.c 		if (RB_WARN_ON(cpu_buffer,
cpu_buffer       1177 kernel/trace/ring_buffer.c 		if (RB_WARN_ON(cpu_buffer,
cpu_buffer       1180 kernel/trace/ring_buffer.c 		if (rb_check_list(cpu_buffer, &bpage->list))
cpu_buffer       1184 kernel/trace/ring_buffer.c 	rb_head_page_activate(cpu_buffer);
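
rb_check_bpage/rb_check_list/rb_check_pages are debug invariants: with the head flag temporarily cleared by rb_head_page_deactivate, every node of the page ring must satisfy the plain doubly-linked-list identities. A minimal checker for the same invariants:

struct list_head { struct list_head *next, *prev; };

/* Return 0 if the ring rooted at 'head' is a consistent doubly
 * linked list, -1 on the first broken link. */
static int check_pages(struct list_head *head)
{
        struct list_head *p;

        if (head->next->prev != head || head->prev->next != head)
                return -1;
        for (p = head->next; p != head; p = p->next)
                if (p->next->prev != p || p->prev->next != p)
                        return -1;
        return 0;
}
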
cpu_buffer       1260 kernel/trace/ring_buffer.c static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1267 kernel/trace/ring_buffer.c 	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
cpu_buffer       1275 kernel/trace/ring_buffer.c 	cpu_buffer->pages = pages.next;
cpu_buffer       1278 kernel/trace/ring_buffer.c 	cpu_buffer->nr_pages = nr_pages;
cpu_buffer       1280 kernel/trace/ring_buffer.c 	rb_check_pages(cpu_buffer);
cpu_buffer       1288 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       1293 kernel/trace/ring_buffer.c 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
cpu_buffer       1295 kernel/trace/ring_buffer.c 	if (!cpu_buffer)
cpu_buffer       1298 kernel/trace/ring_buffer.c 	cpu_buffer->cpu = cpu;
cpu_buffer       1299 kernel/trace/ring_buffer.c 	cpu_buffer->buffer = buffer;
cpu_buffer       1300 kernel/trace/ring_buffer.c 	raw_spin_lock_init(&cpu_buffer->reader_lock);
cpu_buffer       1301 kernel/trace/ring_buffer.c 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer       1302 kernel/trace/ring_buffer.c 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
cpu_buffer       1303 kernel/trace/ring_buffer.c 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
cpu_buffer       1304 kernel/trace/ring_buffer.c 	init_completion(&cpu_buffer->update_done);
cpu_buffer       1305 kernel/trace/ring_buffer.c 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
cpu_buffer       1306 kernel/trace/ring_buffer.c 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
cpu_buffer       1307 kernel/trace/ring_buffer.c 	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
cpu_buffer       1314 kernel/trace/ring_buffer.c 	rb_check_bpage(cpu_buffer, bpage);
cpu_buffer       1316 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page = bpage;
cpu_buffer       1323 kernel/trace/ring_buffer.c 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
cpu_buffer       1324 kernel/trace/ring_buffer.c 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
cpu_buffer       1326 kernel/trace/ring_buffer.c 	ret = rb_allocate_pages(cpu_buffer, nr_pages);
cpu_buffer       1330 kernel/trace/ring_buffer.c 	cpu_buffer->head_page
cpu_buffer       1331 kernel/trace/ring_buffer.c 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
cpu_buffer       1332 kernel/trace/ring_buffer.c 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
cpu_buffer       1334 kernel/trace/ring_buffer.c 	rb_head_page_activate(cpu_buffer);
cpu_buffer       1336 kernel/trace/ring_buffer.c 	return cpu_buffer;
cpu_buffer       1339 kernel/trace/ring_buffer.c 	free_buffer_page(cpu_buffer->reader_page);
cpu_buffer       1342 kernel/trace/ring_buffer.c 	kfree(cpu_buffer);
cpu_buffer       1346 kernel/trace/ring_buffer.c static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1348 kernel/trace/ring_buffer.c 	struct list_head *head = cpu_buffer->pages;
cpu_buffer       1351 kernel/trace/ring_buffer.c 	free_buffer_page(cpu_buffer->reader_page);
cpu_buffer       1353 kernel/trace/ring_buffer.c 	rb_head_page_deactivate(cpu_buffer);
cpu_buffer       1364 kernel/trace/ring_buffer.c 	kfree(cpu_buffer);
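
rb_allocate_cpu_buffer follows the standard kernel construction shape visible above: kzalloc_node on the CPU's node, initialize the locks, completion and irq_work, allocate the reader page, then the page ring, unwinding through goto labels on any failure (the fail path frees reader_page and then the struct). A compact userspace model of that unwinding shape, with simplified fields:

#include <stdlib.h>

struct cpu_buffer_model {
        int cpu;
        void *reader_page;
        void *pages;
};

static struct cpu_buffer_model *alloc_cpu_buffer(int cpu)
{
        struct cpu_buffer_model *cb = calloc(1, sizeof(*cb));

        if (!cb)
                return NULL;
        cb->cpu = cpu;

        cb->reader_page = malloc(4096);
        if (!cb->reader_page)
                goto fail_free_buffer;

        cb->pages = malloc(4096);
        if (!cb->pages)
                goto fail_free_reader;

        return cb;

 fail_free_reader:
        free(cb->reader_page);
 fail_free_buffer:
        free(cb);
        return NULL;
}
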
cpu_buffer       1482 kernel/trace/ring_buffer.c static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
cpu_buffer       1495 kernel/trace/ring_buffer.c rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
cpu_buffer       1506 kernel/trace/ring_buffer.c 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
cpu_buffer       1507 kernel/trace/ring_buffer.c 	atomic_inc(&cpu_buffer->record_disabled);
cpu_buffer       1517 kernel/trace/ring_buffer.c 	tail_page = &cpu_buffer->tail_page->list;
cpu_buffer       1523 kernel/trace/ring_buffer.c 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
cpu_buffer       1549 kernel/trace/ring_buffer.c 	cpu_buffer->pages = next_page;
cpu_buffer       1553 kernel/trace/ring_buffer.c 		cpu_buffer->head_page = list_entry(next_page,
cpu_buffer       1560 kernel/trace/ring_buffer.c 	cpu_buffer->read = 0;
cpu_buffer       1563 kernel/trace/ring_buffer.c 	atomic_dec(&cpu_buffer->record_disabled);
cpu_buffer       1564 kernel/trace/ring_buffer.c 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
cpu_buffer       1566 kernel/trace/ring_buffer.c 	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
cpu_buffer       1577 kernel/trace/ring_buffer.c 		rb_inc_page(cpu_buffer, &tmp_iter_page);
cpu_buffer       1588 kernel/trace/ring_buffer.c 			local_add(page_entries, &cpu_buffer->overrun);
cpu_buffer       1589 kernel/trace/ring_buffer.c 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
cpu_buffer       1601 kernel/trace/ring_buffer.c 	RB_WARN_ON(cpu_buffer, nr_removed);
cpu_buffer       1607 kernel/trace/ring_buffer.c rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1609 kernel/trace/ring_buffer.c 	struct list_head *pages = &cpu_buffer->new_pages;
cpu_buffer       1612 kernel/trace/ring_buffer.c 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
cpu_buffer       1634 kernel/trace/ring_buffer.c 		head_page = &rb_set_head_page(cpu_buffer)->list;
cpu_buffer       1668 kernel/trace/ring_buffer.c 	RB_WARN_ON(cpu_buffer, !success);
cpu_buffer       1669 kernel/trace/ring_buffer.c 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
cpu_buffer       1674 kernel/trace/ring_buffer.c 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
cpu_buffer       1683 kernel/trace/ring_buffer.c static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1687 kernel/trace/ring_buffer.c 	if (cpu_buffer->nr_pages_to_update > 0)
cpu_buffer       1688 kernel/trace/ring_buffer.c 		success = rb_insert_pages(cpu_buffer);
cpu_buffer       1690 kernel/trace/ring_buffer.c 		success = rb_remove_pages(cpu_buffer,
cpu_buffer       1691 kernel/trace/ring_buffer.c 					-cpu_buffer->nr_pages_to_update);
cpu_buffer       1694 kernel/trace/ring_buffer.c 		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
cpu_buffer       1699 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
cpu_buffer       1701 kernel/trace/ring_buffer.c 	rb_update_pages(cpu_buffer);
cpu_buffer       1702 kernel/trace/ring_buffer.c 	complete(&cpu_buffer->update_done);
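
rb_update_pages encodes the resize direction in the sign of nr_pages_to_update: positive splices pages in from cpu_buffer->new_pages, negative removes that many, and the delta folds into nr_pages on success; update_pages_handler is just this plus complete(&cpu_buffer->update_done), so the resizer can run it on the owning CPU. A sketch of the dispatch, with stubbed-out insert/remove helpers:

struct resize_model {
        long nr_pages;
        long nr_pages_to_update;        /* signed delta requested */
};

static int insert_pages(struct resize_model *cb)         { /* ... */ return 1; }
static int remove_pages(struct resize_model *cb, long n) { /* ... */ return 1; }

static void update_pages(struct resize_model *cb)
{
        int success;

        if (cb->nr_pages_to_update > 0)
                success = insert_pages(cb);
        else
                success = remove_pages(cb, -cb->nr_pages_to_update);

        if (success)
                cb->nr_pages += cb->nr_pages_to_update;
}
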
cpu_buffer       1718 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       1755 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu_buffer       1757 kernel/trace/ring_buffer.c 			cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer       1758 kernel/trace/ring_buffer.c 							cpu_buffer->nr_pages;
cpu_buffer       1762 kernel/trace/ring_buffer.c 			if (cpu_buffer->nr_pages_to_update <= 0)
cpu_buffer       1768 kernel/trace/ring_buffer.c 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
cpu_buffer       1769 kernel/trace/ring_buffer.c 			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
cpu_buffer       1770 kernel/trace/ring_buffer.c 						&cpu_buffer->new_pages, cpu)) {
cpu_buffer       1784 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu_buffer       1785 kernel/trace/ring_buffer.c 			if (!cpu_buffer->nr_pages_to_update)
cpu_buffer       1790 kernel/trace/ring_buffer.c 				rb_update_pages(cpu_buffer);
cpu_buffer       1791 kernel/trace/ring_buffer.c 				cpu_buffer->nr_pages_to_update = 0;
cpu_buffer       1794 kernel/trace/ring_buffer.c 						&cpu_buffer->update_pages_work);
cpu_buffer       1800 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu_buffer       1801 kernel/trace/ring_buffer.c 			if (!cpu_buffer->nr_pages_to_update)
cpu_buffer       1805 kernel/trace/ring_buffer.c 				wait_for_completion(&cpu_buffer->update_done);
cpu_buffer       1806 kernel/trace/ring_buffer.c 			cpu_buffer->nr_pages_to_update = 0;
cpu_buffer       1815 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu_id];
cpu_buffer       1817 kernel/trace/ring_buffer.c 		if (nr_pages == cpu_buffer->nr_pages)
cpu_buffer       1820 kernel/trace/ring_buffer.c 		cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer       1821 kernel/trace/ring_buffer.c 						cpu_buffer->nr_pages;
cpu_buffer       1823 kernel/trace/ring_buffer.c 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
cpu_buffer       1824 kernel/trace/ring_buffer.c 		if (cpu_buffer->nr_pages_to_update > 0 &&
cpu_buffer       1825 kernel/trace/ring_buffer.c 			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
cpu_buffer       1826 kernel/trace/ring_buffer.c 					    &cpu_buffer->new_pages, cpu_id)) {
cpu_buffer       1835 kernel/trace/ring_buffer.c 			rb_update_pages(cpu_buffer);
cpu_buffer       1838 kernel/trace/ring_buffer.c 					 &cpu_buffer->update_pages_work);
cpu_buffer       1839 kernel/trace/ring_buffer.c 			wait_for_completion(&cpu_buffer->update_done);
cpu_buffer       1842 kernel/trace/ring_buffer.c 		cpu_buffer->nr_pages_to_update = 0;
cpu_buffer       1864 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu_buffer       1865 kernel/trace/ring_buffer.c 			rb_check_pages(cpu_buffer);
cpu_buffer       1877 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer       1878 kernel/trace/ring_buffer.c 		cpu_buffer->nr_pages_to_update = 0;
cpu_buffer       1880 kernel/trace/ring_buffer.c 		if (list_empty(&cpu_buffer->new_pages))
cpu_buffer       1883 kernel/trace/ring_buffer.c 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
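
ring_buffer_resize stages all new pages into cpu_buffer->new_pages before touching any live list, so an allocation failure on one CPU can be rolled back (the out_err path above frees whatever was staged) without leaving another CPU half-resized. A generic sketch of that stage-then-commit pattern:

#include <stdlib.h>

/* Preallocate everything, then commit: if any allocation fails,
 * free what was staged and report failure without touching the
 * live structure. */
static int stage_all(void **staged, int n, size_t sz)
{
        int i;

        for (i = 0; i < n; i++) {
                staged[i] = malloc(sz);
                if (!staged[i])
                        goto rollback;
        }
        return 0;               /* caller may now splice pages in */

 rollback:
        while (--i >= 0)
                free(staged[i]);
        return -1;
}
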
cpu_buffer       1911 kernel/trace/ring_buffer.c rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1913 kernel/trace/ring_buffer.c 	return __rb_page_index(cpu_buffer->reader_page,
cpu_buffer       1914 kernel/trace/ring_buffer.c 			       cpu_buffer->reader_page->read);
cpu_buffer       1935 kernel/trace/ring_buffer.c rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       1937 kernel/trace/ring_buffer.c 	return rb_page_commit(cpu_buffer->commit_page);
cpu_buffer       1950 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
cpu_buffer       1958 kernel/trace/ring_buffer.c 	if (iter->head_page == cpu_buffer->reader_page)
cpu_buffer       1959 kernel/trace/ring_buffer.c 		iter->head_page = rb_set_head_page(cpu_buffer);
cpu_buffer       1961 kernel/trace/ring_buffer.c 		rb_inc_page(cpu_buffer, &iter->head_page);
cpu_buffer       1975 kernel/trace/ring_buffer.c rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       1991 kernel/trace/ring_buffer.c 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
cpu_buffer       2012 kernel/trace/ring_buffer.c 		local_add(entries, &cpu_buffer->overrun);
cpu_buffer       2013 kernel/trace/ring_buffer.c 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
cpu_buffer       2044 kernel/trace/ring_buffer.c 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
cpu_buffer       2063 kernel/trace/ring_buffer.c 	rb_inc_page(cpu_buffer, &new_head);
cpu_buffer       2065 kernel/trace/ring_buffer.c 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
cpu_buffer       2082 kernel/trace/ring_buffer.c 		RB_WARN_ON(cpu_buffer, 1);
cpu_buffer       2099 kernel/trace/ring_buffer.c 		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
cpu_buffer       2106 kernel/trace/ring_buffer.c 			rb_head_page_set_normal(cpu_buffer, new_head,
cpu_buffer       2117 kernel/trace/ring_buffer.c 		ret = rb_head_page_set_normal(cpu_buffer, next_page,
cpu_buffer       2120 kernel/trace/ring_buffer.c 		if (RB_WARN_ON(cpu_buffer,
cpu_buffer       2129 kernel/trace/ring_buffer.c rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2156 kernel/trace/ring_buffer.c 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
cpu_buffer       2198 kernel/trace/ring_buffer.c static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
cpu_buffer       2204 kernel/trace/ring_buffer.c rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2208 kernel/trace/ring_buffer.c 	struct buffer_page *commit_page = cpu_buffer->commit_page;
cpu_buffer       2209 kernel/trace/ring_buffer.c 	struct ring_buffer *buffer = cpu_buffer->buffer;
cpu_buffer       2215 kernel/trace/ring_buffer.c 	rb_inc_page(cpu_buffer, &next_page);
cpu_buffer       2223 kernel/trace/ring_buffer.c 		local_inc(&cpu_buffer->commit_overrun);
cpu_buffer       2241 kernel/trace/ring_buffer.c 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
cpu_buffer       2247 kernel/trace/ring_buffer.c 		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
cpu_buffer       2253 kernel/trace/ring_buffer.c 				local_inc(&cpu_buffer->dropped_events);
cpu_buffer       2257 kernel/trace/ring_buffer.c 			ret = rb_handle_head_page(cpu_buffer,
cpu_buffer       2275 kernel/trace/ring_buffer.c 			if (unlikely((cpu_buffer->commit_page !=
cpu_buffer       2276 kernel/trace/ring_buffer.c 				      cpu_buffer->tail_page) &&
cpu_buffer       2277 kernel/trace/ring_buffer.c 				     (cpu_buffer->commit_page ==
cpu_buffer       2278 kernel/trace/ring_buffer.c 				      cpu_buffer->reader_page))) {
cpu_buffer       2279 kernel/trace/ring_buffer.c 				local_inc(&cpu_buffer->commit_overrun);
cpu_buffer       2285 kernel/trace/ring_buffer.c 	rb_tail_page_update(cpu_buffer, tail_page, next_page);
cpu_buffer       2289 kernel/trace/ring_buffer.c 	rb_reset_tail(cpu_buffer, tail, info);
cpu_buffer       2292 kernel/trace/ring_buffer.c 	rb_end_commit(cpu_buffer);
cpu_buffer       2294 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->committing);
cpu_buffer       2301 kernel/trace/ring_buffer.c 	rb_reset_tail(cpu_buffer, tail, info);
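
rb_move_tail is the writer-side overflow path: when a write crosses into the next page, the writer either pushes the head page forward, folding the lost entries into overrun (overwrite mode), or refuses the write and bumps commit_overrun or dropped_events (produce/consume mode), then publishes via rb_tail_page_update. A sketch of the mode decision only, with simplified accounting fields:

enum write_result { WROTE, DROPPED };

struct overflow_model {
        int overwrite;                  /* RB_FL_OVERWRITE-style flag */
        unsigned long overrun;          /* entries lost to overwrite */
        unsigned long dropped_events;   /* events refused when full */
};

static enum write_result on_buffer_full(struct overflow_model *cb,
                                        unsigned long stale_entries)
{
        if (!cb->overwrite) {
                /* produce/consume mode: never eat unread data */
                cb->dropped_events++;
                return DROPPED;
        }
        /* overwrite mode: reclaim the head page, account the loss */
        cb->overrun += stale_entries;
        return WROTE;
}
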
cpu_buffer       2328 kernel/trace/ring_buffer.c static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2343 kernel/trace/ring_buffer.c rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2351 kernel/trace/ring_buffer.c 	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
cpu_buffer       2359 kernel/trace/ring_buffer.c 		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
cpu_buffer       2415 kernel/trace/ring_buffer.c rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2428 kernel/trace/ring_buffer.c 	bpage = READ_ONCE(cpu_buffer->tail_page);
cpu_buffer       2445 kernel/trace/ring_buffer.c 			local_sub(event_length, &cpu_buffer->entries_bytes);
cpu_buffer       2454 kernel/trace/ring_buffer.c static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2456 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->committing);
cpu_buffer       2457 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->commits);
cpu_buffer       2461 kernel/trace/ring_buffer.c rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2474 kernel/trace/ring_buffer.c 	max_count = cpu_buffer->nr_pages * 100;
cpu_buffer       2476 kernel/trace/ring_buffer.c 	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
cpu_buffer       2477 kernel/trace/ring_buffer.c 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
cpu_buffer       2479 kernel/trace/ring_buffer.c 		if (RB_WARN_ON(cpu_buffer,
cpu_buffer       2480 kernel/trace/ring_buffer.c 			       rb_is_reader_page(cpu_buffer->tail_page)))
cpu_buffer       2482 kernel/trace/ring_buffer.c 		local_set(&cpu_buffer->commit_page->page->commit,
cpu_buffer       2483 kernel/trace/ring_buffer.c 			  rb_page_write(cpu_buffer->commit_page));
cpu_buffer       2484 kernel/trace/ring_buffer.c 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
cpu_buffer       2486 kernel/trace/ring_buffer.c 		if (rb_page_write(cpu_buffer->commit_page))
cpu_buffer       2487 kernel/trace/ring_buffer.c 			cpu_buffer->write_stamp =
cpu_buffer       2488 kernel/trace/ring_buffer.c 				cpu_buffer->commit_page->page->time_stamp;
cpu_buffer       2492 kernel/trace/ring_buffer.c 	while (rb_commit_index(cpu_buffer) !=
cpu_buffer       2493 kernel/trace/ring_buffer.c 	       rb_page_write(cpu_buffer->commit_page)) {
cpu_buffer       2495 kernel/trace/ring_buffer.c 		local_set(&cpu_buffer->commit_page->page->commit,
cpu_buffer       2496 kernel/trace/ring_buffer.c 			  rb_page_write(cpu_buffer->commit_page));
cpu_buffer       2497 kernel/trace/ring_buffer.c 		RB_WARN_ON(cpu_buffer,
cpu_buffer       2498 kernel/trace/ring_buffer.c 			   local_read(&cpu_buffer->commit_page->page->commit) &
cpu_buffer       2511 kernel/trace/ring_buffer.c 	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
cpu_buffer       2515 kernel/trace/ring_buffer.c static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2519 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer,
cpu_buffer       2520 kernel/trace/ring_buffer.c 		       !local_read(&cpu_buffer->committing)))
cpu_buffer       2524 kernel/trace/ring_buffer.c 	commits = local_read(&cpu_buffer->commits);
cpu_buffer       2527 kernel/trace/ring_buffer.c 	if (local_read(&cpu_buffer->committing) == 1)
cpu_buffer       2528 kernel/trace/ring_buffer.c 		rb_set_commit_to_write(cpu_buffer);
cpu_buffer       2530 kernel/trace/ring_buffer.c 	local_dec(&cpu_buffer->committing);
cpu_buffer       2540 kernel/trace/ring_buffer.c 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
cpu_buffer       2541 kernel/trace/ring_buffer.c 	    !local_read(&cpu_buffer->committing)) {
cpu_buffer       2542 kernel/trace/ring_buffer.c 		local_inc(&cpu_buffer->committing);
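
rb_start_commit/rb_end_commit bracket every write: committing nests like a refcount, commits counts finished writes, and only the outermost writer (committing == 1) runs rb_set_commit_to_write to walk commit_page up to the tail; the re-check after the decrement catches an interrupt that wrote in between, as the retry logic above shows. A C11 model of that discipline (atomic_long stands in for the kernel's local_t, and the kernel's barriers are omitted):

#include <stdatomic.h>

struct commit_model {
        atomic_long committing;         /* nesting depth of writers */
        atomic_long commits;            /* total finished writes */
};

static void set_commit_to_write(struct commit_model *cb) { /* catch up */ }

static void start_commit(struct commit_model *cb)
{
        atomic_fetch_add(&cb->committing, 1);
        atomic_fetch_add(&cb->commits, 1);
}

static void end_commit(struct commit_model *cb)
{
        long commits;

 again:
        commits = atomic_load(&cb->commits);
        /* only the outermost writer publishes */
        if (atomic_load(&cb->committing) == 1)
                set_commit_to_write(cb);
        atomic_fetch_sub(&cb->committing, 1);
        /* an interrupt may have written between the publish and the
         * decrement: if so, take responsibility for its commit too */
        if (atomic_load(&cb->commits) != commits &&
            atomic_load(&cb->committing) == 0) {
                atomic_fetch_add(&cb->committing, 1);
                goto again;
        }
}
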
cpu_buffer       2561 kernel/trace/ring_buffer.c rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2570 kernel/trace/ring_buffer.c 	return cpu_buffer->commit_page->page == (void *)addr &&
cpu_buffer       2571 kernel/trace/ring_buffer.c 		rb_commit_index(cpu_buffer) == index;
cpu_buffer       2575 kernel/trace/ring_buffer.c rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2584 kernel/trace/ring_buffer.c 	if (rb_event_is_commit(cpu_buffer, event)) {
cpu_buffer       2590 kernel/trace/ring_buffer.c 			cpu_buffer->write_stamp =
cpu_buffer       2591 kernel/trace/ring_buffer.c 				cpu_buffer->commit_page->page->time_stamp;
cpu_buffer       2594 kernel/trace/ring_buffer.c 			cpu_buffer->write_stamp += delta;
cpu_buffer       2597 kernel/trace/ring_buffer.c 			cpu_buffer->write_stamp = delta;
cpu_buffer       2599 kernel/trace/ring_buffer.c 			cpu_buffer->write_stamp += event->time_delta;
cpu_buffer       2603 kernel/trace/ring_buffer.c static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2606 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->entries);
cpu_buffer       2607 kernel/trace/ring_buffer.c 	rb_update_write_stamp(cpu_buffer, event);
cpu_buffer       2608 kernel/trace/ring_buffer.c 	rb_end_commit(cpu_buffer);
cpu_buffer       2612 kernel/trace/ring_buffer.c rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2624 kernel/trace/ring_buffer.c 	if (cpu_buffer->irq_work.waiters_pending) {
cpu_buffer       2625 kernel/trace/ring_buffer.c 		cpu_buffer->irq_work.waiters_pending = false;
cpu_buffer       2627 kernel/trace/ring_buffer.c 		irq_work_queue(&cpu_buffer->irq_work.work);
cpu_buffer       2630 kernel/trace/ring_buffer.c 	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
cpu_buffer       2633 kernel/trace/ring_buffer.c 	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
cpu_buffer       2636 kernel/trace/ring_buffer.c 	if (!cpu_buffer->irq_work.full_waiters_pending)
cpu_buffer       2639 kernel/trace/ring_buffer.c 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
cpu_buffer       2641 kernel/trace/ring_buffer.c 	full = cpu_buffer->shortest_full;
cpu_buffer       2642 kernel/trace/ring_buffer.c 	nr_pages = cpu_buffer->nr_pages;
cpu_buffer       2643 kernel/trace/ring_buffer.c 	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
cpu_buffer       2647 kernel/trace/ring_buffer.c 	cpu_buffer->irq_work.wakeup_full = true;
cpu_buffer       2648 kernel/trace/ring_buffer.c 	cpu_buffer->irq_work.full_waiters_pending = false;
cpu_buffer       2650 kernel/trace/ring_buffer.c 	irq_work_queue(&cpu_buffer->irq_work.work);
cpu_buffer       2692 kernel/trace/ring_buffer.c trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2694 kernel/trace/ring_buffer.c 	unsigned int val = cpu_buffer->current_context;
cpu_buffer       2704 kernel/trace/ring_buffer.c 	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
cpu_buffer       2707 kernel/trace/ring_buffer.c 	val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer       2708 kernel/trace/ring_buffer.c 	cpu_buffer->current_context = val;
cpu_buffer       2714 kernel/trace/ring_buffer.c trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       2716 kernel/trace/ring_buffer.c 	cpu_buffer->current_context &=
cpu_buffer       2717 kernel/trace/ring_buffer.c 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
cpu_buffer       2738 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       2744 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       2746 kernel/trace/ring_buffer.c 	cpu_buffer->nest += NESTED_BITS;
cpu_buffer       2758 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       2763 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       2765 kernel/trace/ring_buffer.c 	cpu_buffer->nest -= NESTED_BITS;
cpu_buffer       2781 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       2784 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       2786 kernel/trace/ring_buffer.c 	rb_commit(cpu_buffer, event);
cpu_buffer       2788 kernel/trace/ring_buffer.c 	rb_wakeups(buffer, cpu_buffer);
cpu_buffer       2790 kernel/trace/ring_buffer.c 	trace_recursive_unlock(cpu_buffer);
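
trace_recursive_lock keeps one bit per tracing context in cpu_buffer->current_context; a writer re-entering from a context whose bit is already set is rejected, which stops same-context recursion while still letting, say, an NMI interrupt an IRQ-context write. The shift by cpu_buffer->nest is what ring_buffer_nest_start/end use to allow one level of intentional nesting. A model of both pieces (the context names are illustrative):

enum ctx { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };
#define NESTED_BITS 4   /* nest_start shifts the working bits up by this */

struct recursion_model {
        unsigned int current_context;
        unsigned int nest;      /* 0, or NESTED_BITS while nesting is allowed */
};

static int recursive_lock(struct recursion_model *cb, enum ctx bit)
{
        if (cb->current_context & (1U << (bit + cb->nest)))
                return -1;      /* same-context recursion: reject */
        cb->current_context |= 1U << (bit + cb->nest);
        return 0;
}

static void recursive_unlock(struct recursion_model *cb)
{
        /* val &= val - (1 << nest) clears the lowest set bit at or
         * above 'nest' -- the most recently entered context */
        cb->current_context &=
                cb->current_context - (1U << cb->nest);
}
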
cpu_buffer       2799 kernel/trace/ring_buffer.c rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2806 kernel/trace/ring_buffer.c 		  (unsigned long long)cpu_buffer->write_stamp,
cpu_buffer       2816 kernel/trace/ring_buffer.c __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2832 kernel/trace/ring_buffer.c 	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
cpu_buffer       2843 kernel/trace/ring_buffer.c 	if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
cpu_buffer       2848 kernel/trace/ring_buffer.c 		return rb_move_tail(cpu_buffer, tail, info);
cpu_buffer       2853 kernel/trace/ring_buffer.c 	rb_update_event(cpu_buffer, event, info);
cpu_buffer       2865 kernel/trace/ring_buffer.c 	local_add(info->length, &cpu_buffer->entries_bytes);
cpu_buffer       2872 kernel/trace/ring_buffer.c 		      struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       2880 kernel/trace/ring_buffer.c 	rb_start_commit(cpu_buffer);
cpu_buffer       2890 kernel/trace/ring_buffer.c 	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
cpu_buffer       2891 kernel/trace/ring_buffer.c 		local_dec(&cpu_buffer->committing);
cpu_buffer       2892 kernel/trace/ring_buffer.c 		local_dec(&cpu_buffer->commits);
cpu_buffer       2911 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
cpu_buffer       2914 kernel/trace/ring_buffer.c 	info.ts = rb_time_stamp(cpu_buffer->buffer);
cpu_buffer       2915 kernel/trace/ring_buffer.c 	diff = info.ts - cpu_buffer->write_stamp;
cpu_buffer       2922 kernel/trace/ring_buffer.c 		rb_handle_timestamp(cpu_buffer, &info);
cpu_buffer       2924 kernel/trace/ring_buffer.c 		if (likely(info.ts >= cpu_buffer->write_stamp)) {
cpu_buffer       2927 kernel/trace/ring_buffer.c 			rb_handle_timestamp(cpu_buffer, &info);
cpu_buffer       2930 kernel/trace/ring_buffer.c 	event = __rb_reserve_next(cpu_buffer, &info);
cpu_buffer       2944 kernel/trace/ring_buffer.c 	rb_end_commit(cpu_buffer);
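
rb_reserve_next_event computes each event's delta as info.ts - cpu_buffer->write_stamp and only falls back to rb_handle_timestamp (which injects a full time-stamp event) when the clock appears to have gone backwards or the delta will not fit the event format's delta field. A simplified sketch, assuming the 27-bit delta width used by the ring buffer's binary format (TS_SHIFT in the source):

#include <stdint.h>

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)

struct stamp_model {
        uint64_t write_stamp;   /* time of the last committed event */
        int add_timestamp;      /* need an extended time-stamp event? */
};

static uint64_t compute_delta(struct stamp_model *cb, uint64_t ts)
{
        uint64_t diff = ts - cb->write_stamp;

        cb->add_timestamp = 0;
        /* clock went backwards, or the delta won't fit in 27 bits:
         * fall back to a full time-stamp event instead of a delta */
        if (ts < cb->write_stamp || (diff & ~TS_MASK)) {
                cb->add_timestamp = 1;
                return 0;
        }
        return diff;
}
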
cpu_buffer       2966 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       2981 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       2983 kernel/trace/ring_buffer.c 	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
cpu_buffer       2989 kernel/trace/ring_buffer.c 	if (unlikely(trace_recursive_lock(cpu_buffer)))
cpu_buffer       2992 kernel/trace/ring_buffer.c 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
cpu_buffer       2999 kernel/trace/ring_buffer.c 	trace_recursive_unlock(cpu_buffer);
cpu_buffer       3013 kernel/trace/ring_buffer.c rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       3017 kernel/trace/ring_buffer.c 	struct buffer_page *bpage = cpu_buffer->commit_page;
cpu_buffer       3032 kernel/trace/ring_buffer.c 	rb_inc_page(cpu_buffer, &bpage);
cpu_buffer       3039 kernel/trace/ring_buffer.c 		rb_inc_page(cpu_buffer, &bpage);
cpu_buffer       3043 kernel/trace/ring_buffer.c 	RB_WARN_ON(cpu_buffer, 1);
cpu_buffer       3068 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3075 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3082 kernel/trace/ring_buffer.c 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
cpu_buffer       3084 kernel/trace/ring_buffer.c 	rb_decrement_entry(cpu_buffer, event);
cpu_buffer       3085 kernel/trace/ring_buffer.c 	if (rb_try_to_discard(cpu_buffer, event))
cpu_buffer       3092 kernel/trace/ring_buffer.c 	rb_update_write_stamp(cpu_buffer, event);
cpu_buffer       3094 kernel/trace/ring_buffer.c 	rb_end_commit(cpu_buffer);
cpu_buffer       3096 kernel/trace/ring_buffer.c 	trace_recursive_unlock(cpu_buffer);
cpu_buffer       3120 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3136 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3138 kernel/trace/ring_buffer.c 	if (atomic_read(&cpu_buffer->record_disabled))
cpu_buffer       3144 kernel/trace/ring_buffer.c 	if (unlikely(trace_recursive_lock(cpu_buffer)))
cpu_buffer       3147 kernel/trace/ring_buffer.c 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
cpu_buffer       3155 kernel/trace/ring_buffer.c 	rb_commit(cpu_buffer, event);
cpu_buffer       3157 kernel/trace/ring_buffer.c 	rb_wakeups(buffer, cpu_buffer);
cpu_buffer       3162 kernel/trace/ring_buffer.c 	trace_recursive_unlock(cpu_buffer);
cpu_buffer       3171 kernel/trace/ring_buffer.c static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       3173 kernel/trace/ring_buffer.c 	struct buffer_page *reader = cpu_buffer->reader_page;
cpu_buffer       3174 kernel/trace/ring_buffer.c 	struct buffer_page *head = rb_set_head_page(cpu_buffer);
cpu_buffer       3175 kernel/trace/ring_buffer.c 	struct buffer_page *commit = cpu_buffer->commit_page;
cpu_buffer       3300 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3305 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3306 kernel/trace/ring_buffer.c 	atomic_inc(&cpu_buffer->record_disabled);
cpu_buffer       3320 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3325 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3326 kernel/trace/ring_buffer.c 	atomic_dec(&cpu_buffer->record_disabled);
cpu_buffer       3337 kernel/trace/ring_buffer.c rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       3339 kernel/trace/ring_buffer.c 	return local_read(&cpu_buffer->entries) -
cpu_buffer       3340 kernel/trace/ring_buffer.c 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
cpu_buffer       3351 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3358 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3359 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       3364 kernel/trace/ring_buffer.c 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
cpu_buffer       3365 kernel/trace/ring_buffer.c 		bpage = cpu_buffer->reader_page;
cpu_buffer       3367 kernel/trace/ring_buffer.c 		bpage = rb_set_head_page(cpu_buffer);
cpu_buffer       3370 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       3383 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3389 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3390 kernel/trace/ring_buffer.c 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
cpu_buffer       3403 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3408 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3410 kernel/trace/ring_buffer.c 	return rb_num_of_entries(cpu_buffer);
cpu_buffer       3422 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3428 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3429 kernel/trace/ring_buffer.c 	ret = local_read(&cpu_buffer->overrun);
cpu_buffer       3445 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3451 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3452 kernel/trace/ring_buffer.c 	ret = local_read(&cpu_buffer->commit_overrun);
cpu_buffer       3467 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3473 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3474 kernel/trace/ring_buffer.c 	ret = local_read(&cpu_buffer->dropped_events);
cpu_buffer       3488 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3493 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3494 kernel/trace/ring_buffer.c 	return cpu_buffer->read;
cpu_buffer       3507 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3513 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3514 kernel/trace/ring_buffer.c 		entries += rb_num_of_entries(cpu_buffer);
cpu_buffer       3530 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3536 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer       3537 kernel/trace/ring_buffer.c 		overruns += local_read(&cpu_buffer->overrun);
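
rb_num_of_entries, used by the statistics accessors above, derives the readable entry count from three existing counters instead of maintaining a fourth:

/* entries still readable = written - (overwritten + consumed) */
static unsigned long num_of_entries(unsigned long entries,
                                    unsigned long overrun,
                                    unsigned long read)
{
        return entries - (overrun + read);
}
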
cpu_buffer       3546 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
cpu_buffer       3549 kernel/trace/ring_buffer.c 	iter->head_page = cpu_buffer->reader_page;
cpu_buffer       3550 kernel/trace/ring_buffer.c 	iter->head = cpu_buffer->reader_page->read;
cpu_buffer       3553 kernel/trace/ring_buffer.c 	iter->cache_read = cpu_buffer->read;
cpu_buffer       3556 kernel/trace/ring_buffer.c 		iter->read_stamp = cpu_buffer->read_stamp;
cpu_buffer       3570 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3576 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
cpu_buffer       3578 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       3580 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       3590 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3596 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
cpu_buffer       3599 kernel/trace/ring_buffer.c 	reader = cpu_buffer->reader_page;
cpu_buffer       3600 kernel/trace/ring_buffer.c 	head_page = cpu_buffer->head_page;
cpu_buffer       3601 kernel/trace/ring_buffer.c 	commit_page = cpu_buffer->commit_page;
cpu_buffer       3607 kernel/trace/ring_buffer.c 		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
cpu_buffer       3612 kernel/trace/ring_buffer.c rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer       3623 kernel/trace/ring_buffer.c 		cpu_buffer->read_stamp += delta;
cpu_buffer       3628 kernel/trace/ring_buffer.c 		cpu_buffer->read_stamp = delta;
cpu_buffer       3632 kernel/trace/ring_buffer.c 		cpu_buffer->read_stamp += event->time_delta;
cpu_buffer       3672 kernel/trace/ring_buffer.c rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       3681 kernel/trace/ring_buffer.c 	arch_spin_lock(&cpu_buffer->lock);
cpu_buffer       3690 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
cpu_buffer       3695 kernel/trace/ring_buffer.c 	reader = cpu_buffer->reader_page;
cpu_buffer       3698 kernel/trace/ring_buffer.c 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
cpu_buffer       3702 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer,
cpu_buffer       3703 kernel/trace/ring_buffer.c 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
cpu_buffer       3708 kernel/trace/ring_buffer.c 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
cpu_buffer       3712 kernel/trace/ring_buffer.c 	if (rb_num_of_entries(cpu_buffer) == 0)
cpu_buffer       3718 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->write, 0);
cpu_buffer       3719 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->entries, 0);
cpu_buffer       3720 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer       3721 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->real_end = 0;
cpu_buffer       3727 kernel/trace/ring_buffer.c 	reader = rb_set_head_page(cpu_buffer);
cpu_buffer       3730 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer       3731 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->list.prev = reader->list.prev;
cpu_buffer       3738 kernel/trace/ring_buffer.c 	cpu_buffer->pages = reader->list.prev;
cpu_buffer       3741 kernel/trace/ring_buffer.c 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
cpu_buffer       3753 kernel/trace/ring_buffer.c 	overwrite = local_read(&(cpu_buffer->overrun));
cpu_buffer       3766 kernel/trace/ring_buffer.c 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
cpu_buffer       3779 kernel/trace/ring_buffer.c 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
cpu_buffer       3780 kernel/trace/ring_buffer.c 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
cpu_buffer       3782 kernel/trace/ring_buffer.c 	local_inc(&cpu_buffer->pages_read);
cpu_buffer       3785 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page = reader;
cpu_buffer       3786 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->read = 0;
cpu_buffer       3788 kernel/trace/ring_buffer.c 	if (overwrite != cpu_buffer->last_overrun) {
cpu_buffer       3789 kernel/trace/ring_buffer.c 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
cpu_buffer       3790 kernel/trace/ring_buffer.c 		cpu_buffer->last_overrun = overwrite;
cpu_buffer       3798 kernel/trace/ring_buffer.c 		cpu_buffer->read_stamp = reader->page->time_stamp;
cpu_buffer       3800 kernel/trace/ring_buffer.c 	arch_spin_unlock(&cpu_buffer->lock);
cpu_buffer       3806 kernel/trace/ring_buffer.c static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       3812 kernel/trace/ring_buffer.c 	reader = rb_get_reader_page(cpu_buffer);
cpu_buffer       3815 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, !reader))
cpu_buffer       3818 kernel/trace/ring_buffer.c 	event = rb_reader_event(cpu_buffer);
cpu_buffer       3821 kernel/trace/ring_buffer.c 		cpu_buffer->read++;
cpu_buffer       3823 kernel/trace/ring_buffer.c 	rb_update_read_stamp(cpu_buffer, event);
cpu_buffer       3826 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->read += length;
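
rb_get_reader_page is the heart of the lock-free read side: the reader points its private spare page's list links around the current head page, then swaps itself in for the head with a single cmpxchg on the previous page's tagged ->next (rb_head_page_replace above), so a concurrent writer sees either the old head or the reader page, never a torn list; the loop retries a bounded number of times (the nr_loops > 3 warning) if the writer moved the head meanwhile. A simplified model of just the swap step, reusing the pointer-tagging idea sketched earlier:

#include <stdatomic.h>
#include <stdint.h>

#define RB_PAGE_HEAD 1UL

struct tagged_link {
        _Atomic(uintptr_t) next;        /* pointer with flag in bit 0 */
};

/* Atomically swap 'reader' in for 'head', which must currently be
 * tagged as the head page behind 'prev'.  Returns nonzero on success;
 * fails if a writer already moved the head. */
static int head_page_replace(struct tagged_link *prev,
                             void *head, void *reader)
{
        uintptr_t expected = (uintptr_t)head | RB_PAGE_HEAD;
        uintptr_t desired  = (uintptr_t)reader | RB_PAGE_HEAD;

        return atomic_compare_exchange_strong(&prev->next,
                                              &expected, desired);
}
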
cpu_buffer       3831 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3835 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
cpu_buffer       3842 kernel/trace/ring_buffer.c 		if (iter->head_page == cpu_buffer->commit_page)
cpu_buffer       3856 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer,
cpu_buffer       3857 kernel/trace/ring_buffer.c 		       (iter->head_page == cpu_buffer->commit_page) &&
cpu_buffer       3858 kernel/trace/ring_buffer.c 		       (iter->head + length > rb_commit_index(cpu_buffer))))
cpu_buffer       3867 kernel/trace/ring_buffer.c 	    (iter->head_page != cpu_buffer->commit_page))
cpu_buffer       3871 kernel/trace/ring_buffer.c static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       3873 kernel/trace/ring_buffer.c 	return cpu_buffer->lost_events;
cpu_buffer       3877 kernel/trace/ring_buffer.c rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
cpu_buffer       3893 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
cpu_buffer       3896 kernel/trace/ring_buffer.c 	reader = rb_get_reader_page(cpu_buffer);
cpu_buffer       3900 kernel/trace/ring_buffer.c 	event = rb_reader_event(cpu_buffer);
cpu_buffer       3905 kernel/trace/ring_buffer.c 			RB_WARN_ON(cpu_buffer, 1);
cpu_buffer       3918 kernel/trace/ring_buffer.c 		rb_advance_reader(cpu_buffer);
cpu_buffer       3924 kernel/trace/ring_buffer.c 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer       3925 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu_buffer       3928 kernel/trace/ring_buffer.c 		rb_advance_reader(cpu_buffer);
cpu_buffer       3933 kernel/trace/ring_buffer.c 			*ts = cpu_buffer->read_stamp + event->time_delta;
cpu_buffer       3934 kernel/trace/ring_buffer.c 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer       3935 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu_buffer       3938 kernel/trace/ring_buffer.c 			*lost_events = rb_lost_events(cpu_buffer);
cpu_buffer       3953 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       3960 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
cpu_buffer       3961 kernel/trace/ring_buffer.c 	buffer = cpu_buffer->buffer;
cpu_buffer       3968 kernel/trace/ring_buffer.c 	if (unlikely(iter->cache_read != cpu_buffer->read ||
cpu_buffer       3969 kernel/trace/ring_buffer.c 		     iter->cache_reader_page != cpu_buffer->reader_page))
cpu_buffer       3984 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
cpu_buffer       3987 kernel/trace/ring_buffer.c 	if (rb_per_cpu_empty(cpu_buffer))
cpu_buffer       4014 kernel/trace/ring_buffer.c 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer       4015 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu_buffer       4025 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu_buffer       4037 kernel/trace/ring_buffer.c static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       4040 kernel/trace/ring_buffer.c 		raw_spin_lock(&cpu_buffer->reader_lock);
cpu_buffer       4053 kernel/trace/ring_buffer.c 	if (raw_spin_trylock(&cpu_buffer->reader_lock))
cpu_buffer       4057 kernel/trace/ring_buffer.c 	atomic_inc(&cpu_buffer->record_disabled);
cpu_buffer       4062 kernel/trace/ring_buffer.c rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
cpu_buffer       4065 kernel/trace/ring_buffer.c 		raw_spin_unlock(&cpu_buffer->reader_lock);
cpu_buffer       4083 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4093 kernel/trace/ring_buffer.c 	dolock = rb_reader_lock(cpu_buffer);
cpu_buffer       4094 kernel/trace/ring_buffer.c 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
cpu_buffer       4096 kernel/trace/ring_buffer.c 		rb_advance_reader(cpu_buffer);
cpu_buffer       4097 kernel/trace/ring_buffer.c 	rb_reader_unlock(cpu_buffer, dolock);
cpu_buffer       4117 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
cpu_buffer       4122 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4124 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       4147 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4159 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4161 kernel/trace/ring_buffer.c 	dolock = rb_reader_lock(cpu_buffer);
cpu_buffer       4163 kernel/trace/ring_buffer.c 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
cpu_buffer       4165 kernel/trace/ring_buffer.c 		cpu_buffer->lost_events = 0;
cpu_buffer       4166 kernel/trace/ring_buffer.c 		rb_advance_reader(cpu_buffer);
cpu_buffer       4169 kernel/trace/ring_buffer.c 	rb_reader_unlock(cpu_buffer, dolock);
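
rb_reader_lock normally takes reader_lock outright, but callers that may run where spinning could deadlock fall back to raw_spin_trylock and, on failure, proceed without the lock while permanently disabling recording (the record_disabled increment above), returning whether the lock was taken so rb_reader_unlock can mirror it. A sketch of that pattern; the can_block flag stands in for the kernel's actual context test:

#include <stdatomic.h>
#include <stdbool.h>

struct reader_model {
        atomic_flag reader_lock;        /* init with ATOMIC_FLAG_INIT */
        atomic_int record_disabled;
};

static bool reader_lock(struct reader_model *cb, bool can_block)
{
        if (can_block) {
                while (atomic_flag_test_and_set(&cb->reader_lock))
                        ;       /* spin */
                return true;
        }
        /* unsafe context: a single trylock attempt */
        if (!atomic_flag_test_and_set(&cb->reader_lock))
                return true;
        /* couldn't lock: read anyway, but disable further writes */
        atomic_fetch_add(&cb->record_disabled, 1);
        return false;
}

static void reader_unlock(struct reader_model *cb, bool locked)
{
        if (locked)
                atomic_flag_clear(&cb->reader_lock);
}
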
cpu_buffer       4206 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4216 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4218 kernel/trace/ring_buffer.c 	iter->cpu_buffer = cpu_buffer;
cpu_buffer       4221 kernel/trace/ring_buffer.c 	atomic_inc(&cpu_buffer->record_disabled);
cpu_buffer       4255 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4261 kernel/trace/ring_buffer.c 	cpu_buffer = iter->cpu_buffer;
cpu_buffer       4263 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4264 kernel/trace/ring_buffer.c 	arch_spin_lock(&cpu_buffer->lock);
cpu_buffer       4266 kernel/trace/ring_buffer.c 	arch_spin_unlock(&cpu_buffer->lock);
cpu_buffer       4267 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       4281 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
cpu_buffer       4290 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4291 kernel/trace/ring_buffer.c 	rb_check_pages(cpu_buffer);
cpu_buffer       4292 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       4294 kernel/trace/ring_buffer.c 	atomic_dec(&cpu_buffer->record_disabled);
cpu_buffer       4295 kernel/trace/ring_buffer.c 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
cpu_buffer       4311 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
cpu_buffer       4314 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4325 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       4351 kernel/trace/ring_buffer.c rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer       4353 kernel/trace/ring_buffer.c 	rb_head_page_deactivate(cpu_buffer);
cpu_buffer       4355 kernel/trace/ring_buffer.c 	cpu_buffer->head_page
cpu_buffer       4356 kernel/trace/ring_buffer.c 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
cpu_buffer       4357 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->head_page->write, 0);
cpu_buffer       4358 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->head_page->entries, 0);
cpu_buffer       4359 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer       4361 kernel/trace/ring_buffer.c 	cpu_buffer->head_page->read = 0;
cpu_buffer       4363 kernel/trace/ring_buffer.c 	cpu_buffer->tail_page = cpu_buffer->head_page;
cpu_buffer       4364 kernel/trace/ring_buffer.c 	cpu_buffer->commit_page = cpu_buffer->head_page;
cpu_buffer       4366 kernel/trace/ring_buffer.c 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
cpu_buffer       4367 kernel/trace/ring_buffer.c 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
cpu_buffer       4368 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->write, 0);
cpu_buffer       4369 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->entries, 0);
cpu_buffer       4370 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer       4371 kernel/trace/ring_buffer.c 	cpu_buffer->reader_page->read = 0;
cpu_buffer       4373 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->entries_bytes, 0);
cpu_buffer       4374 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->overrun, 0);
cpu_buffer       4375 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->commit_overrun, 0);
cpu_buffer       4376 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->dropped_events, 0);
cpu_buffer       4377 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->entries, 0);
cpu_buffer       4378 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->committing, 0);
cpu_buffer       4379 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->commits, 0);
cpu_buffer       4380 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->pages_touched, 0);
cpu_buffer       4381 kernel/trace/ring_buffer.c 	local_set(&cpu_buffer->pages_read, 0);
cpu_buffer       4382 kernel/trace/ring_buffer.c 	cpu_buffer->last_pages_touch = 0;
cpu_buffer       4383 kernel/trace/ring_buffer.c 	cpu_buffer->shortest_full = 0;
cpu_buffer       4384 kernel/trace/ring_buffer.c 	cpu_buffer->read = 0;
cpu_buffer       4385 kernel/trace/ring_buffer.c 	cpu_buffer->read_bytes = 0;
cpu_buffer       4387 kernel/trace/ring_buffer.c 	cpu_buffer->write_stamp = 0;
cpu_buffer       4388 kernel/trace/ring_buffer.c 	cpu_buffer->read_stamp = 0;
cpu_buffer       4390 kernel/trace/ring_buffer.c 	cpu_buffer->lost_events = 0;
cpu_buffer       4391 kernel/trace/ring_buffer.c 	cpu_buffer->last_overrun = 0;
cpu_buffer       4393 kernel/trace/ring_buffer.c 	rb_head_page_activate(cpu_buffer);
cpu_buffer       4403 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4410 kernel/trace/ring_buffer.c 	atomic_inc(&cpu_buffer->record_disabled);
cpu_buffer       4415 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4417 kernel/trace/ring_buffer.c 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
cpu_buffer       4420 kernel/trace/ring_buffer.c 	arch_spin_lock(&cpu_buffer->lock);
cpu_buffer       4422 kernel/trace/ring_buffer.c 	rb_reset_cpu(cpu_buffer);
cpu_buffer       4424 kernel/trace/ring_buffer.c 	arch_spin_unlock(&cpu_buffer->lock);
cpu_buffer       4427 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
cpu_buffer       4429 kernel/trace/ring_buffer.c 	atomic_dec(&cpu_buffer->record_disabled);
cpu_buffer       4453 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4461 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4463 kernel/trace/ring_buffer.c 		dolock = rb_reader_lock(cpu_buffer);
cpu_buffer       4464 kernel/trace/ring_buffer.c 		ret = rb_per_cpu_empty(cpu_buffer);
cpu_buffer       4465 kernel/trace/ring_buffer.c 		rb_reader_unlock(cpu_buffer, dolock);
cpu_buffer       4483 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4491 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4493 kernel/trace/ring_buffer.c 	dolock = rb_reader_lock(cpu_buffer);
cpu_buffer       4494 kernel/trace/ring_buffer.c 	ret = rb_per_cpu_empty(cpu_buffer);
cpu_buffer       4495 kernel/trace/ring_buffer.c 	rb_reader_unlock(cpu_buffer, dolock);
cpu_buffer       4595 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer       4603 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4605 kernel/trace/ring_buffer.c 	arch_spin_lock(&cpu_buffer->lock);
cpu_buffer       4607 kernel/trace/ring_buffer.c 	if (cpu_buffer->free_page) {
cpu_buffer       4608 kernel/trace/ring_buffer.c 		bpage = cpu_buffer->free_page;
cpu_buffer       4609 kernel/trace/ring_buffer.c 		cpu_buffer->free_page = NULL;
cpu_buffer       4612 kernel/trace/ring_buffer.c 	arch_spin_unlock(&cpu_buffer->lock);
cpu_buffer       4642 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4652 kernel/trace/ring_buffer.c 	arch_spin_lock(&cpu_buffer->lock);
cpu_buffer       4654 kernel/trace/ring_buffer.c 	if (!cpu_buffer->free_page) {
cpu_buffer       4655 kernel/trace/ring_buffer.c 		cpu_buffer->free_page = bpage;
cpu_buffer       4659 kernel/trace/ring_buffer.c 	arch_spin_unlock(&cpu_buffer->lock);
cpu_buffer       4703 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu_buffer       4733 kernel/trace/ring_buffer.c 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
cpu_buffer       4735 kernel/trace/ring_buffer.c 	reader = rb_get_reader_page(cpu_buffer);
cpu_buffer       4739 kernel/trace/ring_buffer.c 	event = rb_reader_event(cpu_buffer);
cpu_buffer       4745 kernel/trace/ring_buffer.c 	missed_events = cpu_buffer->lost_events;
cpu_buffer       4755 kernel/trace/ring_buffer.c 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
cpu_buffer       4756 kernel/trace/ring_buffer.c 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
cpu_buffer       4774 kernel/trace/ring_buffer.c 		save_timestamp = cpu_buffer->read_stamp;
cpu_buffer       4789 kernel/trace/ring_buffer.c 			rb_advance_reader(cpu_buffer);
cpu_buffer       4796 kernel/trace/ring_buffer.c 			event = rb_reader_event(cpu_buffer);
cpu_buffer       4809 kernel/trace/ring_buffer.c 		cpu_buffer->read += rb_page_entries(reader);
cpu_buffer       4810 kernel/trace/ring_buffer.c 		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
cpu_buffer       4831 kernel/trace/ring_buffer.c 	cpu_buffer->lost_events = 0;
cpu_buffer       4857 kernel/trace/ring_buffer.c 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
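
ring_buffer_read_page has two modes visible in the hits above: when the caller wants a full page and the reader page is not still being written (reader != commit page), the whole reader page is swapped out to the caller and the caller's spare page swapped in, giving a zero-copy read; otherwise events are copied one at a time into the caller's page, with read_stamp preserved for the first event and lost_events reported as missed_events. A much-simplified sketch of the mode choice:

#include <string.h>
#include <stddef.h>

struct read_page_model {
        void *reader_page;
        int reader_is_commit;   /* writer still active on this page? */
        unsigned long lost_events;
};

/* *data_page is the spare page the caller handed in; on a full-page
 * read it is swapped into the ring and the caller gets ours back. */
static void read_page(struct read_page_model *cb, void **data_page,
                      int want_full, size_t page_size)
{
        if (want_full && !cb->reader_is_commit) {
                /* zero-copy: trade the caller's spare page for our
                 * full reader page */
                void *full = cb->reader_page;
                cb->reader_page = *data_page;
                *data_page = full;
        } else {
                /* partial page: copy events into the caller's page */
                memcpy(*data_page, cb->reader_page, page_size);
        }
        cb->lost_events = 0;
}
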