Lines Matching refs:bpage

373 static void rb_init_page(struct buffer_data_page *bpage) in rb_init_page() argument
375 local_set(&bpage->commit, 0); in rb_init_page()
394 static void free_buffer_page(struct buffer_page *bpage) in free_buffer_page() argument
396 free_page((unsigned long)bpage->page); in free_buffer_page()
397 kfree(bpage); in free_buffer_page()
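
The entries at 373-397 show the split between the buffer_page bookkeeping struct and the page-sized buffer_data_page it owns: rb_init_page() clears the data page's commit counter, and free_buffer_page() releases the data page before the struct that tracks it. Below is a minimal userspace sketch of that ownership model; the struct names are reused for readability, plain malloc()/free() stand in for the kernel's page allocator, and fields such as time_stamp are elided.

	#include <stdlib.h>

	#define PAGE_SIZE 4096

	/* data page: a small header (commit) followed by the event payload area */
	struct buffer_data_page {
		unsigned long	commit;		/* bytes committed on this page */
		char		data[PAGE_SIZE - sizeof(unsigned long)];
	};

	/* bookkeeping struct, allocated separately from the data page it owns */
	struct buffer_page {
		struct buffer_data_page	*page;	/* the actual data page */
	};

	static void rb_init_page(struct buffer_data_page *dpage)
	{
		dpage->commit = 0;		/* nothing committed yet */
	}

	static struct buffer_page *alloc_buffer_page(void)
	{
		struct buffer_page *bpage = calloc(1, sizeof(*bpage));

		if (!bpage)
			return NULL;
		bpage->page = malloc(PAGE_SIZE);	/* kernel: page allocator */
		if (!bpage->page) {
			free(bpage);
			return NULL;
		}
		rb_init_page(bpage->page);
		return bpage;
	}

	static void free_buffer_page(struct buffer_page *bpage)
	{
		free(bpage->page);		/* data page first ... */
		free(bpage);			/* ... then the bookkeeping struct */
	}

Keeping the bookkeeping separate from the data page is what later lets whole data pages change hands between writer and reader without copying their contents.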
966 struct buffer_page **bpage) in rb_inc_page() argument
968 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
970 *bpage = list_entry(p, struct buffer_page, list); in rb_inc_page()
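
rb_inc_page() (966-970) advances a cursor one page around the per-CPU ring: it reads ->list.next, strips the flag bits that the ring buffer keeps in the low bits of that pointer (rb_list_head()), and converts the result back to its containing buffer_page. A compact sketch of that masked walk, with a hand-rolled list_head and container_of() instead of the kernel headers, and the cpu_buffer argument dropped:

	#include <stddef.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	/* the ring buffer stores HEAD/UPDATE flags in the low two bits of ->next,
	 * so the pointer has to be masked before it can be followed */
	#define RB_FLAG_MASK	3UL

	static struct list_head *rb_list_head(struct list_head *list)
	{
		unsigned long val = (unsigned long)list;

		return (struct list_head *)(val & ~RB_FLAG_MASK);
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_entry(ptr, type, member)	container_of(ptr, type, member)

	struct buffer_page {
		struct list_head list;		/* links the page into the ring */
		/* write/entries/page fields elided */
	};

	/* step the cursor to the next page of the ring */
	static void rb_inc_page(struct buffer_page **bpage)
	{
		struct list_head *p = rb_list_head((*bpage)->list.next);

		*bpage = list_entry(p, struct buffer_page, list);
	}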
1099 struct buffer_page *bpage) in rb_check_bpage() argument
1101 unsigned long val = (unsigned long)bpage; in rb_check_bpage()
1132 struct buffer_page *bpage, *tmp; in rb_check_pages() local
1148 list_for_each_entry_safe(bpage, tmp, head, list) { in rb_check_pages()
1150 bpage->list.next->prev != &bpage->list)) in rb_check_pages()
1153 bpage->list.prev->next != &bpage->list)) in rb_check_pages()
1155 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
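
rb_check_pages() (1132-1155) verifies the ring's integrity: for every page, the next node's prev must point back at it and the prev node's next must point forward to it. The same invariant check over the simplified list_head from the previous sketch, assuming no HEAD/UPDATE flag bits are set while the check runs:

	/* verify the doubly-linked ring is self-consistent: every node must be
	 * pointed back at by both of its neighbours; returns 0 if ok, -1 if not */
	static int rb_check_ring(struct list_head *head)
	{
		struct list_head *p = head;

		do {
			if (p->next->prev != p)		/* forward link not mirrored */
				return -1;
			if (p->prev->next != p)		/* backward link not mirrored */
				return -1;
			p = p->next;
		} while (p != head);

		return 0;
	}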
1166 struct buffer_page *bpage, *tmp; in __rb_allocate_pages() local
1176 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), in __rb_allocate_pages()
1179 if (!bpage) in __rb_allocate_pages()
1182 list_add(&bpage->list, pages); in __rb_allocate_pages()
1188 bpage->page = page_address(page); in __rb_allocate_pages()
1189 rb_init_page(bpage->page); in __rb_allocate_pages()
1195 list_for_each_entry_safe(bpage, tmp, pages, list) { in __rb_allocate_pages()
1196 list_del_init(&bpage->list); in __rb_allocate_pages()
1197 free_buffer_page(bpage); in __rb_allocate_pages()
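
__rb_allocate_pages() (1166-1197) builds the requested pages on a temporary list and, if any allocation fails, unwinds the partial list and frees every page already built; the entries at 1633-1637 and 1843-1846 show the same unwind pattern in rb_insert_pages() and in the ring_buffer_resize() error path. A sketch of that allocate-or-unwind shape, assuming a buffer_page that combines the earlier sketches (a list_head plus the data-page pointer) and reusing alloc_buffer_page(), free_buffer_page() and list_entry() from them:

	static void list_add(struct list_head *node, struct list_head *head)
	{
		node->next = head->next;
		node->prev = head;
		head->next->prev = node;
		head->next = node;
	}

	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = entry;
	}

	/* put nr_pages freshly allocated pages on 'pages' (which the caller has
	 * initialized to an empty list); on failure free whatever was built.
	 * The kernel returns -ENOMEM and walks the partial list with
	 * list_for_each_entry_safe(); a plain while loop is used here. */
	static int allocate_pages(struct list_head *pages, unsigned int nr_pages)
	{
		unsigned int i;

		for (i = 0; i < nr_pages; i++) {
			struct buffer_page *bpage = alloc_buffer_page();

			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, pages);
		}
		return 0;

	free_pages:
		while (pages->next != pages) {
			struct buffer_page *bpage =
				list_entry(pages->next, struct buffer_page, list);

			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		return -1;
	}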
1232 struct buffer_page *bpage; in rb_allocate_cpu_buffer() local
1252 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), in rb_allocate_cpu_buffer()
1254 if (!bpage) in rb_allocate_cpu_buffer()
1257 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1259 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1263 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1264 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1292 struct buffer_page *bpage, *tmp; in rb_free_cpu_buffer() local
1299 list_for_each_entry_safe(bpage, tmp, head, list) { in rb_free_cpu_buffer()
1300 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1301 free_buffer_page(bpage); in rb_free_cpu_buffer()
1303 bpage = list_entry(head, struct buffer_page, list); in rb_free_cpu_buffer()
1304 free_buffer_page(bpage); in rb_free_cpu_buffer()
1446 static inline unsigned long rb_page_entries(struct buffer_page *bpage) in rb_page_entries() argument
1448 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1451 static inline unsigned long rb_page_write(struct buffer_page *bpage) in rb_page_write() argument
1453 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
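
rb_page_entries() and rb_page_write() (1446-1453) both mask with RB_WRITE_MASK because each field packs a byte offset (or entry count) in its low bits and a counter in the bits above, all inside one atomic word. A userspace sketch of that packing with C11 atomics; the 20-bit split is assumed here purely for illustration, the real boundary being whatever RB_WRITE_MASK defines in ring_buffer.c:

	#include <stdatomic.h>

	#define RB_WRITE_MASK	((1UL << 20) - 1)	/* low bits: offset */
	#define RB_WRITE_INTCNT	(1UL << 20)		/* high bits: counter */

	static unsigned long rb_page_write(atomic_ulong *write)
	{
		/* strip the counter bits, keep only the write offset */
		return atomic_load(write) & RB_WRITE_MASK;
	}

	static void rb_bump_counter(atomic_ulong *write)
	{
		/* advance the counter without disturbing the offset bits */
		atomic_fetch_add(write, RB_WRITE_INTCNT);
	}

Packing both into one word means a single atomic operation can update the offset and, at the same time, detect that the page was recycled underneath an interrupted writer.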
1633 struct buffer_page *bpage, *tmp; in rb_insert_pages() local
1634 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1636 list_del_init(&bpage->list); in rb_insert_pages()
1637 free_buffer_page(bpage); in rb_insert_pages()
1835 struct buffer_page *bpage, *tmp; in ring_buffer_resize() local
1843 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
1845 list_del_init(&bpage->list); in ring_buffer_resize()
1846 free_buffer_page(bpage); in ring_buffer_resize()
1866 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) in __rb_data_page_index() argument
1868 return bpage->data + index; in __rb_data_page_index()
1871 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) in __rb_page_index() argument
1873 return bpage->page->data + index; in __rb_page_index()
1889 static inline unsigned rb_page_commit(struct buffer_page *bpage) in rb_page_commit() argument
1891 return local_read(&bpage->page->commit); in rb_page_commit()
1895 static inline unsigned rb_page_size(struct buffer_page *bpage) in rb_page_size() argument
1897 return rb_page_commit(bpage); in rb_page_size()
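
__rb_page_index(), rb_page_commit() and rb_page_size() (1866-1897) show how a page's payload is addressed: events live at byte offsets inside the data page, and a page's readable size is simply its commit offset. A sketch built on the types from the first sketch; the fixed-size fake_event record is invented only to keep the walk short, since real ring buffer events are variable length with their own header format:

	/* address of the data that starts 'index' bytes into the page */
	static void *__rb_page_index(struct buffer_page *bpage, unsigned int index)
	{
		return bpage->page->data + index;
	}

	/* a page's readable size is simply its commit offset */
	static unsigned int rb_page_size(struct buffer_page *bpage)
	{
		return bpage->page->commit;
	}

	struct fake_event {
		unsigned int	len;		/* payload bytes that follow */
	};

	static void walk_page(struct buffer_page *bpage,
			      void (*cb)(struct fake_event *ev))
	{
		unsigned int pos = 0;
		unsigned int commit = rb_page_size(bpage);	/* readable bytes */

		while (pos < commit) {
			struct fake_event *ev = __rb_page_index(bpage, pos);

			cb(ev);
			pos += sizeof(*ev) + ev->len;	/* next record */
		}
	}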
2480 struct buffer_page *bpage; in rb_try_to_discard() local
2489 bpage = cpu_buffer->tail_page; in rb_try_to_discard()
2491 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
2493 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
2503 index = local_cmpxchg(&bpage->write, old_index, new_index); in rb_try_to_discard()
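
rb_try_to_discard() (2480-2503) rolls the tail page's write offset back with a cmpxchg so a just-reserved event can be un-reserved, but only if nothing was written after it. A sketch of that check-and-roll-back using C11 atomics, with the same illustrative offset/counter split as above:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define RB_WRITE_MASK	((1UL << 20) - 1)	/* illustrative split, as above */

	/* Roll the write offset back from old_index (end of the just-reserved
	 * event) to new_index (its start), but only if old_index is still the
	 * current offset, i.e. nothing was written after the event.  A failed
	 * compare-exchange means another writer got in and the roll-back is
	 * abandoned. */
	static bool try_to_discard(atomic_ulong *write, unsigned long old_index,
				   unsigned long new_index)
	{
		unsigned long cur = atomic_load(write);
		unsigned long counter = cur & ~RB_WRITE_MASK;	/* keep high bits */
		unsigned long expected = counter | old_index;

		return atomic_compare_exchange_strong(write, &expected,
						      counter | new_index);
	}

If the roll-back loses the race, the event stays in the buffer; the kernel has already marked it as padding, so readers simply skip over it.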
2895 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry() local
2901 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
2902 local_dec(&bpage->entries); in rb_decrement_entry()
2910 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2911 start = bpage; in rb_decrement_entry()
2913 if (bpage->page == (void *)addr) { in rb_decrement_entry()
2914 local_dec(&bpage->entries); in rb_decrement_entry()
2917 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2918 } while (bpage != start); in rb_decrement_entry()
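
rb_decrement_entry() (2895-2918) has to find which page owns a discarded event: it tries the commit page first and otherwise walks the ring once with rb_inc_page(), comparing each page's data pointer against the event's page-aligned address. A sketch of that lookup, assuming page-aligned data pages plus the buffer_page (with list, page and an entries counter) and rb_inc_page() from the earlier sketches; the kernel uses local_dec() and warns if no page matches:

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	static void *addr_to_page(const void *addr)
	{
		return (void *)((unsigned long)addr & PAGE_MASK);
	}

	/* an event at 'addr' was discarded: find the page that contains it and
	 * drop that page's entry count */
	static void decrement_entry(struct buffer_page *commit_page, void *addr)
	{
		struct buffer_page *bpage = commit_page;
		struct buffer_page *start;

		if (bpage->page == addr_to_page(addr)) {	/* fast path */
			bpage->entries--;
			return;
		}

		rb_inc_page(&bpage);		/* otherwise walk the ring once */
		start = bpage;
		do {
			if (bpage->page == addr_to_page(addr)) {
				bpage->entries--;
				return;
			}
			rb_inc_page(&bpage);
		} while (bpage != start);
	}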
3210 struct buffer_page *bpage; in ring_buffer_oldest_event_ts() local
3223 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3225 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3226 if (bpage) in ring_buffer_oldest_event_ts()
3227 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4418 struct buffer_data_page *bpage; in ring_buffer_alloc_read_page() local
4426 bpage = page_address(page); in ring_buffer_alloc_read_page()
4428 rb_init_page(bpage); in ring_buffer_alloc_read_page()
4430 return bpage; in ring_buffer_alloc_read_page()
4485 struct buffer_data_page *bpage; in ring_buffer_read_page() local
4509 bpage = *data_page; in ring_buffer_read_page()
4510 if (!bpage) in ring_buffer_read_page()
4565 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
4582 local_set(&bpage->commit, pos); in ring_buffer_read_page()
4583 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
4593 rb_init_page(bpage); in ring_buffer_read_page()
4594 bpage = reader->page; in ring_buffer_read_page()
4599 *data_page = bpage; in ring_buffer_read_page()
4607 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
4613 commit = local_read(&bpage->commit); in ring_buffer_read_page()
4622 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
4624 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
4627 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
4634 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); in ring_buffer_read_page()
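
The ring_buffer_read_page() entries (4485-4634) show how the returned page is finished off, whether it was filled by copying or by swapping the reader page out: the commit word carries the committed size in its low bits plus RB_MISSED_STORED/RB_MISSED_EVENTS flags, and when events were lost their count is appended right after the committed data. A consumer-side sketch that decodes such a page; the flag bit positions are assumed here for illustration, the authoritative values being the RB_MISSED_* definitions in ring_buffer.c:

	#include <stdio.h>
	#include <string.h>

	#define RB_MISSED_EVENTS	(1UL << 31)	/* events were lost before this page */
	#define RB_MISSED_STORED	(1UL << 30)	/* the lost-event count is stored */

	struct read_page {
		unsigned long	commit;		/* committed size + RB_MISSED_* flags */
		char		data[4096 - sizeof(unsigned long)];
	};

	static void decode_read_page(struct read_page *rpage)
	{
		unsigned long commit = rpage->commit;
		unsigned long size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);

		printf("%lu bytes of committed events\n", size);

		if (commit & RB_MISSED_EVENTS) {
			unsigned long missed = 0;

			if (commit & RB_MISSED_STORED)
				/* the count sits just past the committed data */
				memcpy(&missed, rpage->data + size, sizeof(missed));
			printf("events were lost before this page (count %lu, 0 = not stored)\n",
			       missed);
		}
	}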