Searched refs:head_page (Results 1 - 4 of 4) sorted by relevance
/linux-4.1.27/kernel/trace/ |
H A D | ring_buffer.c | 455 * head_page == tail_page && head == tail then buffer is empty. 467 struct buffer_page *head_page; /* read from head */ member in struct:ring_buffer_per_cpu 517 struct buffer_page *head_page; member in struct:ring_buffer_iter 830 * Because the reader may move the head_page pointer, we can 883 head = cpu_buffer->head_page; rb_head_page_activate() 981 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) rb_set_head_page() 989 page = head = cpu_buffer->head_page; rb_set_head_page() 999 cpu_buffer->head_page = page; rb_set_head_page() 1135 if (cpu_buffer->head_page) rb_check_pages() 1273 cpu_buffer->head_page rb_allocate_cpu_buffer() 1275 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; rb_allocate_cpu_buffer() 1515 cpu_buffer->head_page = list_entry(next_page, rb_remove_pages() 1590 struct list_head *head_page, *prev_page, *r; rb_insert_pages() local 1594 head_page = &rb_set_head_page(cpu_buffer)->list; rb_insert_pages() 1595 if (!head_page) rb_insert_pages() 1597 prev_page = head_page->prev; rb_insert_pages() 1603 ((unsigned long)head_page | RB_PAGE_HEAD); rb_insert_pages() 1616 head_page->prev = last_page; rb_insert_pages() 1886 return __rb_page_index(iter->head_page, iter->head); rb_iter_head_event() 1997 if (iter->head_page == cpu_buffer->reader_page) rb_inc_iter() 1998 iter->head_page = rb_set_head_page(cpu_buffer); rb_inc_iter() 2000 rb_inc_page(cpu_buffer, &iter->head_page); rb_inc_iter() 2002 iter->read_stamp = iter->head_page->page->time_stamp; rb_inc_iter() 3407 iter->head_page = cpu_buffer->reader_page; rb_iter_reset() 3410 iter->cache_reader_page = iter->head_page; rb_iter_reset() 3416 iter->read_stamp = iter->head_page->page->time_stamp; rb_iter_reset() 3452 return iter->head_page == cpu_buffer->commit_page && ring_buffer_iter_empty() 3628 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); rb_get_reader_page() 3682 if (iter->head >= rb_page_size(iter->head_page)) { rb_advance_iter() 3684 if (iter->head_page == cpu_buffer->commit_page) rb_advance_iter() 3699 (iter->head_page == cpu_buffer->commit_page) && rb_advance_iter() 3708 if ((iter->head >= rb_page_size(iter->head_page)) && rb_advance_iter() 3709 (iter->head_page != cpu_buffer->commit_page)) rb_advance_iter() 3822 if (iter->head >= rb_page_size(iter->head_page)) { rb_iter_peek() 4171 cpu_buffer->head_page rb_reset_cpu() 4173 local_set(&cpu_buffer->head_page->write, 0); rb_reset_cpu() 4174 local_set(&cpu_buffer->head_page->entries, 0); rb_reset_cpu() 4175 local_set(&cpu_buffer->head_page->page->commit, 0); rb_reset_cpu() 4177 cpu_buffer->head_page->read = 0; rb_reset_cpu() 4179 cpu_buffer->tail_page = cpu_buffer->head_page; rb_reset_cpu() 4180 cpu_buffer->commit_page = cpu_buffer->head_page; rb_reset_cpu()
|
H A D | trace.h | 694 extern void *head_page(struct trace_array_cpu *data);
|
/linux-4.1.27/drivers/infiniband/core/ |
H A D | umem_odp.c | 648 struct page *head_page = compound_head(page); ib_umem_odp_unmap_dma_pages() local 658 set_page_dirty(head_page); ib_umem_odp_unmap_dma_pages()
|
/linux-4.1.27/mm/ |
H A D | page_alloc.c | 786 static int free_tail_pages_check(struct page *head_page, struct page *page) free_tail_pages_check() argument 794 if (unlikely(page->first_page != head_page)) { free_tail_pages_check()
|
Completed in 206 milliseconds