Searched refs:rb (Results 1 - 200 of 240) sorted by relevance

/linux-4.1.27/lib/
rbtree_test.c 12 struct rb_node rb; member in struct:test_node
31 if (key < rb_entry(parent, struct test_node, rb)->key) insert()
37 rb_link_node(&node->rb, parent, new); insert()
38 rb_insert_color(&node->rb, root); insert()
43 rb_erase(&node->rb, root); erase()
49 if (node->rb.rb_left) { augment_recompute()
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node, augment_recompute()
51 rb)->augmented; augment_recompute()
55 if (node->rb.rb_right) { augment_recompute()
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node, augment_recompute()
57 rb)->augmented; augment_recompute()
64 RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
76 parent = rb_entry(rb_parent, struct test_node, rb); insert_augmented()
80 new = &parent->rb.rb_left; insert_augmented()
82 new = &parent->rb.rb_right; insert_augmented()
86 rb_link_node(&node->rb, rb_parent, new); insert_augmented()
87 rb_insert_augmented(&node->rb, root, &augment_callbacks); insert_augmented()
92 rb_erase_augmented(&node->rb, root, &augment_callbacks); erase_augmented()
104 static bool is_red(struct rb_node *rb) is_red() argument
106 return !(rb->__rb_parent_color & 1); is_red()
109 static int black_path_count(struct rb_node *rb) black_path_count() argument
112 for (count = 0; rb; rb = rb_parent(rb)) black_path_count()
113 count += !is_red(rb); black_path_count()
121 rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) check_postorder_foreach()
129 struct rb_node *rb; check_postorder() local
131 for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb)) check_postorder()
139 struct rb_node *rb; check() local
143 for (rb = rb_first(&root); rb; rb = rb_next(rb)) { check()
144 struct test_node *node = rb_entry(rb, struct test_node, rb); check()
146 WARN_ON_ONCE(is_red(rb) && check()
147 (!rb_parent(rb) || is_red(rb_parent(rb)))); check()
149 blacks = black_path_count(rb); check()
151 WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) && check()
152 blacks != black_path_count(rb)); check()
166 struct rb_node *rb; check_augmented() local
169 for (rb = rb_first(&root); rb; rb = rb_next(rb)) { check_augmented()
170 struct test_node *node = rb_entry(rb, struct test_node, rb); check_augmented()
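The rbtree_test.c fragments above (insert() and insert_augmented()) use the standard kernel rbtree insertion idiom: walk down from the root while remembering the parent and the link slot, then call rb_link_node() followed by rb_insert_color(). Below is a minimal sketch of that idiom, modeled on the fragments; the node struct and function names are invented for illustration and are not copied from the file.

#include <linux/types.h>
#include <linux/rbtree.h>

/* Minimal node shape, mirroring the test_node fragments above. */
struct sketch_node {
	u32 key;
	struct rb_node rb;
};

static void sketch_insert(struct sketch_node *node, struct rb_root *root)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	u32 key = node->key;

	while (*new) {				/* walk down to the insertion point */
		parent = *new;
		if (key < rb_entry(parent, struct sketch_node, rb)->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&node->rb, parent, new);	/* link under the chosen parent */
	rb_insert_color(&node->rb, root);	/* rebalance and recolor */
}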
interval_tree.c 9 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
rbtree.c 47 static inline void rb_set_black(struct rb_node *rb) rb_set_black() argument
49 rb->__rb_parent_color |= RB_BLACK; rb_set_black()
/linux-4.1.27/kernel/events/
ring_buffer.c 22 atomic_set(&handle->rb->poll, POLLIN); perf_output_wakeup()
38 struct ring_buffer *rb = handle->rb; perf_output_get_handle() local
41 local_inc(&rb->nest); perf_output_get_handle()
42 handle->wakeup = local_read(&rb->wakeup); perf_output_get_handle()
47 struct ring_buffer *rb = handle->rb; perf_output_put_handle() local
51 head = local_read(&rb->head); perf_output_put_handle()
57 if (!local_dec_and_test(&rb->nest)) perf_output_put_handle()
87 rb->user_page->data_head = head; perf_output_put_handle()
93 if (unlikely(head != local_read(&rb->head))) { perf_output_put_handle()
94 local_inc(&rb->nest); perf_output_put_handle()
98 if (handle->wakeup != local_read(&rb->wakeup)) perf_output_put_handle()
108 struct ring_buffer *rb; perf_output_begin() local
124 rb = rcu_dereference(event->rb); perf_output_begin()
125 if (unlikely(!rb)) perf_output_begin()
128 if (unlikely(!rb->nr_pages)) perf_output_begin()
131 handle->rb = rb; perf_output_begin()
134 have_lost = local_read(&rb->lost); perf_output_begin()
144 tail = ACCESS_ONCE(rb->user_page->data_tail); perf_output_begin()
145 offset = head = local_read(&rb->head); perf_output_begin()
146 if (!rb->overwrite && perf_output_begin()
147 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) perf_output_begin()
163 } while (local_cmpxchg(&rb->head, offset, head) != offset); perf_output_begin()
170 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) perf_output_begin()
171 local_add(rb->watermark, &rb->wakeup); perf_output_begin()
173 page_shift = PAGE_SHIFT + page_order(rb); perf_output_begin()
175 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); perf_output_begin()
177 handle->addr = rb->data_pages[handle->page] + offset; perf_output_begin()
187 lost_event.lost = local_xchg(&rb->lost, 0); perf_output_begin()
198 local_inc(&rb->lost); perf_output_begin()
227 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) ring_buffer_init() argument
229 long max_size = perf_data_size(rb); ring_buffer_init()
232 rb->watermark = min(max_size, watermark); ring_buffer_init()
234 if (!rb->watermark) ring_buffer_init()
235 rb->watermark = max_size / 2; ring_buffer_init()
238 rb->overwrite = 0; ring_buffer_init()
240 rb->overwrite = 1; ring_buffer_init()
242 atomic_set(&rb->refcount, 1); ring_buffer_init()
244 INIT_LIST_HEAD(&rb->event_list); ring_buffer_init()
245 spin_lock_init(&rb->event_lock); ring_buffer_init()
246 init_irq_work(&rb->irq_work, rb_irq_work); ring_buffer_init()
249 static void ring_buffer_put_async(struct ring_buffer *rb) ring_buffer_put_async() argument
251 if (!atomic_dec_and_test(&rb->refcount)) ring_buffer_put_async()
254 rb->rcu_head.next = (void *)rb; ring_buffer_put_async()
255 irq_work_queue(&rb->irq_work); ring_buffer_put_async()
273 struct ring_buffer *rb; perf_aux_output_begin() local
283 rb = ring_buffer_get(output_event); perf_aux_output_begin()
284 if (!rb) perf_aux_output_begin()
287 if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount)) perf_aux_output_begin()
294 if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1))) perf_aux_output_begin()
297 aux_head = local_read(&rb->aux_head); perf_aux_output_begin()
299 handle->rb = rb; perf_aux_output_begin()
309 if (!rb->aux_overwrite) { perf_aux_output_begin()
310 aux_tail = ACCESS_ONCE(rb->user_page->aux_tail); perf_aux_output_begin()
311 handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark; perf_aux_output_begin()
312 if (aux_head - aux_tail < perf_aux_size(rb)) perf_aux_output_begin()
313 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); perf_aux_output_begin()
323 local_set(&rb->aux_nest, 0); perf_aux_output_begin()
328 return handle->rb->aux_priv; perf_aux_output_begin()
331 rb_free_aux(rb); perf_aux_output_begin()
334 ring_buffer_put_async(rb); perf_aux_output_begin()
349 struct ring_buffer *rb = handle->rb; perf_aux_output_end() local
358 if (rb->aux_overwrite) { perf_aux_output_end()
362 local_set(&rb->aux_head, aux_head); perf_aux_output_end()
364 aux_head = local_read(&rb->aux_head); perf_aux_output_end()
365 local_add(size, &rb->aux_head); perf_aux_output_end()
376 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head); perf_aux_output_end()
378 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) { perf_aux_output_end()
380 local_add(rb->aux_watermark, &rb->aux_wakeup); perf_aux_output_end()
391 local_set(&rb->aux_nest, 0); perf_aux_output_end()
392 rb_free_aux(rb); perf_aux_output_end()
393 ring_buffer_put_async(rb); perf_aux_output_end()
402 struct ring_buffer *rb = handle->rb; perf_aux_output_skip() local
408 local_add(size, &rb->aux_head); perf_aux_output_skip()
410 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head); perf_aux_output_skip()
411 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) { perf_aux_output_skip()
413 local_add(rb->aux_watermark, &rb->aux_wakeup); perf_aux_output_skip()
414 handle->wakeup = local_read(&rb->aux_wakeup) + perf_aux_output_skip()
415 rb->aux_watermark; perf_aux_output_skip()
430 return handle->rb->aux_priv; perf_get_aux()
458 static void rb_free_aux_page(struct ring_buffer *rb, int idx) rb_free_aux_page() argument
460 struct page *page = virt_to_page(rb->aux_pages[idx]); rb_free_aux_page()
467 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, rb_alloc_aux() argument
497 rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); rb_alloc_aux()
498 if (!rb->aux_pages) rb_alloc_aux()
501 rb->free_aux = event->pmu->free_aux; rb_alloc_aux()
502 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { rb_alloc_aux()
506 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); rb_alloc_aux()
511 for (last = rb->aux_nr_pages + (1 << page_private(page)); rb_alloc_aux()
512 last > rb->aux_nr_pages; rb->aux_nr_pages++) rb_alloc_aux()
513 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); rb_alloc_aux()
524 struct page *page = virt_to_page(rb->aux_pages[0]); rb_alloc_aux()
530 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, rb_alloc_aux()
532 if (!rb->aux_priv) rb_alloc_aux()
543 atomic_set(&rb->aux_refcount, 1); rb_alloc_aux()
545 rb->aux_overwrite = overwrite; rb_alloc_aux()
546 rb->aux_watermark = watermark; rb_alloc_aux()
548 if (!rb->aux_watermark && !rb->aux_overwrite) rb_alloc_aux()
549 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); rb_alloc_aux()
553 rb->aux_pgoff = pgoff; rb_alloc_aux()
555 rb_free_aux(rb); rb_alloc_aux()
560 static void __rb_free_aux(struct ring_buffer *rb) __rb_free_aux() argument
564 if (rb->aux_priv) { __rb_free_aux()
565 rb->free_aux(rb->aux_priv); __rb_free_aux()
566 rb->free_aux = NULL; __rb_free_aux()
567 rb->aux_priv = NULL; __rb_free_aux()
570 if (rb->aux_nr_pages) { __rb_free_aux()
571 for (pg = 0; pg < rb->aux_nr_pages; pg++) __rb_free_aux()
572 rb_free_aux_page(rb, pg); __rb_free_aux()
574 kfree(rb->aux_pages); __rb_free_aux()
575 rb->aux_nr_pages = 0; __rb_free_aux()
579 void rb_free_aux(struct ring_buffer *rb) rb_free_aux() argument
581 if (atomic_dec_and_test(&rb->aux_refcount)) rb_free_aux()
582 irq_work_queue(&rb->irq_work); rb_free_aux()
587 struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); rb_irq_work() local
589 if (!atomic_read(&rb->aux_refcount)) rb_irq_work()
590 __rb_free_aux(rb); rb_irq_work()
592 if (rb->rcu_head.next == (void *)rb) rb_irq_work()
593 call_rcu(&rb->rcu_head, rb_free_rcu); rb_irq_work()
603 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) __perf_mmap_to_page() argument
605 if (pgoff > rb->nr_pages) __perf_mmap_to_page()
609 return virt_to_page(rb->user_page); __perf_mmap_to_page()
611 return virt_to_page(rb->data_pages[pgoff - 1]); __perf_mmap_to_page()
629 struct ring_buffer *rb; rb_alloc() local
636 rb = kzalloc(size, GFP_KERNEL); rb_alloc()
637 if (!rb) rb_alloc()
640 rb->user_page = perf_mmap_alloc_page(cpu); rb_alloc()
641 if (!rb->user_page) rb_alloc()
645 rb->data_pages[i] = perf_mmap_alloc_page(cpu); rb_alloc()
646 if (!rb->data_pages[i]) rb_alloc()
650 rb->nr_pages = nr_pages; rb_alloc()
652 ring_buffer_init(rb, watermark, flags); rb_alloc()
654 return rb; rb_alloc()
658 free_page((unsigned long)rb->data_pages[i]); rb_alloc()
660 free_page((unsigned long)rb->user_page); rb_alloc()
663 kfree(rb); rb_alloc()
677 void rb_free(struct ring_buffer *rb) rb_free() argument
681 perf_mmap_free_page((unsigned long)rb->user_page); rb_free()
682 for (i = 0; i < rb->nr_pages; i++) rb_free()
683 perf_mmap_free_page((unsigned long)rb->data_pages[i]); rb_free()
684 kfree(rb); rb_free()
688 static int data_page_nr(struct ring_buffer *rb) data_page_nr() argument
690 return rb->nr_pages << page_order(rb); data_page_nr()
694 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) __perf_mmap_to_page() argument
697 if (pgoff > data_page_nr(rb)) __perf_mmap_to_page()
700 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); __perf_mmap_to_page()
712 struct ring_buffer *rb; rb_free_work() local
716 rb = container_of(work, struct ring_buffer, work); rb_free_work()
717 nr = data_page_nr(rb); rb_free_work()
719 base = rb->user_page; rb_free_work()
725 kfree(rb); rb_free_work()
728 void rb_free(struct ring_buffer *rb) rb_free() argument
730 schedule_work(&rb->work); rb_free()
735 struct ring_buffer *rb; rb_alloc() local
742 rb = kzalloc(size, GFP_KERNEL); rb_alloc()
743 if (!rb) rb_alloc()
746 INIT_WORK(&rb->work, rb_free_work); rb_alloc()
752 rb->user_page = all_buf; rb_alloc()
753 rb->data_pages[0] = all_buf + PAGE_SIZE; rb_alloc()
754 rb->page_order = ilog2(nr_pages); rb_alloc()
755 rb->nr_pages = !!nr_pages; rb_alloc()
757 ring_buffer_init(rb, watermark, flags); rb_alloc()
759 return rb; rb_alloc()
762 kfree(rb); rb_alloc()
771 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) perf_mmap_to_page() argument
773 if (rb->aux_nr_pages) { perf_mmap_to_page()
775 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages) perf_mmap_to_page()
779 if (pgoff >= rb->aux_pgoff) perf_mmap_to_page()
780 return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]); perf_mmap_to_page()
783 return __perf_mmap_to_page(rb, pgoff); perf_mmap_to_page()
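In the perf_output_begin() fragments above (listing lines 173-177), the data area is treated as rb->nr_pages pages of PAGE_SIZE << page_order(rb) bytes each, and the claimed offset is turned into a page index plus an address inside that page. A hedged sketch of that mapping follows; it assumes the struct ring_buffer and page_order() definitions from kernel/events/internal.h (shown next in this listing), the helper name is invented, and the masking of the offset within the page is inferred from context rather than shown above.

/* Sketch: map a linear data-area offset to a backing page address. */
static void *sketch_offset_to_addr(struct ring_buffer *rb, unsigned long offset)
{
	unsigned long page_shift = PAGE_SHIFT + page_order(rb);
	unsigned long page = (offset >> page_shift) & (rb->nr_pages - 1);

	offset &= (1UL << page_shift) - 1;	/* offset within the page (assumed) */
	return rb->data_pages[page] + offset;
}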
internal.h 58 extern void rb_free(struct ring_buffer *rb);
62 struct ring_buffer *rb; rb_free_rcu() local
64 rb = container_of(rcu_head, struct ring_buffer, rcu_head); rb_free_rcu()
65 rb_free(rb); rb_free_rcu()
71 extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
73 extern void rb_free_aux(struct ring_buffer *rb);
75 extern void ring_buffer_put(struct ring_buffer *rb);
77 static inline bool rb_has_aux(struct ring_buffer *rb) rb_has_aux() argument
79 return !!rb->aux_nr_pages; rb_has_aux()
95 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
104 static inline int page_order(struct ring_buffer *rb) page_order() argument
106 return rb->page_order; page_order()
111 static inline int page_order(struct ring_buffer *rb) page_order() argument
117 static inline unsigned long perf_data_size(struct ring_buffer *rb) perf_data_size() argument
119 return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); perf_data_size()
122 static inline unsigned long perf_aux_size(struct ring_buffer *rb) perf_aux_size() argument
124 return rb->aux_nr_pages << PAGE_SHIFT; perf_aux_size()
144 struct ring_buffer *rb = handle->rb; \
147 handle->page &= rb->nr_pages - 1; \
148 handle->addr = rb->data_pages[handle->page]; \
149 handle->size = PAGE_SIZE << page_order(rb); \
core.c 3449 struct ring_buffer *rb);
3597 if (event->rb) { _free_event()
3917 struct ring_buffer *rb; perf_poll() local
3926 * Pin the event->rb by taking event->mmap_mutex; otherwise perf_poll()
3927 * perf_event_set_output() can swizzle our rb and make us miss wakeups. perf_poll()
3930 rb = event->rb; perf_poll()
3931 if (rb) perf_poll()
3932 events = atomic_xchg(&rb->poll, 0); perf_poll()
4241 struct ring_buffer *rb; perf_event_init_userpage() local
4244 rb = rcu_dereference(event->rb); perf_event_init_userpage()
4245 if (!rb) perf_event_init_userpage()
4248 userpg = rb->user_page; perf_event_init_userpage()
4254 userpg->data_size = perf_data_size(rb); perf_event_init_userpage()
4273 struct ring_buffer *rb; perf_event_update_userpage() local
4277 rb = rcu_dereference(event->rb); perf_event_update_userpage()
4278 if (!rb) perf_event_update_userpage()
4292 userpg = rb->user_page; perf_event_update_userpage()
4323 struct ring_buffer *rb; perf_mmap_fault() local
4333 rb = rcu_dereference(event->rb); perf_mmap_fault()
4334 if (!rb) perf_mmap_fault()
4340 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); perf_mmap_fault()
4356 struct ring_buffer *rb) ring_buffer_attach()
4361 if (event->rb) { ring_buffer_attach()
4368 old_rb = event->rb; ring_buffer_attach()
4377 if (rb) { ring_buffer_attach()
4383 spin_lock_irqsave(&rb->event_lock, flags); ring_buffer_attach()
4384 list_add_rcu(&event->rb_entry, &rb->event_list); ring_buffer_attach()
4385 spin_unlock_irqrestore(&rb->event_lock, flags); ring_buffer_attach()
4388 rcu_assign_pointer(event->rb, rb); ring_buffer_attach()
4393 * Since we detached before setting the new rb, so that we ring_buffer_attach()
4394 * could attach the new rb, we could have missed a wakeup. ring_buffer_attach()
4403 struct ring_buffer *rb; ring_buffer_wakeup() local
4406 rb = rcu_dereference(event->rb); ring_buffer_wakeup()
4407 if (rb) { ring_buffer_wakeup()
4408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) ring_buffer_wakeup()
4416 struct ring_buffer *rb; ring_buffer_get() local
4419 rb = rcu_dereference(event->rb); ring_buffer_get()
4420 if (rb) { ring_buffer_get()
4421 if (!atomic_inc_not_zero(&rb->refcount)) ring_buffer_get()
4422 rb = NULL; ring_buffer_get()
4426 return rb; ring_buffer_get()
4429 void ring_buffer_put(struct ring_buffer *rb) ring_buffer_put() argument
4431 if (!atomic_dec_and_test(&rb->refcount)) ring_buffer_put()
4434 WARN_ON_ONCE(!list_empty(&rb->event_list)); ring_buffer_put()
4436 call_rcu(&rb->rcu_head, rb_free_rcu); ring_buffer_put()
4444 atomic_inc(&event->rb->mmap_count); perf_mmap_open()
4447 atomic_inc(&event->rb->aux_mmap_count); perf_mmap_open()
4465 struct ring_buffer *rb = ring_buffer_get(event); perf_mmap_close() local
4466 struct user_struct *mmap_user = rb->mmap_user; perf_mmap_close()
4467 int mmap_locked = rb->mmap_locked; perf_mmap_close()
4468 unsigned long size = perf_data_size(rb); perf_mmap_close()
4474 * rb->aux_mmap_count will always drop before rb->mmap_count and perf_mmap_close()
4478 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && perf_mmap_close()
4479 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { perf_mmap_close()
4480 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); perf_mmap_close()
4481 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; perf_mmap_close()
4483 rb_free_aux(rb); perf_mmap_close()
4487 atomic_dec(&rb->mmap_count); perf_mmap_close()
4496 if (atomic_read(&rb->mmap_count)) perf_mmap_close()
4502 * fact that rb::event_lock otherwise nests inside mmap_mutex. perf_mmap_close()
4506 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { perf_mmap_close()
4519 * swizzle the rb from under us while we were waiting to perf_mmap_close()
4522 * If we find a different rb; ignore this event, a next perf_mmap_close()
4527 if (event->rb == rb) perf_mmap_close()
4544 * ref on the rb and will free it whenever they are done with it. perf_mmap_close()
4555 ring_buffer_put(rb); /* could be last */ perf_mmap_close()
4571 struct ring_buffer *rb = NULL; perf_mmap() local
4580 * same rb. perf_mmap()
4594 * AUX area mapping: if rb->aux_nr_pages != 0, it's already perf_mmap()
4600 if (!event->rb) perf_mmap()
4608 rb = event->rb; perf_mmap()
4609 if (!rb) perf_mmap()
4612 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); perf_mmap()
4613 aux_size = ACCESS_ONCE(rb->user_page->aux_size); perf_mmap()
4615 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) perf_mmap()
4622 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) perf_mmap()
4629 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) perf_mmap()
4635 if (!atomic_inc_not_zero(&rb->mmap_count)) perf_mmap()
4638 if (rb_has_aux(rb)) { perf_mmap()
4639 atomic_inc(&rb->aux_mmap_count); perf_mmap()
4644 atomic_set(&rb->aux_mmap_count, 1); perf_mmap()
4651 * If we have rb pages ensure they're a power-of-two number, so we perf_mmap()
4663 if (event->rb) { perf_mmap()
4664 if (event->rb->nr_pages != nr_pages) { perf_mmap()
4669 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { perf_mmap()
4707 WARN_ON(!rb && event->rb); perf_mmap()
4712 if (!rb) { perf_mmap()
4713 rb = rb_alloc(nr_pages, perf_mmap()
4717 if (!rb) { perf_mmap()
4722 atomic_set(&rb->mmap_count, 1); perf_mmap()
4723 rb->mmap_user = get_current_user(); perf_mmap()
4724 rb->mmap_locked = extra; perf_mmap()
4726 ring_buffer_attach(event, rb); perf_mmap()
4731 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, perf_mmap()
4734 rb->aux_mmap_locked = extra; perf_mmap()
4743 } else if (rb) { perf_mmap()
4744 atomic_dec(&rb->mmap_count); perf_mmap()
5301 struct ring_buffer *rb = handle->rb; perf_output_sample() local
5302 int events = local_inc_return(&rb->events); perf_output_sample()
5305 local_sub(wakeup_events, &rb->events); perf_output_sample()
5306 local_inc(&rb->wakeup); perf_output_sample()
5846 * d_path() works from the end of the rb backwards, so we perf_event_mmap_event()
7799 struct ring_buffer *rb = NULL; perf_event_set_output() local
7816 * If its not a per-cpu rb, it must be the same task. perf_event_set_output()
7841 /* get the rb we want to redirect to */ perf_event_set_output()
7842 rb = ring_buffer_get(output_event); perf_event_set_output()
7843 if (!rb) perf_event_set_output()
7847 ring_buffer_attach(event, rb); perf_event_set_output()
4355 ring_buffer_attach(struct perf_event *event, struct ring_buffer *rb) ring_buffer_attach() argument
/linux-4.1.27/drivers/scsi/bfa/
bfa_ioc_ct.c 192 void __iomem *rb; bfa_ioc_ct_reg_init() local
195 rb = bfa_ioc_bar0(ioc); bfa_ioc_ct_reg_init()
197 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; bfa_ioc_ct_reg_init()
198 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; bfa_ioc_ct_reg_init()
199 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; bfa_ioc_ct_reg_init()
202 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; bfa_ioc_ct_reg_init()
203 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; bfa_ioc_ct_reg_init()
204 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; bfa_ioc_ct_reg_init()
205 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; bfa_ioc_ct_reg_init()
206 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; bfa_ioc_ct_reg_init()
207 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct_reg_init()
208 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct_reg_init()
210 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); bfa_ioc_ct_reg_init()
211 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); bfa_ioc_ct_reg_init()
212 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; bfa_ioc_ct_reg_init()
213 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; bfa_ioc_ct_reg_init()
214 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; bfa_ioc_ct_reg_init()
215 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct_reg_init()
216 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct_reg_init()
222 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); bfa_ioc_ct_reg_init()
223 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); bfa_ioc_ct_reg_init()
224 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_reg_init()
225 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_reg_init()
230 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); bfa_ioc_ct_reg_init()
231 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); bfa_ioc_ct_reg_init()
232 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); bfa_ioc_ct_reg_init()
233 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); bfa_ioc_ct_reg_init()
234 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); bfa_ioc_ct_reg_init()
239 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); bfa_ioc_ct_reg_init()
245 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); bfa_ioc_ct_reg_init()
251 void __iomem *rb; bfa_ioc_ct2_reg_init() local
254 rb = bfa_ioc_bar0(ioc); bfa_ioc_ct2_reg_init()
256 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; bfa_ioc_ct2_reg_init()
257 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; bfa_ioc_ct2_reg_init()
258 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; bfa_ioc_ct2_reg_init()
259 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; bfa_ioc_ct2_reg_init()
260 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; bfa_ioc_ct2_reg_init()
261 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; bfa_ioc_ct2_reg_init()
264 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; bfa_ioc_ct2_reg_init()
265 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; bfa_ioc_ct2_reg_init()
266 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; bfa_ioc_ct2_reg_init()
267 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct2_reg_init()
268 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct2_reg_init()
270 ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG); bfa_ioc_ct2_reg_init()
271 ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG); bfa_ioc_ct2_reg_init()
272 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; bfa_ioc_ct2_reg_init()
273 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct2_reg_init()
274 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct2_reg_init()
280 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); bfa_ioc_ct2_reg_init()
281 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); bfa_ioc_ct2_reg_init()
282 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG); bfa_ioc_ct2_reg_init()
283 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG); bfa_ioc_ct2_reg_init()
288 ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG); bfa_ioc_ct2_reg_init()
289 ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG); bfa_ioc_ct2_reg_init()
290 ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG); bfa_ioc_ct2_reg_init()
291 ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT); bfa_ioc_ct2_reg_init()
292 ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC); bfa_ioc_ct2_reg_init()
297 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); bfa_ioc_ct2_reg_init()
303 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); bfa_ioc_ct2_reg_init()
314 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct_map_port() local
320 r32 = readl(rb + FNC_PERS_REG); bfa_ioc_ct_map_port()
331 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct2_map_port() local
334 r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); bfa_ioc_ct2_map_port()
347 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct_isr_mode_set() local
350 r32 = readl(rb + FNC_PERS_REG); bfa_ioc_ct_isr_mode_set()
371 writel(r32, rb + FNC_PERS_REG); bfa_ioc_ct_isr_mode_set()
571 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct2_poweron() local
574 r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); bfa_ioc_ct2_poweron()
577 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); bfa_ioc_ct2_poweron()
583 rb + HOSTFN_MSIX_VT_OFST_NUMVT); bfa_ioc_ct2_poweron()
585 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); bfa_ioc_ct2_poweron()
589 bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode) bfa_ioc_ct_pll_init() argument
604 writel(0, (rb + OP_MODE)); bfa_ioc_ct_pll_init()
606 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); bfa_ioc_ct_pll_init()
608 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); bfa_ioc_ct_pll_init()
609 writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); bfa_ioc_ct_pll_init()
611 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); bfa_ioc_ct_pll_init()
612 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); bfa_ioc_ct_pll_init()
613 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_ct_pll_init()
614 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_ct_pll_init()
615 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_ct_pll_init()
616 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_ct_pll_init()
617 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_ct_pll_init()
618 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_ct_pll_init()
620 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
622 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
624 __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
626 __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
627 readl(rb + HOSTFN0_INT_MSK); bfa_ioc_ct_pll_init()
629 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_ct_pll_init()
630 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_ct_pll_init()
631 writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
632 writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
635 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); bfa_ioc_ct_pll_init()
636 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); bfa_ioc_ct_pll_init()
638 r32 = readl((rb + PSS_CTL_REG)); bfa_ioc_ct_pll_init()
640 writel(r32, (rb + PSS_CTL_REG)); bfa_ioc_ct_pll_init()
643 writel(0, (rb + PMM_1T_RESET_REG_P0)); bfa_ioc_ct_pll_init()
644 writel(0, (rb + PMM_1T_RESET_REG_P1)); bfa_ioc_ct_pll_init()
647 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); bfa_ioc_ct_pll_init()
649 r32 = readl((rb + MBIST_STAT_REG)); bfa_ioc_ct_pll_init()
650 writel(0, (rb + MBIST_CTL_REG)); bfa_ioc_ct_pll_init()
655 bfa_ioc_ct2_sclk_init(void __iomem *rb) bfa_ioc_ct2_sclk_init() argument
662 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
666 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
672 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
674 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
679 r32 = readl((rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_sclk_init()
680 writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_sclk_init()
682 r32 = readl((rb + CT2_PCIE_MISC_REG)); bfa_ioc_ct2_sclk_init()
683 writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG)); bfa_ioc_ct2_sclk_init()
688 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
691 writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
700 bfa_ioc_ct2_lclk_init(void __iomem *rb) bfa_ioc_ct2_lclk_init() argument
707 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
711 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
716 r32 = readl((rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_lclk_init()
717 writel(r32, (rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_lclk_init()
722 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
723 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
728 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
731 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
740 bfa_ioc_ct2_mem_init(void __iomem *rb) bfa_ioc_ct2_mem_init() argument
744 r32 = readl((rb + PSS_CTL_REG)); bfa_ioc_ct2_mem_init()
746 writel(r32, (rb + PSS_CTL_REG)); bfa_ioc_ct2_mem_init()
749 writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); bfa_ioc_ct2_mem_init()
751 writel(0, (rb + CT2_MBIST_CTL_REG)); bfa_ioc_ct2_mem_init()
755 bfa_ioc_ct2_mac_reset(void __iomem *rb) bfa_ioc_ct2_mac_reset() argument
759 rb + CT2_CSI_MAC_CONTROL_REG(0)); bfa_ioc_ct2_mac_reset()
761 rb + CT2_CSI_MAC_CONTROL_REG(1)); bfa_ioc_ct2_mac_reset()
765 bfa_ioc_ct2_enable_flash(void __iomem *rb) bfa_ioc_ct2_enable_flash() argument
769 r32 = readl((rb + PSS_GPIO_OUT_REG)); bfa_ioc_ct2_enable_flash()
770 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); bfa_ioc_ct2_enable_flash()
771 r32 = readl((rb + PSS_GPIO_OE_REG)); bfa_ioc_ct2_enable_flash()
772 writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); bfa_ioc_ct2_enable_flash()
782 bfa_ioc_ct2_nfc_halted(void __iomem *rb) bfa_ioc_ct2_nfc_halted() argument
786 r32 = readl(rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_nfc_halted()
794 bfa_ioc_ct2_nfc_halt(void __iomem *rb) bfa_ioc_ct2_nfc_halt() argument
798 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_nfc_halt()
800 if (bfa_ioc_ct2_nfc_halted(rb)) bfa_ioc_ct2_nfc_halt()
804 WARN_ON(!bfa_ioc_ct2_nfc_halted(rb)); bfa_ioc_ct2_nfc_halt()
808 bfa_ioc_ct2_nfc_resume(void __iomem *rb) bfa_ioc_ct2_nfc_resume() argument
813 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); bfa_ioc_ct2_nfc_resume()
815 r32 = readl(rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_nfc_resume()
824 bfa_ioc_ct2_clk_reset(void __iomem *rb) bfa_ioc_ct2_clk_reset() argument
828 bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_clk_reset()
829 bfa_ioc_ct2_lclk_init(rb); bfa_ioc_ct2_clk_reset()
834 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_clk_reset()
836 (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_clk_reset()
838 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_clk_reset()
840 (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_clk_reset()
845 bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb) bfa_ioc_ct2_nfc_clk_reset() argument
849 r32 = readl((rb + PSS_CTL_REG)); bfa_ioc_ct2_nfc_clk_reset()
851 writel(r32, (rb + PSS_CTL_REG)); bfa_ioc_ct2_nfc_clk_reset()
853 writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); bfa_ioc_ct2_nfc_clk_reset()
856 r32 = readl(rb + CT2_NFC_FLASH_STS_REG); bfa_ioc_ct2_nfc_clk_reset()
864 r32 = readl(rb + CT2_NFC_FLASH_STS_REG); bfa_ioc_ct2_nfc_clk_reset()
871 r32 = readl(rb + CT2_CSI_FW_CTL_REG); bfa_ioc_ct2_nfc_clk_reset()
876 bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb) bfa_ioc_ct2_wait_till_nfc_running() argument
881 if (bfa_ioc_ct2_nfc_halted(rb)) bfa_ioc_ct2_wait_till_nfc_running()
882 bfa_ioc_ct2_nfc_resume(rb); bfa_ioc_ct2_wait_till_nfc_running()
884 r32 = readl(rb + CT2_NFC_STS_REG); bfa_ioc_ct2_wait_till_nfc_running()
890 r32 = readl(rb + CT2_NFC_STS_REG); bfa_ioc_ct2_wait_till_nfc_running()
895 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) bfa_ioc_ct2_pll_init() argument
899 wgn = readl(rb + CT2_WGN_STATUS); bfa_ioc_ct2_pll_init()
905 bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_pll_init()
906 bfa_ioc_ct2_enable_flash(rb); bfa_ioc_ct2_pll_init()
908 bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_pll_init()
910 bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_pll_init()
911 bfa_ioc_ct2_enable_flash(rb); bfa_ioc_ct2_pll_init()
914 nfc_ver = readl(rb + CT2_RSC_GPR15_REG); bfa_ioc_ct2_pll_init()
919 bfa_ioc_ct2_wait_till_nfc_running(rb); bfa_ioc_ct2_pll_init()
921 bfa_ioc_ct2_nfc_clk_reset(rb); bfa_ioc_ct2_pll_init()
923 bfa_ioc_ct2_nfc_halt(rb); bfa_ioc_ct2_pll_init()
925 bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_pll_init()
926 bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_pll_init()
927 bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_pll_init()
939 r32 = readl(rb + CT2_CHIP_MISC_PRG); bfa_ioc_ct2_pll_init()
940 writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_pll_init()
947 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); bfa_ioc_ct2_pll_init()
948 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); bfa_ioc_ct2_pll_init()
951 r32 = readl(rb + HOST_SEM5_REG); bfa_ioc_ct2_pll_init()
953 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
955 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
956 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
958 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
960 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
961 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
965 bfa_ioc_ct2_mem_init(rb); bfa_ioc_ct2_pll_init()
967 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); bfa_ioc_ct2_pll_init()
968 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); bfa_ioc_ct2_pll_init()
bfa_ioc_cb.c 145 void __iomem *rb; bfa_ioc_cb_reg_init() local
148 rb = bfa_ioc_bar0(ioc); bfa_ioc_cb_reg_init()
150 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; bfa_ioc_cb_reg_init()
151 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; bfa_ioc_cb_reg_init()
152 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; bfa_ioc_cb_reg_init()
155 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; bfa_ioc_cb_reg_init()
156 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; bfa_ioc_cb_reg_init()
157 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; bfa_ioc_cb_reg_init()
159 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); bfa_ioc_cb_reg_init()
160 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); bfa_ioc_cb_reg_init()
161 ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG); bfa_ioc_cb_reg_init()
167 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; bfa_ioc_cb_reg_init()
168 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu; bfa_ioc_cb_reg_init()
173 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); bfa_ioc_cb_reg_init()
174 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); bfa_ioc_cb_reg_init()
175 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_cb_reg_init()
176 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_cb_reg_init()
181 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); bfa_ioc_cb_reg_init()
182 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); bfa_ioc_cb_reg_init()
187 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); bfa_ioc_cb_reg_init()
193 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); bfa_ioc_cb_reg_init()
364 bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) bfa_ioc_cb_pll_init() argument
376 join_bits = readl(rb + BFA_IOC0_STATE_REG) & bfa_ioc_cb_pll_init()
378 writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG)); bfa_ioc_cb_pll_init()
379 join_bits = readl(rb + BFA_IOC1_STATE_REG) & bfa_ioc_cb_pll_init()
381 writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG)); bfa_ioc_cb_pll_init()
382 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_cb_pll_init()
383 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_cb_pll_init()
384 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_cb_pll_init()
385 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_cb_pll_init()
386 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_cb_pll_init()
387 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_cb_pll_init()
388 writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_cb_pll_init()
390 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_cb_pll_init()
391 writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_cb_pll_init()
393 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_cb_pll_init()
395 writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_cb_pll_init()
396 writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_cb_pll_init()
398 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_cb_pll_init()
400 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_cb_pll_init()
402 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_cb_pll_init()
403 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_cb_pll_init()
404 writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG)); bfa_ioc_cb_pll_init()
405 writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG)); bfa_ioc_cb_pll_init()
bfad_debugfs.c 259 void __iomem *rb, *reg_addr; bfad_debugfs_write_regrd() local
289 rb = bfa_ioc_bar0(ioc); bfad_debugfs_write_regrd()
303 reg_addr = rb + addr; bfad_debugfs_write_regrd()
bfa_ioc.h 334 bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
860 bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
861 bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
862 bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
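Across the bfa_ioc_ct.c and bfa_ioc_cb.c listings above, rb is the ioremapped BAR0 base returned by bfa_ioc_bar0(ioc), and every register is addressed as rb plus a constant offset and accessed with readl()/writel(). A minimal hedged sketch of that style; the offset macro and function are placeholders, not real BFA register definitions.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch of the rb-as-BAR0-base register access pattern (placeholder offset). */
#define SKETCH_CTL_REG	0x0100		/* hypothetical register offset */

static void sketch_reg_toggle(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + SKETCH_CTL_REG);	/* read-modify-write the register */
	writel(r32 | 1, rb + SKETCH_CTL_REG);
}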
/linux-4.1.27/drivers/net/ethernet/brocade/bna/
bfa_ioc_ct.c 58 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
60 static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
260 void __iomem *rb; bfa_ioc_ct_reg_init() local
263 rb = bfa_ioc_bar0(ioc); bfa_ioc_ct_reg_init()
265 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; bfa_ioc_ct_reg_init()
266 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; bfa_ioc_ct_reg_init()
267 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; bfa_ioc_ct_reg_init()
270 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; bfa_ioc_ct_reg_init()
271 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; bfa_ioc_ct_reg_init()
272 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; bfa_ioc_ct_reg_init()
273 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; bfa_ioc_ct_reg_init()
274 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; bfa_ioc_ct_reg_init()
275 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct_reg_init()
276 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct_reg_init()
278 ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; bfa_ioc_ct_reg_init()
279 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; bfa_ioc_ct_reg_init()
280 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; bfa_ioc_ct_reg_init()
281 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; bfa_ioc_ct_reg_init()
282 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; bfa_ioc_ct_reg_init()
283 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct_reg_init()
284 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct_reg_init()
290 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; bfa_ioc_ct_reg_init()
291 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; bfa_ioc_ct_reg_init()
292 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; bfa_ioc_ct_reg_init()
293 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; bfa_ioc_ct_reg_init()
298 ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; bfa_ioc_ct_reg_init()
299 ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; bfa_ioc_ct_reg_init()
300 ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; bfa_ioc_ct_reg_init()
301 ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; bfa_ioc_ct_reg_init()
302 ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; bfa_ioc_ct_reg_init()
307 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; bfa_ioc_ct_reg_init()
313 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); bfa_ioc_ct_reg_init()
319 void __iomem *rb; bfa_ioc_ct2_reg_init() local
322 rb = bfa_ioc_bar0(ioc); bfa_ioc_ct2_reg_init()
324 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; bfa_ioc_ct2_reg_init()
325 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; bfa_ioc_ct2_reg_init()
326 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; bfa_ioc_ct2_reg_init()
327 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; bfa_ioc_ct2_reg_init()
328 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; bfa_ioc_ct2_reg_init()
329 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; bfa_ioc_ct2_reg_init()
332 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; bfa_ioc_ct2_reg_init()
333 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; bfa_ioc_ct2_reg_init()
334 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; bfa_ioc_ct2_reg_init()
335 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct2_reg_init()
336 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct2_reg_init()
338 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; bfa_ioc_ct2_reg_init()
339 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; bfa_ioc_ct2_reg_init()
340 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; bfa_ioc_ct2_reg_init()
341 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; bfa_ioc_ct2_reg_init()
342 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; bfa_ioc_ct2_reg_init()
348 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; bfa_ioc_ct2_reg_init()
349 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; bfa_ioc_ct2_reg_init()
350 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; bfa_ioc_ct2_reg_init()
351 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; bfa_ioc_ct2_reg_init()
356 ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; bfa_ioc_ct2_reg_init()
357 ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; bfa_ioc_ct2_reg_init()
358 ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; bfa_ioc_ct2_reg_init()
359 ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; bfa_ioc_ct2_reg_init()
360 ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; bfa_ioc_ct2_reg_init()
365 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; bfa_ioc_ct2_reg_init()
371 ioc->ioc_regs.err_set = rb + ERR_SET_REG; bfa_ioc_ct2_reg_init()
380 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct_map_port() local
386 r32 = readl(rb + FNC_PERS_REG); bfa_ioc_ct_map_port()
395 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct2_map_port() local
398 r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); bfa_ioc_ct2_map_port()
406 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_ioc_ct_isr_mode_set() local
409 r32 = readl(rb + FNC_PERS_REG); bfa_ioc_ct_isr_mode_set()
428 writel(r32, rb + FNC_PERS_REG); bfa_ioc_ct_isr_mode_set()
456 void __iomem *rb = ioc->pcidev.pci_bar_kva; bfa_nw_ioc_ct2_poweron() local
459 r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); bfa_nw_ioc_ct2_poweron()
462 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); bfa_nw_ioc_ct2_poweron()
468 rb + HOSTFN_MSIX_VT_OFST_NUMVT); bfa_nw_ioc_ct2_poweron()
470 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); bfa_nw_ioc_ct2_poweron()
610 bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) bfa_ioc_ct_pll_init() argument
625 writel(0, (rb + OP_MODE)); bfa_ioc_ct_pll_init()
629 (rb + ETH_MAC_SER_REG)); bfa_ioc_ct_pll_init()
631 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); bfa_ioc_ct_pll_init()
633 (rb + ETH_MAC_SER_REG)); bfa_ioc_ct_pll_init()
635 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); bfa_ioc_ct_pll_init()
636 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); bfa_ioc_ct_pll_init()
637 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_ct_pll_init()
638 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_ct_pll_init()
639 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_ct_pll_init()
640 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_ct_pll_init()
641 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); bfa_ioc_ct_pll_init()
642 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); bfa_ioc_ct_pll_init()
645 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
648 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
651 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
654 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
655 readl(rb + HOSTFN0_INT_MSK); bfa_ioc_ct_pll_init()
657 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); bfa_ioc_ct_pll_init()
658 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); bfa_ioc_ct_pll_init()
661 rb + APP_PLL_SCLK_CTL_REG); bfa_ioc_ct_pll_init()
664 rb + APP_PLL_LCLK_CTL_REG); bfa_ioc_ct_pll_init()
667 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); bfa_ioc_ct_pll_init()
668 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); bfa_ioc_ct_pll_init()
670 r32 = readl((rb + PSS_CTL_REG)); bfa_ioc_ct_pll_init()
672 writel(r32, (rb + PSS_CTL_REG)); bfa_ioc_ct_pll_init()
675 writel(0, (rb + PMM_1T_RESET_REG_P0)); bfa_ioc_ct_pll_init()
676 writel(0, (rb + PMM_1T_RESET_REG_P1)); bfa_ioc_ct_pll_init()
679 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); bfa_ioc_ct_pll_init()
681 r32 = readl((rb + MBIST_STAT_REG)); bfa_ioc_ct_pll_init()
682 writel(0, (rb + MBIST_CTL_REG)); bfa_ioc_ct_pll_init()
687 bfa_ioc_ct2_sclk_init(void __iomem *rb) bfa_ioc_ct2_sclk_init() argument
694 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
698 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
704 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
706 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
711 r32 = readl((rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_sclk_init()
713 (rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_sclk_init()
715 r32 = readl((rb + CT2_PCIE_MISC_REG)); bfa_ioc_ct2_sclk_init()
717 (rb + CT2_PCIE_MISC_REG)); bfa_ioc_ct2_sclk_init()
722 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
725 writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_sclk_init()
739 bfa_ioc_ct2_lclk_init(void __iomem *rb) bfa_ioc_ct2_lclk_init() argument
746 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
750 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
755 r32 = readl((rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_lclk_init()
756 writel(r32, (rb + CT2_CHIP_MISC_PRG)); bfa_ioc_ct2_lclk_init()
761 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
762 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
767 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
770 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_lclk_init()
779 bfa_ioc_ct2_mem_init(void __iomem *rb) bfa_ioc_ct2_mem_init() argument
783 r32 = readl((rb + PSS_CTL_REG)); bfa_ioc_ct2_mem_init()
785 writel(r32, (rb + PSS_CTL_REG)); bfa_ioc_ct2_mem_init()
788 writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); bfa_ioc_ct2_mem_init()
790 writel(0, (rb + CT2_MBIST_CTL_REG)); bfa_ioc_ct2_mem_init()
794 bfa_ioc_ct2_mac_reset(void __iomem *rb) bfa_ioc_ct2_mac_reset() argument
798 bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_mac_reset()
799 bfa_ioc_ct2_lclk_init(rb); bfa_ioc_ct2_mac_reset()
804 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_mac_reset()
806 (rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_mac_reset()
811 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_mac_reset()
813 (rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_mac_reset()
817 (rb + CT2_CSI_MAC_CONTROL_REG(0))); bfa_ioc_ct2_mac_reset()
819 (rb + CT2_CSI_MAC_CONTROL_REG(1))); bfa_ioc_ct2_mac_reset()
827 bfa_ioc_ct2_nfc_halted(void __iomem *rb) bfa_ioc_ct2_nfc_halted() argument
831 r32 = readl(rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_nfc_halted()
839 bfa_ioc_ct2_nfc_resume(void __iomem *rb) bfa_ioc_ct2_nfc_resume() argument
844 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); bfa_ioc_ct2_nfc_resume()
846 r32 = readl(rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_nfc_resume()
855 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) bfa_ioc_ct2_pll_init() argument
860 wgn = readl(rb + CT2_WGN_STATUS); bfa_ioc_ct2_pll_init()
862 nfc_ver = readl(rb + CT2_RSC_GPR15_REG); bfa_ioc_ct2_pll_init()
866 if (bfa_ioc_ct2_nfc_halted(rb)) bfa_ioc_ct2_pll_init()
867 bfa_ioc_ct2_nfc_resume(rb); bfa_ioc_ct2_pll_init()
869 rb + CT2_CSI_FW_CTL_SET_REG); bfa_ioc_ct2_pll_init()
872 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); bfa_ioc_ct2_pll_init()
879 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); bfa_ioc_ct2_pll_init()
886 r32 = readl(rb + CT2_CSI_FW_CTL_REG); bfa_ioc_ct2_pll_init()
889 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); bfa_ioc_ct2_pll_init()
891 r32 = readl(rb + CT2_NFC_CSR_SET_REG); bfa_ioc_ct2_pll_init()
897 bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_pll_init()
898 bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_pll_init()
899 bfa_ioc_ct2_lclk_init(rb); bfa_ioc_ct2_pll_init()
902 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); bfa_ioc_ct2_pll_init()
904 rb + CT2_APP_PLL_SCLK_CTL_REG); bfa_ioc_ct2_pll_init()
905 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); bfa_ioc_ct2_pll_init()
907 rb + CT2_APP_PLL_LCLK_CTL_REG); bfa_ioc_ct2_pll_init()
912 r32 = readl((rb + PSS_GPIO_OUT_REG)); bfa_ioc_ct2_pll_init()
913 writel(r32 & ~1, rb + PSS_GPIO_OUT_REG); bfa_ioc_ct2_pll_init()
914 r32 = readl((rb + PSS_GPIO_OE_REG)); bfa_ioc_ct2_pll_init()
915 writel(r32 | 1, rb + PSS_GPIO_OE_REG); bfa_ioc_ct2_pll_init()
922 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); bfa_ioc_ct2_pll_init()
923 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); bfa_ioc_ct2_pll_init()
926 r32 = readl(rb + HOST_SEM5_REG); bfa_ioc_ct2_pll_init()
928 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
930 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
931 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
933 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
935 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
936 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); bfa_ioc_ct2_pll_init()
940 bfa_ioc_ct2_mem_init(rb); bfa_ioc_ct2_pll_init()
942 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); bfa_ioc_ct2_pll_init()
943 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); bfa_ioc_ct2_pll_init()
bnad_debugfs.c 320 void __iomem *rb, *reg_addr; bnad_debugfs_write_regrd() local
352 rb = bfa_ioc_bar0(ioc); bnad_debugfs_write_regrd()
366 reg_addr = rb + addr; bnad_debugfs_write_regrd()
bfa_ioc.h 203 enum bfa_status (*ioc_pll_init) (void __iomem *rb,
/linux-4.1.27/mm/
interval_tree.c 24 INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
39 if (!prev->shared.rb.rb_right) { vma_interval_tree_insert_after()
41 link = &prev->shared.rb.rb_right; vma_interval_tree_insert_after()
43 parent = rb_entry(prev->shared.rb.rb_right, vma_interval_tree_insert_after()
44 struct vm_area_struct, shared.rb); vma_interval_tree_insert_after()
47 while (parent->shared.rb.rb_left) { vma_interval_tree_insert_after()
48 parent = rb_entry(parent->shared.rb.rb_left, vma_interval_tree_insert_after()
49 struct vm_area_struct, shared.rb); vma_interval_tree_insert_after()
53 link = &parent->shared.rb.rb_left; vma_interval_tree_insert_after()
57 rb_link_node(&node->shared.rb, &parent->shared.rb, link); vma_interval_tree_insert_after()
58 rb_insert_augmented(&node->shared.rb, root, vma_interval_tree_insert_after()
72 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
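Both interval-tree users above (lib/interval_tree.c and mm/interval_tree.c) instantiate their trees through the INTERVAL_TREE_DEFINE() macro from interval_tree_generic.h, naming the embedded rb node, the subtree-last augmentation field, and start/last accessors. A hedged sketch of what such an instantiation could look like for a made-up structure; the struct, accessors, and prefix are invented, and the argument order is taken from the kernel header rather than from this listing.

#include <linux/interval_tree_generic.h>

/* Hypothetical interval-tree user (all names invented for illustration). */
struct sketch_range {
	struct rb_node rb;
	unsigned long start, last;	/* closed interval [start, last] */
	unsigned long __subtree_last;	/* maintained by the generated code */
};

static inline unsigned long sketch_start(struct sketch_range *r) { return r->start; }
static inline unsigned long sketch_last(struct sketch_range *r)  { return r->last; }

INTERVAL_TREE_DEFINE(struct sketch_range, rb, unsigned long, __subtree_last,
		     sketch_start, sketch_last, static, sketch_it)
/*
 * Expected to generate sketch_it_insert(), sketch_it_remove(),
 * sketch_it_iter_first() and sketch_it_iter_next().
 */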
nommu.c 1284 struct rb_node *rb; do_mmap_pgoff() local
1348 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { do_mmap_pgoff()
1349 pregion = rb_entry(rb, struct vm_region, vm_rb); do_mmap_pgoff()
kmemleak.c 407 struct rb_node *rb = object_tree_root.rb_node; lookup_object() local
409 while (rb) { lookup_object()
411 rb_entry(rb, struct kmemleak_object, rb_node); lookup_object()
413 rb = object->rb_node.rb_left; lookup_object()
415 rb = object->rb_node.rb_right; lookup_object()
/linux-4.1.27/arch/arm/lib/
getuser.S 45 rb .req ip label
47 3: ldrbt rb, [r0], #0
49 rb .req r0 label
51 3: ldrb rb, [r0, #1]
54 orr r2, r2, rb, lsl #8
56 orr r2, rb, r2, lsl #8
105 rb .req ip label
107 10: ldrbt rb, [r0], #0
109 rb .req r0 label
111 10: ldrb rb, [r0, #1]
113 orr r3, rb, r3, lsl #8
/linux-4.1.27/drivers/block/drbd/
drbd_interval.c 11 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); interval_end()
27 if (node->rb.rb_left) { compute_subtree_last()
28 sector_t left = interval_end(node->rb.rb_left); compute_subtree_last()
32 if (node->rb.rb_right) { compute_subtree_last()
33 sector_t right = interval_end(node->rb.rb_right); compute_subtree_last()
40 RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
56 rb_entry(*new, struct drbd_interval, rb); drbd_insert_interval()
74 rb_link_node(&this->rb, parent, new); drbd_insert_interval()
75 rb_insert_augmented(&this->rb, root, &augment_callbacks); drbd_insert_interval()
97 rb_entry(node, struct drbd_interval, rb); drbd_contains_interval()
119 rb_erase_augmented(&this->rb, root, &augment_callbacks); drbd_remove_interval()
144 rb_entry(node, struct drbd_interval, rb); drbd_find_overlap()
170 node = rb_next(&i->rb); drbd_next_overlap()
173 i = rb_entry(node, struct drbd_interval, rb); drbd_next_overlap()
drbd_interval.h 8 struct rb_node rb; member in struct:drbd_interval
20 RB_CLEAR_NODE(&i->rb); drbd_clear_interval()
25 return RB_EMPTY_NODE(&i->rb); drbd_interval_empty()
/linux-4.1.27/arch/x86/mm/
pat_rbtree.c 51 struct memtype *data = container_of(node, struct memtype, rb); get_subtree_max_end()
61 child_max_end = get_subtree_max_end(data->rb.rb_right); compute_subtree_max_end()
65 child_max_end = get_subtree_max_end(data->rb.rb_left); compute_subtree_max_end()
72 RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
75 /* Find the first (lowest start addr) overlapping range from rb tree */ memtype_rb_lowest_match()
83 struct memtype *data = container_of(node, struct memtype, rb); memtype_rb_lowest_match()
113 node = rb_next(&match->rb); memtype_rb_exact_match()
115 match = container_of(node, struct memtype, rb); memtype_rb_exact_match()
142 node = rb_next(&match->rb); memtype_rb_check_conflict()
144 match = container_of(node, struct memtype, rb); memtype_rb_check_conflict()
154 node = rb_next(&match->rb); memtype_rb_check_conflict()
175 struct memtype *data = container_of(*node, struct memtype, rb); memtype_rb_insert()
187 rb_link_node(&newdata->rb, parent, node); memtype_rb_insert()
188 rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); memtype_rb_insert()
217 rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb); rbt_memtype_erase()
242 struct memtype *this = container_of(node, struct memtype, rb); rbt_memtype_copy_nth_element()
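pat_rbtree.c, drbd_interval.c and rbtree_test.c all follow the same augmented-rbtree recipe: keep a per-node subtree summary, provide a recompute function, declare the callbacks with RB_DECLARE_CALLBACKS(), and insert/erase through rb_insert_augmented()/rb_erase_augmented(). A hedged sketch of that recipe with invented names; the callback macro's argument order is assumed from the rbtree_augmented.h fragments further down the listing.

#include <linux/types.h>
#include <linux/rbtree_augmented.h>

/* Invented example node carrying a subtree-maximum augmentation. */
struct sketch_mt {
	struct rb_node rb;
	u64 start, end;
	u64 subtree_max_end;		/* max 'end' over this node's subtree */
};

static u64 sketch_compute_max_end(struct sketch_mt *n)
{
	u64 max = n->end, child;

	if (n->rb.rb_left) {
		child = rb_entry(n->rb.rb_left, struct sketch_mt, rb)->subtree_max_end;
		if (child > max)
			max = child;
	}
	if (n->rb.rb_right) {
		child = rb_entry(n->rb.rb_right, struct sketch_mt, rb)->subtree_max_end;
		if (child > max)
			max = child;
	}
	return max;
}

RB_DECLARE_CALLBACKS(static, sketch_augment_cb, struct sketch_mt, rb,
		     u64, subtree_max_end, sketch_compute_max_end)

/* Insertion and removal then pass &sketch_augment_cb to rb_insert_augmented()
 * and rb_erase_augmented(), as in the fragments above. */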
pat_internal.h 14 struct rb_node rb; member in struct:memtype
/linux-4.1.27/drivers/xen/xenbus/
xenbus_dev_frontend.c 126 struct read_buffer *rb; xenbus_file_read() local
144 rb = list_entry(u->read_buffers.next, struct read_buffer, list); xenbus_file_read()
147 unsigned sz = min((unsigned)len - i, rb->len - rb->cons); xenbus_file_read()
149 ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); xenbus_file_read()
152 rb->cons += sz - ret; xenbus_file_read()
161 if (rb->cons == rb->len) { xenbus_file_read()
162 list_del(&rb->list); xenbus_file_read()
163 kfree(rb); xenbus_file_read()
166 rb = list_entry(u->read_buffers.next, xenbus_file_read()
187 struct read_buffer *rb; queue_reply() local
192 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); queue_reply()
193 if (rb == NULL) queue_reply()
196 rb->cons = 0; queue_reply()
197 rb->len = len; queue_reply()
199 memcpy(rb->msg, data, len); queue_reply()
201 list_add_tail(&rb->list, queue); queue_reply()
211 struct read_buffer *rb; queue_cleanup() local
214 rb = list_entry(list->next, struct read_buffer, list); queue_cleanup()
216 kfree(rb); queue_cleanup()
561 struct read_buffer *rb, *tmp_rb; xenbus_file_release() local
580 list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { xenbus_file_release()
581 list_del(&rb->list); xenbus_file_release()
582 kfree(rb); xenbus_file_release()
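The xenbus hits above are not rb-tree code: rb is simply the local name for a read_buffer. queue_reply() stores the payload right behind the struct in a single allocation, and xenbus_file_read() drains it through the cons/len cursor so a short copy_to_user() can resume later. A hedged sketch of the producer side only (struct layout as in the hits; error handling trimmed, queue_reply_sketch is a made-up name):

struct read_buffer {
	struct list_head list;
	unsigned int cons;	/* bytes already copied to userspace */
	unsigned int len;	/* total payload length */
	char msg[];		/* payload stored right after the header */
};

static int queue_reply_sketch(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (!len)
		return 0;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (!rb)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;
	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}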
/linux-4.1.27/include/linux/
H A Drbtree_augmented.h66 rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
68 while (rb != stop) { \
69 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
74 rb = rb_parent(&node->rbfield); \
105 #define rb_color(rb) __rb_color((rb)->__rb_parent_color)
106 #define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
107 #define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
109 static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) rb_set_parent() argument
111 rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; rb_set_parent()
114 static inline void rb_set_parent_color(struct rb_node *rb, rb_set_parent_color() argument
117 rb->__rb_parent_color = (unsigned long)p | color; rb_set_parent_color()
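rb_set_parent() and rb_set_parent_color() above work because struct rb_node is at least 4-byte aligned, so the bottom bit of __rb_parent_color is free to carry the node colour (RB_RED is 0, RB_BLACK is 1) while the upper bits hold the parent pointer. A hedged illustration of the unpacking (sketch_parent/sketch_is_black are made-up names; the real accessors are rb_parent() and rb_is_black()):

static inline struct rb_node *sketch_parent(const struct rb_node *rb)
{
	/* mask off the colour bits; what is left is the parent's address */
	return (struct rb_node *)(rb->__rb_parent_color & ~3UL);
}

static inline bool sketch_is_black(const struct rb_node *rb)
{
	/* bit 0 is the colour: 0 = red, 1 = black */
	return rb->__rb_parent_color & 1;
}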
H A Dinterval_tree.h7 struct rb_node rb; member in struct:interval_tree_node
H A Dinterval_tree_generic.h158 struct rb_node *rb = node->ITRB.rb_right, *prev; \
164 * rb == node->ITRB.rb_right \
168 if (rb) { \
169 ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
177 rb = rb_parent(&node->ITRB); \
178 if (!rb) \
181 node = rb_entry(rb, ITSTRUCT, ITRB); \
182 rb = node->ITRB.rb_right; \
183 } while (prev == rb); \
H A Drmap.h70 * The "rb" field indexes on an interval tree the anon_vma_chains
77 struct rb_node rb; /* locked by anon_vma->rwsem */ member in struct:anon_vma_chain
H A Delevator.h169 * rb support functions.
H A Dkernfs.h76 /* children rbtree starts here and goes through kn->rb */
121 struct rb_node rb; member in struct:kernfs_node
H A Dperf_event.h454 struct ring_buffer *rb; member in struct:perf_event
573 struct ring_buffer *rb; member in struct:perf_output_handle
H A Dmm_types.h278 struct rb_node rb; member in struct:vm_area_struct::__anon12173
/linux-4.1.27/drivers/target/iscsi/
H A Discsi_target_configfs.c56 ssize_t rb; lio_target_np_show_sctp() local
60 rb = sprintf(page, "1\n"); lio_target_np_show_sctp()
62 rb = sprintf(page, "0\n"); lio_target_np_show_sctp()
64 return rb; lio_target_np_show_sctp()
132 ssize_t rb; lio_target_np_show_iser() local
136 rb = sprintf(page, "1\n"); lio_target_np_show_iser()
138 rb = sprintf(page, "0\n"); lio_target_np_show_iser()
140 return rb; lio_target_np_show_iser()
581 ssize_t rb; \
586 rb = snprintf(page, PAGE_SIZE, \
590 rb = snprintf(page, PAGE_SIZE, "%u\n", \
595 return rb; \
659 ssize_t rb = 0; lio_target_nacl_show_info() local
664 rb += sprintf(page+rb, "No active iSCSI Session for Initiator" lio_target_nacl_show_info()
669 rb += sprintf(page+rb, "InitiatorName: %s\n", lio_target_nacl_show_info()
671 rb += sprintf(page+rb, "InitiatorAlias: %s\n", lio_target_nacl_show_info()
674 rb += sprintf(page+rb, lio_target_nacl_show_info()
677 rb += sprintf(page+rb, "SessionType: %s\n", lio_target_nacl_show_info()
680 rb += sprintf(page+rb, "Session State: "); lio_target_nacl_show_info()
683 rb += sprintf(page+rb, "TARG_SESS_FREE\n"); lio_target_nacl_show_info()
686 rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n"); lio_target_nacl_show_info()
689 rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n"); lio_target_nacl_show_info()
692 rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n"); lio_target_nacl_show_info()
695 rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n"); lio_target_nacl_show_info()
698 rb += sprintf(page+rb, "ERROR: Unknown Session" lio_target_nacl_show_info()
703 rb += sprintf(page+rb, "---------------------[iSCSI Session" lio_target_nacl_show_info()
705 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" lio_target_nacl_show_info()
707 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x" lio_target_nacl_show_info()
713 rb += sprintf(page+rb, "----------------------[iSCSI" lio_target_nacl_show_info()
718 rb += sprintf(page+rb, "CID: %hu Connection" lio_target_nacl_show_info()
722 rb += sprintf(page+rb, lio_target_nacl_show_info()
726 rb += sprintf(page+rb, lio_target_nacl_show_info()
730 rb += sprintf(page+rb, lio_target_nacl_show_info()
734 rb += sprintf(page+rb, lio_target_nacl_show_info()
738 rb += sprintf(page+rb, lio_target_nacl_show_info()
742 rb += sprintf(page+rb, lio_target_nacl_show_info()
746 rb += sprintf(page+rb, lio_target_nacl_show_info()
750 rb += sprintf(page+rb, lio_target_nacl_show_info()
755 rb += sprintf(page+rb, " Address %s %s", conn->login_ip, lio_target_nacl_show_info()
758 rb += sprintf(page+rb, " StatSN: 0x%08x\n", lio_target_nacl_show_info()
765 return rb; lio_target_nacl_show_info()
958 ssize_t rb; \
963 rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
965 return rb; \
1211 ssize_t rb; \
1222 rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
1225 return rb; \
/linux-4.1.27/arch/tile/kernel/
H A Dunaligned.c176 * unalign load/store shares same register with ra, rb and rd.
180 uint64_t *rb, uint64_t *clob1, uint64_t *clob2, find_regs()
208 *rb = reg; find_regs()
209 alias_reg_map = (1ULL << *ra) | (1ULL << *rb); find_regs()
260 *rb = reg; find_regs()
261 alias_reg_map = (1ULL << *ra) | (1ULL << *rb); find_regs()
312 * Sanity check for register ra, rb, rd, clob1/2/3. Return true if any of them
316 static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb, check_regs() argument
330 if ((rb >= 56) && (rb != TREG_ZERO)) check_regs()
394 static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb) jit_x0_dblalign() argument
399 create_SrcB_X0(rb); jit_x0_dblalign()
436 static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8) jit_x1_st1_add() argument
442 create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8); jit_x1_st1_add()
446 static tilegx_bundle_bits jit_x1_st(int ra, int rb) jit_x1_st() argument
450 create_SrcA_X1(ra) | create_SrcB_X1(rb); jit_x1_st()
454 static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8) jit_x1_st_add() argument
460 create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8); jit_x1_st_add()
545 * registers: ra, rb and rd. and 3 scratch registers by calling
562 uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1; jit_bundle_gen() local
643 find_regs(bundle, 0, &ra, &rb, &clob1, &clob2, jit_bundle_gen()
678 find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2, jit_bundle_gen()
696 find_regs(bundle, &rd, &ra, &rb, &clob1, jit_bundle_gen()
723 find_regs(bundle, 0, &ra, &rb, jit_bundle_gen()
789 &ra, &rb, &clob1, &clob2, &clob3, &alias); jit_bundle_gen()
797 if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true) jit_bundle_gen()
809 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0} jit_bundle_gen()
934 x = regs->regs[rb]; jit_bundle_gen()
1018 if ((ra != rb) && (rd != TREG_SP) && !alias && jit_bundle_gen()
1021 * Simple case: ra != rb and no register alias found, jit_bundle_gen()
1028 * Simple store: ra != rb, no need for scratch register. jit_bundle_gen()
1039 jit_x0_rotli(rb, rb, 56) | jit_bundle_gen()
1040 jit_x1_st1_add(ra, rb, jit_bundle_gen()
1053 frag.insn[n] |= jit_x0_rotli(rb, rb, 32); jit_bundle_gen()
1055 frag.insn[n] |= jit_x0_rotli(rb, rb, 16); jit_bundle_gen()
1188 jit_x0_rotli(rb, rb, 56) | jit_bundle_gen()
1189 jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA); jit_bundle_gen()
1201 jit_x0_rotli(rb, rb, 56) | jit_bundle_gen()
1202 jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA); jit_bundle_gen()
1208 * byte to recover rb for 4-byte store. jit_bundle_gen()
1210 frag.insn[n++] = jit_x0_rotli(rb, rb, 32) | jit_bundle_gen()
1214 jit_x0_addi(clob2, rb, 0) | jit_bundle_gen()
1218 jit_x0_shrui(rb, rb, 8) | jit_bundle_gen()
1219 jit_x1_st1_add(clob1, rb, jit_bundle_gen()
1223 jit_x0_addi(rb, clob2, 0) | jit_bundle_gen()
1378 (int)rb, (int)bundle_2_enable, jit_bundle_gen()
179 find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra, uint64_t *rb, uint64_t *clob1, uint64_t *clob2, uint64_t *clob3, bool *r_alias) find_regs() argument
/linux-4.1.27/drivers/leds/
H A Dleds-rb532.c7 * rb-diag.c (my own standalone driver for both LED and
16 #include <asm/mach-rc32434/rb.h>
/linux-4.1.27/tools/perf/
H A Dbuiltin-lock.c43 struct rb_node rb; /* used for sorting */ member in struct:lock_stat
110 struct rb_node rb; member in struct:thread_stat
125 st = container_of(node, struct thread_stat, rb); thread_stat_find()
139 struct rb_node **rb = &thread_stats.rb_node; thread_stat_insert() local
143 while (*rb) { thread_stat_insert()
144 p = container_of(*rb, struct thread_stat, rb); thread_stat_insert()
145 parent = *rb; thread_stat_insert()
148 rb = &(*rb)->rb_left; thread_stat_insert()
150 rb = &(*rb)->rb_right; thread_stat_insert()
155 rb_link_node(&new->rb, parent, rb); thread_stat_insert()
156 rb_insert_color(&new->rb, &thread_stats); thread_stat_insert()
197 rb_link_node(&st->rb, NULL, &thread_stats.rb_node); thread_stat_findnew_first()
198 rb_insert_color(&st->rb, &thread_stats); thread_stat_findnew_first()
280 struct rb_node **rb = &result.rb_node; insert_to_result() local
284 while (*rb) { insert_to_result()
285 p = container_of(*rb, struct lock_stat, rb); insert_to_result()
286 parent = *rb; insert_to_result()
289 rb = &(*rb)->rb_left; insert_to_result()
291 rb = &(*rb)->rb_right; insert_to_result()
294 rb_link_node(&st->rb, parent, rb); insert_to_result()
295 rb_insert_color(&st->rb, &result); insert_to_result()
310 return container_of(node, struct lock_stat, rb); pop_from_result()
768 st = container_of(node, struct thread_stat, rb); dump_threads()
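thread_stat_insert() and insert_to_result() above are the classic open-coded insert: walk the rb_node ** links until an empty slot is found, remember the parent, then rb_link_node() followed by rb_insert_color() to rebalance. A minimal sketch of the same walk with hypothetical names (struct stat_node keyed by tid is not a perf type):

struct stat_node {
	struct rb_node rb;
	u32 tid;
};

static void stat_node_insert(struct rb_root *root, struct stat_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct stat_node *cur = container_of(*p, struct stat_node, rb);

		parent = *p;
		if (new->tid < cur->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new->rb, parent, p);	/* link into the empty slot */
	rb_insert_color(&new->rb, root);	/* recolour/rotate to rebalance */
}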
H A Dbuiltin-kvm.c480 struct rb_node **rb = &result->rb_node; insert_to_result() local
484 while (*rb) { insert_to_result()
485 p = container_of(*rb, struct kvm_event, rb); insert_to_result()
486 parent = *rb; insert_to_result()
489 rb = &(*rb)->rb_left; insert_to_result()
491 rb = &(*rb)->rb_right; insert_to_result()
494 rb_link_node(&event->rb, parent, rb); insert_to_result()
495 rb_insert_color(&event->rb, result); insert_to_result()
538 return container_of(node, struct kvm_event, rb); pop_from_result()
/linux-4.1.27/arch/powerpc/kvm/
H A Dbook3s_pr_papr.c90 unsigned long v = 0, pteg, rb; kvmppc_h_pr_remove() local
108 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); kvmppc_h_pr_remove()
109 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_remove()
150 unsigned long pteg, rb, flags; kvmppc_h_pr_bulk_remove() local
189 rb = compute_tlbie_rb(pte[0], pte[1], kvmppc_h_pr_bulk_remove()
191 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_bulk_remove()
208 unsigned long rb, pteg, r, v; kvmppc_h_pr_protect() local
233 rb = compute_tlbie_rb(v, r, pte_index); kvmppc_h_pr_protect()
234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_protect()
H A De500_emulate.c52 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) kvmppc_e500_emul_msgclr() argument
54 ulong param = vcpu->arch.gpr[rb]; kvmppc_e500_emul_msgclr()
64 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) kvmppc_e500_emul_msgsnd() argument
66 ulong param = vcpu->arch.gpr[rb]; kvmppc_e500_emul_msgsnd()
67 int prio = dbell2prio(rb); kvmppc_e500_emul_msgsnd()
121 int rb = get_rb(inst); kvmppc_core_emulate_op_e500() local
135 emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); kvmppc_core_emulate_op_e500()
139 emulated = kvmppc_e500_emul_msgclr(vcpu, rb); kvmppc_core_emulate_op_e500()
152 ea = kvmppc_get_ea_indexed(vcpu, ra, rb); kvmppc_core_emulate_op_e500()
158 ea = kvmppc_get_ea_indexed(vcpu, ra, rb); kvmppc_core_emulate_op_e500()
164 ea = kvmppc_get_ea_indexed(vcpu, ra, rb); kvmppc_core_emulate_op_e500()
H A Dbook3s_64_mmu.c378 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) kvmppc_mmu_book3s_64_slbmte() argument
385 dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); kvmppc_mmu_book3s_64_slbmte()
389 esid = GET_ESID(rb); kvmppc_mmu_book3s_64_slbmte()
390 esid_1t = GET_ESID_1T(rb); kvmppc_mmu_book3s_64_slbmte()
391 slb_nr = rb & 0xfff; kvmppc_mmu_book3s_64_slbmte()
402 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; kvmppc_mmu_book3s_64_slbmte()
423 slbe->orige = rb & (ESID_MASK | SLB_ESID_V); kvmppc_mmu_book3s_64_slbmte()
497 u64 rb = 0, rs = 0; kvmppc_mmu_book3s_64_mtsrin() local
520 rb |= (srnum & 0xf) << 28; kvmppc_mmu_book3s_64_mtsrin()
522 rb |= 1 << 27; kvmppc_mmu_book3s_64_mtsrin()
524 rb |= srnum; kvmppc_mmu_book3s_64_mtsrin()
531 kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); kvmppc_mmu_book3s_64_mtsrin()
H A Dbook3s_hv_ras.c54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); reload_slb() local
57 rb = (rb & ~0xFFFul) | i; /* insert entry number */ reload_slb()
58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); reload_slb()
H A Dbook3s_emulate.c96 int rb = get_rb(inst); kvmppc_core_emulate_op_pr() local
166 srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf; kvmppc_core_emulate_op_pr()
181 (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf, kvmppc_core_emulate_op_pr()
188 ulong addr = kvmppc_get_gpr(vcpu, rb); kvmppc_core_emulate_op_pr()
228 kvmppc_get_gpr(vcpu, rb)); kvmppc_core_emulate_op_pr()
235 kvmppc_get_gpr(vcpu, rb)); kvmppc_core_emulate_op_pr()
249 rb_val = kvmppc_get_gpr(vcpu, rb); kvmppc_core_emulate_op_pr()
260 rb_val = kvmppc_get_gpr(vcpu, rb); kvmppc_core_emulate_op_pr()
270 ulong rb_val = kvmppc_get_gpr(vcpu, rb); kvmppc_core_emulate_op_pr()
673 ulong rb = get_rb(inst); kvmppc_alignment_dar()
687 dar += kvmppc_get_gpr(vcpu, rb); kvmppc_alignment_dar()
H A Dbook3s_hv_rm_mmu.c404 unsigned long v, r, rb; kvmppc_do_h_remove() local
425 rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index); kvmppc_do_h_remove()
426 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); kvmppc_do_h_remove()
573 unsigned long v, r, rb, mask, bits; kvmppc_h_protect() local
617 rb = compute_tlbie_rb(v, r, pte_index); kvmppc_h_protect()
620 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), kvmppc_h_protect()
667 unsigned long rb; kvmppc_invalidate_hpte() local
670 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), kvmppc_invalidate_hpte()
672 do_tlbies(kvm, &rb, 1, 1, true); kvmppc_invalidate_hpte()
679 unsigned long rb; kvmppc_clear_ref_hpte() local
682 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), kvmppc_clear_ref_hpte()
687 do_tlbies(kvm, &rb, 1, 1, false); kvmppc_clear_ref_hpte()
/linux-4.1.27/arch/powerpc/lib/
H A Dsstep.c131 int ra, rb; xform_ea() local
135 rb = (instr >> 11) & 0x1f; xform_ea()
136 ea = regs->gpr[rb]; xform_ea()
645 unsigned int opcode, ra, rb, rd, spr, u; analyse_instr() local
728 rb = (instr >> 11) & 0x1f; analyse_instr()
731 rb = (regs->ccr >> (31 - rb)) & 1; analyse_instr()
732 val = (instr >> (6 + ra * 2 + rb)) & 1; analyse_instr()
769 rb = (instr >> 11) & 0x1f; analyse_instr()
842 regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); analyse_instr()
849 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); analyse_instr()
855 rb = regs->gpr[rb] & 0x1f; analyse_instr()
857 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me); analyse_instr()
897 sh = rb | ((instr & 2) << 4); analyse_instr()
916 sh = regs->gpr[rb] & 0x3f; analyse_instr()
934 (int)regs->gpr[rb]))) analyse_instr()
939 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) analyse_instr()
1031 val2 = regs->gpr[rb]; analyse_instr()
1044 val2 = regs->gpr[rb]; analyse_instr()
1060 regs->gpr[rb], 1); analyse_instr()
1065 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); analyse_instr()
1070 regs->gpr[rb], 0); analyse_instr()
1075 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); analyse_instr()
1079 regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra]; analyse_instr()
1084 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); analyse_instr()
1089 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); analyse_instr()
1097 add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb], analyse_instr()
1102 add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb], analyse_instr()
1122 regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb]; analyse_instr()
1132 (unsigned int) regs->gpr[rb]; analyse_instr()
1136 regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb]; analyse_instr()
1140 regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb]; analyse_instr()
1145 (unsigned int) regs->gpr[rb]; analyse_instr()
1150 (long int) regs->gpr[rb]; analyse_instr()
1155 (int) regs->gpr[rb]; analyse_instr()
1173 regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb]; analyse_instr()
1177 regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb]; analyse_instr()
1181 regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]); analyse_instr()
1185 regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]); analyse_instr()
1189 regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb]; analyse_instr()
1193 regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb]; analyse_instr()
1197 regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb]; analyse_instr()
1201 regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]); analyse_instr()
1221 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1229 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1237 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1247 sh = rb; analyse_instr()
1258 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1266 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1274 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1285 sh = rb | ((instr & 2) << 4); analyse_instr()
1445 if (rb == 0) analyse_instr()
1446 rb = 32; /* # bytes to load */ analyse_instr()
1447 op->type = MKOP(LOAD_MULTI, 0, rb); analyse_instr()
1501 if (rb == 0) analyse_instr()
1502 rb = 32; /* # bytes to store */ analyse_instr()
1503 op->type = MKOP(STORE_MULTI, 0, rb); analyse_instr()
/linux-4.1.27/fs/jffs2/
H A Dnodelist.h230 struct rb_node rb; member in struct:jffs2_tmp_dnode_info
271 struct rb_node rb; member in struct:jffs2_node_frag
334 return rb_entry(node, struct jffs2_node_frag, rb); frag_first()
344 return rb_entry(node, struct jffs2_node_frag, rb); frag_last()
347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
349 #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
352 #define frag_erase(frag, list) rb_erase(&frag->rb, list);
354 #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
355 #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
356 #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
357 #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
358 #define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb)
359 #define tn_erase(tn, list) rb_erase(&tn->rb, list);
360 #define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
361 #define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb)
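The frag_first/frag_next/frag_prev family above is shorthand for rb_first()/rb_next()/rb_prev() plus rb_entry() on the fragtree. A hedged sketch of the walk the macros stand for, visiting fragments in increasing offset order (assuming fragtree is the inode's struct rb_root *; the pr_debug() body is illustrative only):

struct rb_node *node;

for (node = rb_first(fragtree); node; node = rb_next(node)) {
	struct jffs2_node_frag *frag = rb_entry(node, struct jffs2_node_frag, rb);

	pr_debug("frag 0x%08x..0x%08x\n", frag->ofs, frag->ofs + frag->size);
}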
H A Dnodelist.c124 struct rb_node *parent = &base->rb; jffs2_fragtree_insert()
131 base = rb_entry(parent, struct jffs2_node_frag, rb); jffs2_fragtree_insert()
134 link = &base->rb.rb_right; jffs2_fragtree_insert()
136 link = &base->rb.rb_left; jffs2_fragtree_insert()
143 rb_link_node(&newfrag->rb, &base->rb, link); jffs2_fragtree_insert()
189 rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); no_overlapping_node()
193 rb_link_node(&holefrag->rb, NULL, &root->rb_node); no_overlapping_node()
195 rb_insert_color(&holefrag->rb, root); no_overlapping_node()
204 rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); no_overlapping_node()
207 rb_link_node(&newfrag->rb, NULL, &root->rb_node); no_overlapping_node()
209 rb_insert_color(&newfrag->rb, root); no_overlapping_node()
298 rb_insert_color(&newfrag->rb, root); jffs2_add_frag_to_fragtree()
301 rb_insert_color(&newfrag2->rb, root); jffs2_add_frag_to_fragtree()
310 rb_insert_color(&newfrag->rb, root); jffs2_add_frag_to_fragtree()
317 rb_replace_node(&this->rb, &newfrag->rb, root); jffs2_add_frag_to_fragtree()
327 rb_insert_color(&this->rb, root); jffs2_add_frag_to_fragtree()
338 rb_erase(&this->rb, root); jffs2_add_frag_to_fragtree()
537 frag = rb_entry(next, struct jffs2_node_frag, rb); jffs2_lookup_node_frag()
543 next = frag->rb.rb_right; jffs2_lookup_node_frag()
545 next = frag->rb.rb_left; jffs2_lookup_node_frag()
570 rbtree_postorder_for_each_entry_safe(frag, next, root, rb) { rbtree_postorder_for_each_entry_safe()
H A Dreadinode.c185 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); jffs2_lookup_tn()
188 next = tn->rb.rb_right; jffs2_lookup_tn()
190 next = tn->rb.rb_left; jffs2_lookup_tn()
286 rb_replace_node(&this->rb, &tn->rb, &rii->tn_root); jffs2_add_tn_to_tree()
344 insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); jffs2_add_tn_to_tree()
346 link = &insert_point->rb.rb_right; jffs2_add_tn_to_tree()
349 link = &insert_point->rb.rb_left; jffs2_add_tn_to_tree()
351 link = &insert_point->rb.rb_right; jffs2_add_tn_to_tree()
353 rb_link_node(&tn->rb, &insert_point->rb, link); jffs2_add_tn_to_tree()
354 rb_insert_color(&tn->rb, &rii->tn_root); jffs2_add_tn_to_tree()
432 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); ver_insert()
440 rb_link_node(&tn->rb, parent, link); ver_insert()
441 rb_insert_color(&tn->rb, ver_root); ver_insert()
476 eat_last(&rii->tn_root, &last->rb); jffs2_build_inode_fragtree()
499 eat_last(&ver_root, &this->rb); jffs2_build_inode_fragtree()
532 eat_last(&ver_root, &vers_next->rb); jffs2_build_inode_fragtree()
548 rbtree_postorder_for_each_entry_safe(tn, next, list, rb) { rbtree_postorder_for_each_entry_safe()
/linux-4.1.27/drivers/gpu/drm/msm/adreno/
H A Dadreno_gpu.c60 ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova); adreno_hw_init()
70 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | adreno_hw_init()
107 gpu->rb->cur = gpu->rb->start; adreno_recover()
127 struct msm_ringbuffer *ring = gpu->rb; adreno_submit()
196 uint32_t wptr = get_wptr(gpu->rb); adreno_flush()
207 uint32_t wptr = get_wptr(gpu->rb); adreno_idle()
231 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); adreno_show()
267 printk("rb wptr: %d\n", get_wptr(gpu->rb)); adreno_dump()
286 uint32_t size = gpu->rb->size / 4; ring_freewords()
287 uint32_t wptr = get_wptr(gpu->rb); ring_freewords()
H A Da4xx_gpu.c112 struct msm_ringbuffer *ring = gpu->rb; a4xx_me_init()
H A Da3xx_gpu.c46 struct msm_ringbuffer *ring = gpu->rb; a3xx_me_init()
/linux-4.1.27/fs/nilfs2/
H A Drecovery.c353 struct nilfs_recovery_block *rb; nilfs_scan_dsync_log() local
361 rb = kmalloc(sizeof(*rb), GFP_NOFS); nilfs_scan_dsync_log()
362 if (unlikely(!rb)) { nilfs_scan_dsync_log()
366 rb->ino = ino; nilfs_scan_dsync_log()
367 rb->blocknr = blocknr++; nilfs_scan_dsync_log()
368 rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr); nilfs_scan_dsync_log()
369 rb->blkoff = le64_to_cpu(binfo->bi_blkoff); nilfs_scan_dsync_log()
370 /* INIT_LIST_HEAD(&rb->list); */ nilfs_scan_dsync_log()
371 list_add_tail(&rb->list, head); nilfs_scan_dsync_log()
390 struct nilfs_recovery_block *rb; dispose_recovery_list() local
392 rb = list_first_entry(head, struct nilfs_recovery_block, list); dispose_recovery_list()
393 list_del(&rb->list); dispose_recovery_list()
394 kfree(rb); dispose_recovery_list()
486 struct nilfs_recovery_block *rb, nilfs_recovery_copy_block()
492 bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize); nilfs_recovery_copy_block()
510 struct nilfs_recovery_block *rb, *n; nilfs_recover_dsync_blocks() local
516 list_for_each_entry_safe(rb, n, head, list) { list_for_each_entry_safe()
517 inode = nilfs_iget(sb, root, rb->ino); list_for_each_entry_safe()
524 pos = rb->blkoff << inode->i_blkbits; list_for_each_entry_safe()
535 err = nilfs_recovery_copy_block(nilfs, rb, page); list_for_each_entry_safe()
560 err, (unsigned long)rb->ino, list_for_each_entry_safe()
561 (unsigned long long)rb->blkoff); list_for_each_entry_safe()
566 list_del_init(&rb->list); list_for_each_entry_safe()
567 kfree(rb); list_for_each_entry_safe()
485 nilfs_recovery_copy_block(struct the_nilfs *nilfs, struct nilfs_recovery_block *rb, struct page *page) nilfs_recovery_copy_block() argument
H A Dthe_nilfs.h79 * @ns_cptree: rb-tree of all mounted checkpoints (nilfs_root)
/linux-4.1.27/arch/powerpc/include/asm/
H A Dasm-compat.h76 #define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
79 #define PPC405_ERR77(ra,rb)
H A Dkvm_book3s_64.h137 unsigned long rb = 0, va_low, sllp; compute_tlbie_rb() local
162 rb = (v & ~0x7fUL) << 16; /* AVA field */ compute_tlbie_rb()
164 rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */ compute_tlbie_rb()
188 rb |= sllp << 5; /* AP field */ compute_tlbie_rb()
189 rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */ compute_tlbie_rb()
198 rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000; compute_tlbie_rb()
202 rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1); compute_tlbie_rb()
209 rb |= ((va_low << aval_shift) & 0xfe); compute_tlbie_rb()
211 rb |= 1; /* L field */ compute_tlbie_rb()
213 rb |= penc << 12; /* LP field */ compute_tlbie_rb()
217 rb |= (v >> 54) & 0x300; /* B field */ compute_tlbie_rb()
218 return rb; compute_tlbie_rb()
H A Dppc_asm.h27 #define ACCOUNT_CPU_USER_ENTRY(ra, rb)
28 #define ACCOUNT_CPU_USER_EXIT(ra, rb)
31 #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
33 ld rb,PACA_STARTTIME_USER(r13); \
35 subf rb,rb,ra; /* subtract start value */ \
37 add ra,ra,rb; /* add on to user time */ \
40 #define ACCOUNT_CPU_USER_EXIT(ra, rb) \
42 ld rb,PACA_STARTTIME(r13); \
44 subf rb,rb,ra; /* subtract start value */ \
46 add ra,ra,rb; /* add on to system time */ \
426 #define SET_DEFAULT_THREAD_PPR(ra, rb) \
429 ld rb,PACACURRENT(r13); \
431 std ra,TASKTHREADPPR(rb); \
534 #define FIX_SRR1(ra, rb)
H A Dexception-64s.h119 #define SAVE_PPR(area, ra, rb) \
122 ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \
123 std rb,TASKTHREADPPR(ra); \
H A Dkvm_ppc.h663 static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb) kvmppc_get_ea_indexed() argument
668 ea = kvmppc_get_gpr(vcpu, rb); kvmppc_get_ea_indexed()
H A Dkvm_host.h337 void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
H A Dqe.h531 __be16 rbptr; /* rb BD Pointer */
/linux-4.1.27/tools/testing/selftests/powerpc/primitives/asm/
H A Dasm-compat.h76 #define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
79 #define PPC405_ERR77(ra,rb)
/linux-4.1.27/arch/mips/include/asm/mach-rc32434/
H A Dirq.h7 #include <asm/mach-rc32434/rb.h>
H A Ddma.h14 #include <asm/mach-rc32434/rb.h>
H A Dinteg.h32 #include <asm/mach-rc32434/rb.h>
H A Dtimer.h32 #include <asm/mach-rc32434/rb.h>
H A Dddr.h32 #include <asm/mach-rc32434/rb.h>
/linux-4.1.27/arch/powerpc/kernel/
H A Dmce_power.c33 unsigned long rb; flush_tlb_206() local
38 rb = TLBIEL_INVAL_SET; flush_tlb_206()
41 rb = TLBIEL_INVAL_SET_LPID; flush_tlb_206()
50 asm volatile("tlbiel %0" : : "r" (rb)); flush_tlb_206()
51 rb += 1 << TLBIEL_INVAL_SET_SHIFT; flush_tlb_206()
107 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); flush_and_reload_slb() local
110 rb = (rb & ~0xFFFul) | i; flush_and_reload_slb()
111 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); flush_and_reload_slb()
H A Dbtext.c425 static void draw_byte_32(unsigned char *font, unsigned int *base, int rb) draw_byte_32() argument
442 base = (unsigned int *) ((char *)base + rb); draw_byte_32()
446 static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb) draw_byte_16() argument
460 base = (unsigned int *) ((char *)base + rb); draw_byte_16()
464 static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb) draw_byte_8() argument
476 base = (unsigned int *) ((char *)base + rb); draw_byte_8()
484 int rb = dispDeviceRowBytes; draw_byte() local
490 draw_byte_32(font, (unsigned int *)base, rb); draw_byte()
494 draw_byte_16(font, (unsigned int *)base, rb); draw_byte()
497 draw_byte_8(font, (unsigned int *)base, rb); draw_byte()
H A Dkvm.c376 static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) kvm_patch_ins_mtsrin() argument
401 p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10); kvm_patch_ins_mtsrin()
H A Dtime.c1082 u64 ra, rb, rc; div128_by_32() local
1092 rb = ((u64) do_div(ra, divisor) << 32) + c; div128_by_32()
1095 rc = ((u64) do_div(rb, divisor) << 32) + d; div128_by_32()
1096 y = rb; div128_by_32()
H A Deeh_cache.c126 /* Insert address range into the rb tree. */
H A Dtraps.c350 unsigned int rb; check_io_access() local
353 rb = (*nip >> 11) & 0x1f; check_io_access()
356 regs->gpr[rb] - _IO_BASE, nip); check_io_access()
/linux-4.1.27/fs/ocfs2/
H A Drefcounttree.c84 struct ocfs2_refcount_block *rb = ocfs2_validate_refcount_block() local
96 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check); ocfs2_validate_refcount_block()
104 if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) { ocfs2_validate_refcount_block()
108 rb->rf_signature); ocfs2_validate_refcount_block()
112 if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) { ocfs2_validate_refcount_block()
117 (unsigned long long)le64_to_cpu(rb->rf_blkno)); ocfs2_validate_refcount_block()
121 if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) { ocfs2_validate_refcount_block()
126 le32_to_cpu(rb->rf_fs_generation)); ocfs2_validate_refcount_block()
460 struct ocfs2_refcount_block *rb; ocfs2_lock_refcount_tree() local
487 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_lock_refcount_tree()
492 * Here we just remove the tree from the rb-tree, and the last ocfs2_lock_refcount_tree()
497 if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) { ocfs2_lock_refcount_tree()
569 struct ocfs2_refcount_block *rb; ocfs2_create_refcount_tree() local
631 rb = (struct ocfs2_refcount_block *)new_bh->b_data; ocfs2_create_refcount_tree()
632 memset(rb, 0, inode->i_sb->s_blocksize); ocfs2_create_refcount_tree()
633 strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); ocfs2_create_refcount_tree()
634 rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); ocfs2_create_refcount_tree()
635 rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); ocfs2_create_refcount_tree()
636 rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); ocfs2_create_refcount_tree()
637 rb->rf_fs_generation = cpu_to_le32(osb->fs_generation); ocfs2_create_refcount_tree()
638 rb->rf_blkno = cpu_to_le64(first_blkno); ocfs2_create_refcount_tree()
639 rb->rf_count = cpu_to_le32(1); ocfs2_create_refcount_tree()
640 rb->rf_records.rl_count = ocfs2_create_refcount_tree()
643 rb->rf_generation = osb->s_next_generation++; ocfs2_create_refcount_tree()
662 new_tree->rf_generation = le32_to_cpu(rb->rf_generation); ocfs2_create_refcount_tree()
710 struct ocfs2_refcount_block *rb; ocfs2_set_refcount_tree() local
743 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_set_refcount_tree()
744 le32_add_cpu(&rb->rf_count, 1); ocfs2_set_refcount_tree()
771 struct ocfs2_refcount_block *rb; ocfs2_remove_refcount_tree() local
790 rb = (struct ocfs2_refcount_block *)blk_bh->b_data; ocfs2_remove_refcount_tree()
796 if (le32_to_cpu(rb->rf_count) == 1) { ocfs2_remove_refcount_tree()
797 blk = le64_to_cpu(rb->rf_blkno); ocfs2_remove_refcount_tree()
798 bit = le16_to_cpu(rb->rf_suballoc_bit); ocfs2_remove_refcount_tree()
799 if (rb->rf_suballoc_loc) ocfs2_remove_refcount_tree()
800 bg_blkno = le64_to_cpu(rb->rf_suballoc_loc); ocfs2_remove_refcount_tree()
806 le16_to_cpu(rb->rf_suballoc_slot)); ocfs2_remove_refcount_tree()
851 le32_add_cpu(&rb->rf_count , -1); ocfs2_remove_refcount_tree()
854 if (!rb->rf_count) { ocfs2_remove_refcount_tree()
891 struct ocfs2_refcount_block *rb = ocfs2_find_refcount_rec_in_rl() local
895 for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) { ocfs2_find_refcount_rec_in_rl()
896 rec = &rb->rf_records.rl_recs[i]; ocfs2_find_refcount_rec_in_rl()
914 if (i < le16_to_cpu(rb->rf_records.rl_used) && ocfs2_find_refcount_rec_in_rl()
1080 struct ocfs2_refcount_block *rb = ocfs2_get_refcount_rec() local
1083 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) { ocfs2_get_refcount_rec()
1091 el = &rb->rf_list; ocfs2_get_refcount_rec()
1160 ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb, ocfs2_refcount_rec_adjacent() argument
1163 if ((rb->rf_records.rl_recs[index].r_refcount == ocfs2_refcount_rec_adjacent()
1164 rb->rf_records.rl_recs[index + 1].r_refcount) && ocfs2_refcount_rec_adjacent()
1165 (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) + ocfs2_refcount_rec_adjacent()
1166 le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) == ocfs2_refcount_rec_adjacent()
1167 le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos))) ocfs2_refcount_rec_adjacent()
1174 ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb, ocfs2_refcount_rec_contig() argument
1179 if (index < le16_to_cpu(rb->rf_records.rl_used) - 1) ocfs2_refcount_rec_contig()
1180 ret = ocfs2_refcount_rec_adjacent(rb, index); ocfs2_refcount_rec_contig()
1185 tmp = ocfs2_refcount_rec_adjacent(rb, index - 1); ocfs2_refcount_rec_contig()
1198 static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb, ocfs2_rotate_refcount_rec_left() argument
1201 BUG_ON(rb->rf_records.rl_recs[index].r_refcount != ocfs2_rotate_refcount_rec_left()
1202 rb->rf_records.rl_recs[index+1].r_refcount); ocfs2_rotate_refcount_rec_left()
1204 le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters, ocfs2_rotate_refcount_rec_left()
1205 le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters)); ocfs2_rotate_refcount_rec_left()
1207 if (index < le16_to_cpu(rb->rf_records.rl_used) - 2) ocfs2_rotate_refcount_rec_left()
1208 memmove(&rb->rf_records.rl_recs[index + 1], ocfs2_rotate_refcount_rec_left()
1209 &rb->rf_records.rl_recs[index + 2], ocfs2_rotate_refcount_rec_left()
1211 (le16_to_cpu(rb->rf_records.rl_used) - index - 2)); ocfs2_rotate_refcount_rec_left()
1213 memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1], ocfs2_rotate_refcount_rec_left()
1215 le16_add_cpu(&rb->rf_records.rl_used, -1); ocfs2_rotate_refcount_rec_left()
1221 static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb, ocfs2_refcount_rec_merge() argument
1225 ocfs2_refcount_rec_contig(rb, index); ocfs2_refcount_rec_merge()
1235 ocfs2_rotate_refcount_rec_left(rb, index); ocfs2_refcount_rec_merge()
1238 ocfs2_rotate_refcount_rec_left(rb, index); ocfs2_refcount_rec_merge()
1251 struct ocfs2_refcount_block *rb = ocfs2_change_refcount_rec() local
1253 struct ocfs2_refcount_list *rl = &rb->rf_records; ocfs2_change_refcount_rec()
1279 ocfs2_refcount_rec_merge(rb, index); ocfs2_change_refcount_rec()
1466 struct ocfs2_refcount_block *rb = ocfs2_divide_leaf_refcount_block() local
1468 struct ocfs2_refcount_list *rl = &rb->rf_records; ocfs2_divide_leaf_refcount_block()
1487 * 5. dirty the new_rb and rb. ocfs2_divide_leaf_refcount_block()
1674 struct ocfs2_refcount_block *rb = ocfs2_adjust_refcount_rec() local
1678 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) ocfs2_adjust_refcount_rec()
1681 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; ocfs2_adjust_refcount_rec()
1682 old_cpos = le32_to_cpu(rb->rf_cpos); ocfs2_adjust_refcount_rec()
1738 rb->rf_cpos = cpu_to_le32(new_cpos); ocfs2_adjust_refcount_rec()
1757 struct ocfs2_refcount_block *rb = ocfs2_insert_refcount_rec() local
1759 struct ocfs2_refcount_list *rf_list = &rb->rf_records; ocfs2_insert_refcount_rec()
1762 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); ocfs2_insert_refcount_rec()
1784 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; ocfs2_insert_refcount_rec()
1785 rf_list = &rb->rf_records; ocfs2_insert_refcount_rec()
1811 ocfs2_refcount_rec_merge(rb, index); ocfs2_insert_refcount_rec()
1847 struct ocfs2_refcount_block *rb = ocfs2_split_refcount_rec() local
1849 struct ocfs2_refcount_list *rf_list = &rb->rf_records; ocfs2_split_refcount_rec()
1854 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); ocfs2_split_refcount_rec()
1914 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; ocfs2_split_refcount_rec()
1915 rf_list = &rb->rf_records; ocfs2_split_refcount_rec()
1982 ocfs2_refcount_rec_merge(rb, index); ocfs2_split_refcount_rec()
2095 struct ocfs2_refcount_block *rb = ocfs2_remove_refcount_extent() local
2099 BUG_ON(rb->rf_records.rl_used); ocfs2_remove_refcount_extent()
2104 le32_to_cpu(rb->rf_cpos)); ocfs2_remove_refcount_extent()
2107 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), ocfs2_remove_refcount_extent()
2121 le16_to_cpu(rb->rf_suballoc_slot), ocfs2_remove_refcount_extent()
2122 le64_to_cpu(rb->rf_suballoc_loc), ocfs2_remove_refcount_extent()
2123 le64_to_cpu(rb->rf_blkno), ocfs2_remove_refcount_extent()
2124 le16_to_cpu(rb->rf_suballoc_bit)); ocfs2_remove_refcount_extent()
2137 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_remove_refcount_extent()
2139 le32_add_cpu(&rb->rf_clusters, -1); ocfs2_remove_refcount_extent()
2145 if (!rb->rf_list.l_next_free_rec) { ocfs2_remove_refcount_extent()
2146 BUG_ON(rb->rf_clusters); ocfs2_remove_refcount_extent()
2151 rb->rf_flags = 0; ocfs2_remove_refcount_extent()
2152 rb->rf_parent = 0; ocfs2_remove_refcount_extent()
2153 rb->rf_cpos = 0; ocfs2_remove_refcount_extent()
2154 memset(&rb->rf_records, 0, sb->s_blocksize - ocfs2_remove_refcount_extent()
2156 rb->rf_records.rl_count = ocfs2_remove_refcount_extent()
2187 struct ocfs2_refcount_block *rb = ocfs2_decrease_refcount_rec() local
2189 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; ocfs2_decrease_refcount_rec()
2222 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { ocfs2_decrease_refcount_rec()
2395 struct ocfs2_refcount_block *rb; ocfs2_calc_refcount_meta_credits() local
2415 rb = (struct ocfs2_refcount_block *) ocfs2_calc_refcount_meta_credits()
2418 if (le16_to_cpu(rb->rf_records.rl_used) + ocfs2_calc_refcount_meta_credits()
2420 le16_to_cpu(rb->rf_records.rl_count)) ocfs2_calc_refcount_meta_credits()
2479 rb = (struct ocfs2_refcount_block *)prev_bh->b_data; ocfs2_calc_refcount_meta_credits()
2481 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add > ocfs2_calc_refcount_meta_credits()
2482 le16_to_cpu(rb->rf_records.rl_count)) ocfs2_calc_refcount_meta_credits()
2501 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_calc_refcount_meta_credits()
2502 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) { ocfs2_calc_refcount_meta_credits()
3565 struct ocfs2_refcount_block *rb; ocfs2_refcounted_xattr_delete_need() local
3592 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; ocfs2_refcounted_xattr_delete_need()
3601 if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > ocfs2_refcounted_xattr_delete_need()
3602 le16_to_cpu(rb->rf_records.rl_count)) ocfs2_refcounted_xattr_delete_need()
3621 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_refcounted_xattr_delete_need()
3622 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) ocfs2_refcounted_xattr_delete_need()
4142 struct ocfs2_refcount_block *rb; ocfs2_create_reflink_node() local
4169 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; ocfs2_create_reflink_node()
H A Docfs2.h59 * structure into a rb tree when necessary. */
460 /* rb tree root for refcount lock. */
H A Dxattr.c6283 struct ocfs2_refcount_block *rb = ocfs2_reflink_lock_xattr_allocators() local
6307 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) ocfs2_reflink_lock_xattr_allocators()
6308 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * ocfs2_reflink_lock_xattr_allocators()
6309 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; ocfs2_reflink_lock_xattr_allocators()
6751 struct ocfs2_refcount_block *rb; ocfs2_lock_reflink_xattr_rec_allocators() local
6775 rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data; ocfs2_lock_reflink_xattr_rec_allocators()
6782 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) ocfs2_lock_reflink_xattr_rec_allocators()
6783 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * ocfs2_lock_reflink_xattr_rec_allocators()
6784 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; ocfs2_lock_reflink_xattr_rec_allocators()
H A Dalloc.c392 struct ocfs2_refcount_block *rb = et->et_object; ocfs2_refcount_tree_fill_root_el() local
394 et->et_root_el = &rb->rf_list; ocfs2_refcount_tree_fill_root_el()
400 struct ocfs2_refcount_block *rb = et->et_object; ocfs2_refcount_tree_set_last_eb_blk() local
402 rb->rf_last_eb_blk = cpu_to_le64(blkno); ocfs2_refcount_tree_set_last_eb_blk()
407 struct ocfs2_refcount_block *rb = et->et_object; ocfs2_refcount_tree_get_last_eb_blk() local
409 return le64_to_cpu(rb->rf_last_eb_blk); ocfs2_refcount_tree_get_last_eb_blk()
415 struct ocfs2_refcount_block *rb = et->et_object; ocfs2_refcount_tree_update_clusters() local
417 le32_add_cpu(&rb->rf_clusters, clusters); ocfs2_refcount_tree_update_clusters()
/linux-4.1.27/drivers/block/
H A Drbd_types.h34 * rb.<idhi>.<idlo>.00000000
35 * rb.<idhi>.<idlo>.00000001
/linux-4.1.27/drivers/i2c/
H A Di2c-stub.c101 struct smbus_block_data *b, *rb = NULL; stub_find_block() local
105 rb = b; stub_find_block()
109 if (rb == NULL && create) { stub_find_block()
110 rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL); stub_find_block()
111 if (rb == NULL) stub_find_block()
112 return rb; stub_find_block()
113 rb->command = command; stub_find_block()
114 list_add(&rb->node, &chip->smbus_blocks); stub_find_block()
116 return rb; stub_find_block()
/linux-4.1.27/arch/mips/rb532/
H A Dsetup.c13 #include <asm/mach-rc32434/rb.h>
H A Dserial.c35 #include <asm/mach-rc32434/rb.h>
H A Dgpio.c37 #include <asm/mach-rc32434/rb.h>
H A Ddevices.c36 #include <asm/mach-rc32434/rb.h>
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dxprt_rdma.h128 rdmab_addr(struct rpcrdma_regbuf *rb) rdmab_addr() argument
130 return rb->rg_iov.addr; rdmab_addr()
134 rdmab_length(struct rpcrdma_regbuf *rb) rdmab_length() argument
136 return rb->rg_iov.length; rdmab_length()
140 rdmab_lkey(struct rpcrdma_regbuf *rb) rdmab_lkey() argument
142 return rb->rg_iov.lkey; rdmab_lkey()
146 rdmab_to_msg(struct rpcrdma_regbuf *rb) rdmab_to_msg() argument
148 return (struct rpcrdma_msg *)rb->rg_base; rdmab_to_msg()
271 struct rpcrdma_regbuf *rb; rpcr_to_rdmar() local
273 rb = container_of(buffer, struct rpcrdma_regbuf, rg_base); rpcr_to_rdmar()
274 return rb->rg_owner; rpcr_to_rdmar()
H A Dtransport.c495 struct rpcrdma_regbuf *rb; xprt_rdma_allocate() local
522 rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags); xprt_rdma_allocate()
523 if (IS_ERR(rb)) xprt_rdma_allocate()
525 req->rl_rdmabuf = rb; xprt_rdma_allocate()
549 rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags); xprt_rdma_allocate()
550 if (IS_ERR(rb)) xprt_rdma_allocate()
552 rb->rg_owner = req; xprt_rdma_allocate()
556 req->rl_sendbuf = rb; xprt_rdma_allocate()
573 struct rpcrdma_regbuf *rb; xprt_rdma_free() local
579 rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]); xprt_rdma_free()
580 req = rb->rg_owner; xprt_rdma_free()
H A Dverbs.c1534 struct rpcrdma_regbuf *rb; rpcrdma_alloc_regbuf() local
1538 rb = kmalloc(sizeof(*rb) + size, flags); rpcrdma_alloc_regbuf()
1539 if (rb == NULL) rpcrdma_alloc_regbuf()
1542 rb->rg_size = size; rpcrdma_alloc_regbuf()
1543 rb->rg_owner = NULL; rpcrdma_alloc_regbuf()
1544 rc = rpcrdma_register_internal(ia, rb->rg_base, size, rpcrdma_alloc_regbuf()
1545 &rb->rg_mr, &rb->rg_iov); rpcrdma_alloc_regbuf()
1549 return rb; rpcrdma_alloc_regbuf()
1552 kfree(rb); rpcrdma_alloc_regbuf()
1560 * @rb: regbuf to be deregistered and freed
1563 rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) rpcrdma_free_regbuf() argument
1565 if (rb) { rpcrdma_free_regbuf()
1566 rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov); rpcrdma_free_regbuf()
1567 kfree(rb); rpcrdma_free_regbuf()
/linux-4.1.27/drivers/mtd/ubi/
H A Dattach.c270 av = rb_entry(parent, struct ubi_ainf_volume, rb); add_volume()
297 rb_link_node(&av->rb, parent, p); add_volume()
298 rb_insert_color(&av->rb, &ai->volumes); add_volume()
481 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); ubi_add_to_av()
592 rb_link_node(&aeb->u.rb, parent, p); ubi_add_to_av()
593 rb_insert_color(&aeb->u.rb, &av->root); ubi_add_to_av()
612 av = rb_entry(p, struct ubi_ainf_volume, rb); ubi_find_av()
633 struct rb_node *rb; ubi_remove_av() local
638 while ((rb = rb_first(&av->root))) { ubi_remove_av()
639 aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); ubi_remove_av()
640 rb_erase(&aeb->u.rb, &av->root); ubi_remove_av()
644 rb_erase(&av->rb, &ai->volumes); ubi_remove_av()
1148 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); destroy_av()
1151 if (this->rb_left == &aeb->u.rb) destroy_av()
1171 struct rb_node *rb; destroy_ai() local
1191 rb = ai->volumes.rb_node; destroy_ai()
1192 while (rb) { destroy_ai()
1193 if (rb->rb_left) destroy_ai()
1194 rb = rb->rb_left; destroy_ai()
1195 else if (rb->rb_right) destroy_ai()
1196 rb = rb->rb_right; destroy_ai()
1198 av = rb_entry(rb, struct ubi_ainf_volume, rb); destroy_ai()
1200 rb = rb_parent(rb); destroy_ai()
1201 if (rb) { destroy_ai()
1202 if (rb->rb_left == &av->rb) destroy_ai()
1203 rb->rb_left = NULL; destroy_ai()
1205 rb->rb_right = NULL; destroy_ai()
1269 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { scan_all()
1270 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) scan_all()
1519 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { self_check_ai()
1562 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { self_check_ai()
1633 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { self_check_ai()
1635 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { self_check_ai()
1721 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) self_check_ai()
1722 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) self_check_ai()
H A Dwl.c161 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); wl_tree_add()
176 rb_link_node(&e->u.rb, parent, p); wl_tree_add()
177 rb_insert_color(&e->u.rb, root); wl_tree_add()
257 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in_wl_tree()
317 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); find_wl_entry()
324 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); find_wl_entry()
358 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); find_mean_wl_entry()
359 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); find_mean_wl_entry()
362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); find_mean_wl_entry()
398 rb_erase(&e->u.rb, &ubi->free); wl_get_wle()
700 rb_erase(&e1->u.rb, &ubi->used);
711 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
726 rb_erase(&e1->u.rb, &ubi->used);
732 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
738 rb_erase(&e1->u.rb, &ubi->scrub);
982 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); ensure_wear_leveling()
1213 rb_erase(&e->u.rb, &ubi->used); ubi_wl_put_peb()
1216 rb_erase(&e->u.rb, &ubi->scrub); ubi_wl_put_peb()
1219 rb_erase(&e->u.rb, &ubi->erroneous); ubi_wl_put_peb()
1288 rb_erase(&e->u.rb, &ubi->used); ubi_wl_scrub_peb()
1381 struct rb_node *rb; tree_destroy() local
1384 rb = root->rb_node; tree_destroy()
1385 while (rb) { tree_destroy()
1386 if (rb->rb_left) tree_destroy()
1387 rb = rb->rb_left; tree_destroy()
1388 else if (rb->rb_right) tree_destroy()
1389 rb = rb->rb_right; tree_destroy()
1391 e = rb_entry(rb, struct ubi_wl_entry, u.rb); tree_destroy()
1393 rb = rb_parent(rb); tree_destroy()
1394 if (rb) { tree_destroy()
1395 if (rb->rb_left == &e->u.rb) tree_destroy()
1396 rb->rb_left = NULL; tree_destroy()
1398 rb->rb_right = NULL; tree_destroy()
1555 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { ubi_wl_init()
1556 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { ubi_wl_init()
1764 rb_erase(&e->u.rb, &ubi->free); get_peb_for_wl()
H A Dfastmap-wl.c41 ubi_rb_for_each_entry(p, e, root, u.rb) { ubi_rb_for_each_entry()
74 ubi_rb_for_each_entry(p, e, root, u.rb) anchor_pebs_avalible()
109 rb_erase(&e->u.rb, &ubi->free); ubi_wl_get_fm_peb()
156 rb_erase(&e->u.rb, &ubi->free); ubi_refill_pools()
359 struct ubi_wl_entry, u.rb); may_reserve_for_fm()
H A Dubi.h167 * @u.rb: link in the corresponding (free/used) RB-tree
178 struct rb_node rb; member in union:ubi_wl_entry::__anon6046
187 * @rb: links RB-tree nodes
200 struct rb_node rb; member in struct:ubi_ltree_entry
639 * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
655 struct rb_node rb; member in union:ubi_ainf_peb::__anon6047
674 * @rb: link in the volume RB-tree
690 struct rb_node rb; member in struct:ubi_ainf_volume
941 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
950 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
959 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
973 * @rb: a pointer to type 'struct rb_node' to use as a loop counter
978 #define ubi_rb_for_each_entry(rb, pos, root, member) \
979 for (rb = rb_first(root), \
980 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
981 rb; \
982 rb = rb_next(rb), \
983 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
996 rb_erase(&aeb->u.rb, &av->root); ubi_move_aeb_to_list()
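ubi_rb_for_each_entry() above is an rb_first()/rb_next() loop that hands back the container of each node; the NULL check in the pos assignment is what terminates it cleanly on an empty tree. A hedged usage sketch over the free-PEB tree (the erase-counter averaging is illustrative, not UBI code):

struct ubi_wl_entry *e;
struct rb_node *node;
long long sum_ec = 0;
int cnt = 0;

ubi_rb_for_each_entry(node, e, &ubi->free, u.rb) {
	sum_ec += e->ec;	/* e visits every entry linked into the tree */
	cnt++;
}
if (cnt)
	pr_debug("average erase counter of free PEBs: %lld\n", sum_ec / cnt);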
H A Deba.c105 le = rb_entry(p, struct ubi_ltree_entry, rb); ltree_lookup()
171 le1 = rb_entry(parent, struct ubi_ltree_entry, rb); ltree_add_entry()
186 rb_link_node(&le->rb, parent, p); ltree_add_entry()
187 rb_insert_color(&le->rb, &ubi->ltree); ltree_add_entry()
232 rb_erase(&le->rb, &ubi->ltree); leb_read_unlock()
284 rb_erase(&le->rb, &ubi->ltree); leb_write_trylock()
308 rb_erase(&le->rb, &ubi->ltree); leb_write_unlock()
1321 struct rb_node *rb; self_check_eba() local
1361 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) self_check_eba()
1368 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) self_check_eba()
1413 struct rb_node *rb; ubi_eba_init() local
1445 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { ubi_eba_init()
H A Dfastmap.c191 av = rb_entry(parent, struct ubi_ainf_volume, rb); add_vol()
215 rb_link_node(&av->rb, parent, p); add_vol()
216 rb_insert_color(&av->rb, &ai->volumes); add_vol()
240 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); assign_aeb_to_av()
255 rb_link_node(&aeb->u.rb, parent, p); assign_aeb_to_av()
256 rb_insert_color(&aeb->u.rb, &av->root); assign_aeb_to_av()
279 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); update_vol()
350 rb_link_node(&new_aeb->u.rb, parent, p); update_vol()
351 rb_insert_color(&new_aeb->u.rb, &av->root); update_vol()
383 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); process_pool_aeb()
423 av = rb_entry(node, struct ubi_ainf_volume, rb); unmap_peb()
427 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); unmap_peb()
429 rb_erase(&aeb->u.rb, &av->root); unmap_peb()
579 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) count_fastmap_pebs()
580 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) count_fastmap_pebs()
H A Dvtbl.c373 struct rb_node *rb; process_lvol() local
406 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { process_lvol()
/linux-4.1.27/drivers/tty/hvc/
H A Dhvc_iucv.c221 struct iucv_tty_buffer *rb; hvc_iucv_write() local
239 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); hvc_iucv_write()
242 if (!rb->mbuf) { /* message not yet received ... */ hvc_iucv_write()
245 rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA); hvc_iucv_write()
246 if (!rb->mbuf) hvc_iucv_write()
249 rc = __iucv_message_receive(priv->path, &rb->msg, 0, hvc_iucv_write()
250 rb->mbuf, rb->msg.length, NULL); hvc_iucv_write()
262 if (rc || (rb->mbuf->version != MSG_VERSION) || hvc_iucv_write()
263 (rb->msg.length != MSG_SIZE(rb->mbuf->datalen))) hvc_iucv_write()
267 switch (rb->mbuf->type) { hvc_iucv_write()
269 written = min_t(int, rb->mbuf->datalen - rb->offset, count); hvc_iucv_write()
270 memcpy(buf, rb->mbuf->data + rb->offset, written); hvc_iucv_write()
271 if (written < (rb->mbuf->datalen - rb->offset)) { hvc_iucv_write()
272 rb->offset += written; hvc_iucv_write()
279 if (rb->mbuf->datalen != sizeof(struct winsize)) hvc_iucv_write()
283 __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); hvc_iucv_write()
293 list_del(&rb->list); hvc_iucv_write()
294 destroy_tty_buffer(rb); hvc_iucv_write()
906 struct iucv_tty_buffer *rb; hvc_iucv_msg_pending() local
923 rb = alloc_tty_buffer(0, GFP_ATOMIC); hvc_iucv_msg_pending()
924 if (!rb) { hvc_iucv_msg_pending()
928 rb->msg = *msg; hvc_iucv_msg_pending()
930 list_add_tail(&rb->list, &priv->tty_inqueue); hvc_iucv_msg_pending()
/linux-4.1.27/fs/ubifs/
H A Dlog.c50 bud = rb_entry(p, struct ubifs_bud, rb); ubifs_search_bud()
83 bud = rb_entry(p, struct ubifs_bud, rb); ubifs_get_wbuf()
134 b = rb_entry(parent, struct ubifs_bud, rb); ubifs_add_bud()
142 rb_link_node(&bud->rb, parent, p); ubifs_add_bud()
143 rb_insert_color(&bud->rb, &c->buds); ubifs_add_bud()
314 bud = rb_entry(p1, struct ubifs_bud, rb); remove_buds()
536 * @rb: rb-tree node
540 struct rb_node rb; member in struct:done_ref
546 * @done_tree: rb-tree to store references that have been done
559 dr = rb_entry(parent, struct done_ref, rb); done_already()
574 rb_link_node(&dr->rb, parent, p); done_already()
575 rb_insert_color(&dr->rb, done_tree); done_already()
588 rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb) destroy_done_tree()
H A Dorphan.c46 * Orphans are accumulated in a rb-tree. When an inode's link count drops to
47 * zero, the inode number is added to the rb-tree. It is removed from the tree
85 o = rb_entry(parent, struct ubifs_orphan, rb); ubifs_add_orphan()
99 rb_link_node(&orphan->rb, parent, p); ubifs_add_orphan()
100 rb_insert_color(&orphan->rb, &c->orph_tree); ubifs_add_orphan()
123 o = rb_entry(p, struct ubifs_orphan, rb); ubifs_delete_orphan()
451 rb_erase(&orphan->rb, &c->orph_tree); erase_deleted()
509 * must be kept until the next commit, so it is added to the rb-tree and the
525 o = rb_entry(parent, struct ubifs_orphan, rb); insert_dead_orphan()
537 rb_link_node(&orphan->rb, parent, p); insert_dead_orphan()
538 rb_insert_color(&orphan->rb, &c->orph_tree); insert_dead_orphan()
734 struct rb_node rb; member in struct:check_orphan
755 o = rb_entry(p, struct ubifs_orphan, rb); dbg_find_orphan()
782 o = rb_entry(parent, struct check_orphan, rb); dbg_ins_check_orphan()
792 rb_link_node(&orphan->rb, parent, p); dbg_ins_check_orphan()
793 rb_insert_color(&orphan->rb, root); dbg_ins_check_orphan()
804 o = rb_entry(p, struct check_orphan, rb); dbg_find_check_orphan()
819 rbtree_postorder_for_each_entry_safe(o, n, root, rb) dbg_free_check_tree()
H A Ddebug.c580 struct rb_node *rb; ubifs_dump_budg() local
622 for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { ubifs_dump_budg()
623 bud = rb_entry(rb, struct ubifs_bud, rb); ubifs_dump_budg()
648 struct rb_node *rb; ubifs_dump_lprop() local
710 for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) { ubifs_dump_lprop()
711 bud = rb_entry(rb, struct ubifs_bud, rb); ubifs_dump_lprop()
1741 * @rb: link in the RB-tree of inodes
1760 struct rb_node rb; member in struct:fsck_inode
1807 fscki = rb_entry(parent, struct fsck_inode, rb); add_inode()
1863 rb_link_node(&fscki->rb, parent, p); add_inode()
1864 rb_insert_color(&fscki->rb, &fsckd->inodes); add_inode()
1885 fscki = rb_entry(p, struct fsck_inode, rb); search_inode()
2129 rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb) free_inodes()
2154 fscki = rb_entry(this, struct fsck_inode, rb); check_inodes()
H A Drecovery.c1234 * @rb: link in the RB-tree of sizes
1242 struct rb_node rb; member in struct:size_entry
1266 e = rb_entry(parent, struct size_entry, rb); add_ino()
1282 rb_link_node(&e->rb, parent, p); add_ino()
1283 rb_insert_color(&e->rb, &c->size_tree); add_ino()
1299 e = rb_entry(p, struct size_entry, rb); find_ino()
1321 rb_erase(&e->rb, &c->size_tree); remove_ino()
1333 rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { ubifs_destroy_size_tree()
1352 * To accomplish those purposes, a rb-tree is constructed containing an entry
1483 e = rb_entry(this, struct size_entry, rb); ubifs_recover_size()
1542 rb_erase(&e->rb, &c->size_tree); ubifs_recover_size()
H A Dreplay.c279 struct replay_entry *ra, *rb; replay_entries_cmp() local
286 rb = list_entry(b, struct replay_entry, list); replay_entries_cmp()
287 ubifs_assert(ra->sqnum != rb->sqnum); replay_entries_cmp()
288 if (ra->sqnum > rb->sqnum) replay_entries_cmp()
H A Dubifs.h278 * @rb: rb-tree node
283 struct rb_node rb; member in struct:ubifs_old_idx
716 * @rb: link in the tree of all buds
723 struct rb_node rb; member in struct:ubifs_bud
912 * @rb: rb-tree node of rb-tree of orphans sorted by inode number
923 struct rb_node rb; member in struct:ubifs_orphan
1133 * @orph_tree: rb-tree of orphan inode numbers
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
H A Drouter.c1235 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) lnet_destroy_rtrbuf() argument
1240 __free_page(rb->rb_kiov[npages].kiov_page); lnet_destroy_rtrbuf()
1242 LIBCFS_FREE(rb, sz); lnet_destroy_rtrbuf()
1251 lnet_rtrbuf_t *rb; lnet_new_rtrbuf() local
1254 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); lnet_new_rtrbuf()
1255 if (rb == NULL) lnet_new_rtrbuf()
1258 rb->rb_pool = rbp; lnet_new_rtrbuf()
1266 __free_page(rb->rb_kiov[i].kiov_page); lnet_new_rtrbuf()
1268 LIBCFS_FREE(rb, sz); lnet_new_rtrbuf()
1272 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; lnet_new_rtrbuf()
1273 rb->rb_kiov[i].kiov_offset = 0; lnet_new_rtrbuf()
1274 rb->rb_kiov[i].kiov_page = page; lnet_new_rtrbuf()
1277 return rb; lnet_new_rtrbuf()
1285 lnet_rtrbuf_t *rb; lnet_rtrpool_free_bufs() local
1296 rb = list_entry(rbp->rbp_bufs.next, lnet_rtrpool_free_bufs()
1298 list_del(&rb->rb_list); lnet_rtrpool_free_bufs()
1299 lnet_destroy_rtrbuf(rb, npages); lnet_rtrpool_free_bufs()
1312 lnet_rtrbuf_t *rb; lnet_rtrpool_alloc_bufs() local
1321 rb = lnet_new_rtrbuf(rbp, cpt); lnet_rtrpool_alloc_bufs()
1323 if (rb == NULL) { lnet_rtrpool_alloc_bufs()
1332 list_add(&rb->rb_list, &rbp->rbp_bufs); lnet_rtrpool_alloc_bufs()
H A Dlib-move.c899 lnet_rtrbuf_t *rb; lnet_post_routed_recv_locked() local
950 rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list); lnet_post_routed_recv_locked()
951 list_del(&rb->rb_list); lnet_post_routed_recv_locked()
954 msg->msg_kiov = &rb->rb_kiov[0]; lnet_post_routed_recv_locked()
1033 lnet_rtrbuf_t *rb; lnet_return_rx_credits_locked() local
1041 rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]); lnet_return_rx_credits_locked()
1042 rbp = rb->rb_pool; lnet_return_rx_credits_locked()
1053 list_add(&rb->rb_list, &rbp->rbp_bufs); lnet_return_rx_credits_locked()
/linux-4.1.27/fs/btrfs/
H A Dextent_map.c228 struct rb_node *rb; try_merge_map() local
231 rb = rb_prev(&em->rb_node); try_merge_map()
232 if (rb) try_merge_map()
233 merge = rb_entry(rb, struct extent_map, rb_node); try_merge_map()
234 if (rb && mergable_maps(merge, em)) { try_merge_map()
250 rb = rb_next(&em->rb_node); try_merge_map()
251 if (rb) try_merge_map()
252 merge = rb_entry(rb, struct extent_map, rb_node); try_merge_map()
253 if (rb && mergable_maps(em, merge)) { try_merge_map()
H A Ddelayed-ref.c35 * us to buffer up frequently modified backrefs in an rb tree instead
88 * entries in the rb tree are ordered by the byte number of the extent,
/linux-4.1.27/drivers/mtd/nand/
H A Dsunxi_nand.c189 * @rb: the Ready/Busy description
193 struct sunxi_nand_rb rb; member in struct:sunxi_nand_chip_sel
339 struct sunxi_nand_rb *rb; sunxi_nfc_dev_ready() local
346 rb = &sunxi_nand->sels[sunxi_nand->selected].rb; sunxi_nfc_dev_ready()
348 switch (rb->type) { sunxi_nfc_dev_ready()
351 (NFC_RB_STATE0 << rb->info.nativeid)); sunxi_nfc_dev_ready()
357 (NFC_RB_STATE0 << rb->info.nativeid)); sunxi_nfc_dev_ready()
360 ret = gpio_get_value(rb->info.gpio); sunxi_nfc_dev_ready()
394 if (sel->rb.type == RB_NONE) { sunxi_nfc_select_chip()
398 if (sel->rb.type == RB_NATIVE) sunxi_nfc_select_chip()
399 ctl |= (sel->rb.info.nativeid << 3); sunxi_nfc_select_chip()
1186 if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) && sunxi_nand_chip_init()
1188 chip->sels[i].rb.type = RB_NATIVE; sunxi_nand_chip_init()
1189 chip->sels[i].rb.info.nativeid = tmp; sunxi_nand_chip_init()
1191 ret = of_get_named_gpio(np, "rb-gpios", i); sunxi_nand_chip_init()
1194 chip->sels[i].rb.type = RB_GPIO; sunxi_nand_chip_init()
1195 chip->sels[i].rb.info.gpio = tmp; sunxi_nand_chip_init()
1196 ret = devm_gpio_request(dev, tmp, "nand-rb"); sunxi_nand_chip_init()
1204 chip->sels[i].rb.type = RB_NONE; sunxi_nand_chip_init()
/linux-4.1.27/drivers/spi/
H A Dspi-mpc52xx-psc.c137 unsigned rb = 0; /* number of bytes received */ mpc52xx_psc_spi_transfer_rxtx() local
151 while (rb < t->len) { mpc52xx_psc_spi_transfer_rxtx()
152 if (t->len - rb > MPC52xx_PSC_BUFSIZE) { mpc52xx_psc_spi_transfer_rxtx()
157 rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb); mpc52xx_psc_spi_transfer_rxtx()
179 if (t->len - rb == 1) { mpc52xx_psc_spi_transfer_rxtx()
192 for (; recv_at_once; rb++, recv_at_once--) mpc52xx_psc_spi_transfer_rxtx()
193 rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8); mpc52xx_psc_spi_transfer_rxtx()
195 for (; recv_at_once; rb++, recv_at_once--) mpc52xx_psc_spi_transfer_rxtx()
/linux-4.1.27/arch/arm/crypto/
H A Dsha512-armv7-neon.S84 #define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
114 veor.64 RT0, ra, rb; \
116 vbsl.64 RT0, rc, rb; \
151 vbsl.64 RT0, rb, ra; \
195 #define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
226 veor.64 RT0, ra, rb; \
228 vbsl.64 RT0, rc, rb; \
263 vbsl.64 RT0, rb, ra; \
/linux-4.1.27/net/packet/
H A Daf_packet.c194 struct packet_ring_buffer *rb,
489 struct packet_ring_buffer *rb, packet_lookup_frame()
496 pg_vec_pos = position / rb->frames_per_block; packet_lookup_frame()
497 frame_offset = position % rb->frames_per_block; packet_lookup_frame()
499 h.raw = rb->pg_vec[pg_vec_pos].buffer + packet_lookup_frame()
500 (frame_offset * rb->frame_size); packet_lookup_frame()
509 struct packet_ring_buffer *rb, packet_current_frame()
512 return packet_lookup_frame(po, rb, rb->head, status); packet_current_frame()
608 struct packet_ring_buffer *rb, init_prb_bdqc()
612 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); init_prb_bdqc()
982 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) prb_clear_blk_fill_status() argument
984 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); prb_clear_blk_fill_status()
1127 struct packet_ring_buffer *rb, prb_lookup_block()
1131 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); prb_lookup_block()
1139 static int prb_previous_blk_num(struct packet_ring_buffer *rb) prb_previous_blk_num() argument
1142 if (rb->prb_bdqc.kactive_blk_num) prb_previous_blk_num()
1143 prev = rb->prb_bdqc.kactive_blk_num-1; prb_previous_blk_num()
1145 prev = rb->prb_bdqc.knum_blocks-1; prb_previous_blk_num()
1151 struct packet_ring_buffer *rb, __prb_previous_block()
1154 unsigned int previous = prb_previous_blk_num(rb); __prb_previous_block()
1155 return prb_lookup_block(po, rb, previous, status); __prb_previous_block()
1159 struct packet_ring_buffer *rb, packet_previous_rx_frame()
1163 return packet_previous_frame(po, rb, status); packet_previous_rx_frame()
1165 return __prb_previous_block(po, rb, status); packet_previous_rx_frame()
1169 struct packet_ring_buffer *rb) packet_increment_rx_head()
1174 return packet_increment_head(rb); packet_increment_rx_head()
1184 struct packet_ring_buffer *rb, packet_previous_frame()
1187 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; packet_previous_frame()
1188 return packet_lookup_frame(po, rb, previous, status); packet_previous_frame()
1196 static void packet_inc_pending(struct packet_ring_buffer *rb) packet_inc_pending() argument
1198 this_cpu_inc(*rb->pending_refcnt); packet_inc_pending()
1201 static void packet_dec_pending(struct packet_ring_buffer *rb) packet_dec_pending() argument
1203 this_cpu_dec(*rb->pending_refcnt); packet_dec_pending()
1206 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) packet_read_pending() argument
1212 if (rb->pending_refcnt == NULL) packet_read_pending()
1216 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); packet_read_pending()
3819 struct packet_ring_buffer *rb; packet_set_ring() local
3832 rb = tx_ring ? &po->tx_ring : &po->rx_ring; packet_set_ring()
3839 if (packet_read_pending(rb)) packet_set_ring()
3846 if (unlikely(rb->pg_vec)) packet_set_ring()
3876 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; packet_set_ring()
3877 if (unlikely(rb->frames_per_block <= 0)) packet_set_ring()
3879 if (unlikely((rb->frames_per_block * req->tp_block_nr) != packet_set_ring()
3894 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); packet_set_ring()
3926 swap(rb->pg_vec, pg_vec); packet_set_ring()
3927 rb->frame_max = (req->tp_frame_nr - 1); packet_set_ring()
3928 rb->head = 0; packet_set_ring()
3929 rb->frame_size = req->tp_frame_size; packet_set_ring()
3932 swap(rb->pg_vec_order, order); packet_set_ring()
3933 swap(rb->pg_vec_len, req->tp_block_nr); packet_set_ring()
3935 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; packet_set_ring()
3970 struct packet_ring_buffer *rb; packet_mmap() local
3981 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { packet_mmap()
3982 if (rb->pg_vec) { packet_mmap()
3983 expected_size += rb->pg_vec_len packet_mmap()
3984 * rb->pg_vec_pages packet_mmap()
3997 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { packet_mmap()
3998 if (rb->pg_vec == NULL) packet_mmap()
4001 for (i = 0; i < rb->pg_vec_len; i++) { packet_mmap()
4003 void *kaddr = rb->pg_vec[i].buffer; packet_mmap()
4006 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { packet_mmap()
488 packet_lookup_frame(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int position, int status) packet_lookup_frame() argument
508 packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) packet_current_frame() argument
607 init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u, int tx_ring) init_prb_bdqc() argument
1126 prb_lookup_block(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int idx, int status) prb_lookup_block() argument
1150 __prb_previous_block(struct packet_sock *po, struct packet_ring_buffer *rb, int status) __prb_previous_block() argument
1158 packet_previous_rx_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) packet_previous_rx_frame() argument
1168 packet_increment_rx_head(struct packet_sock *po, struct packet_ring_buffer *rb) packet_increment_rx_head() argument
1183 packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) packet_previous_frame() argument
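The packet_lookup_frame() hit above shows how af_packet addresses one frame inside its block-based ring: the frame position is split into a block index and an in-block offset, then scaled by the frame size. The following is a small userspace sketch of just that arithmetic; the struct and field names are illustrative, not the kernel's packet_ring_buffer.

    #include <stdio.h>
    #include <stdlib.h>

    struct ring {
            unsigned char *blocks[4];       /* one buffer per block */
            unsigned int frames_per_block;
            unsigned int frame_size;
    };

    static void *frame_addr(const struct ring *rb, unsigned int position)
    {
            unsigned int blk = position / rb->frames_per_block; /* which block */
            unsigned int off = position % rb->frames_per_block; /* frame within it */

            return rb->blocks[blk] + off * rb->frame_size;
    }

    int main(void)
    {
            struct ring rb = { .frames_per_block = 8, .frame_size = 256 };
            unsigned int i;

            for (i = 0; i < 4; i++)
                    rb.blocks[i] = calloc(rb.frames_per_block, rb.frame_size);

            /* frame 11 = block 1, frame 3 -> byte offset 3 * 256 = 768 */
            printf("offset %td\n",
                   (unsigned char *)frame_addr(&rb, 11) - rb.blocks[1]);
            return 0;
    }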
/linux-4.1.27/drivers/usb/gadget/function/
H A Duvc_queue.h65 struct v4l2_requestbuffers *rb);
H A Duvc_queue.c152 struct v4l2_requestbuffers *rb) uvcg_alloc_buffers()
156 ret = vb2_reqbufs(&queue->queue, rb); uvcg_alloc_buffers()
158 return ret ? ret : rb->count; uvcg_alloc_buffers()
151 uvcg_alloc_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) uvcg_alloc_buffers() argument
/linux-4.1.27/include/video/
H A Dpxa168fb.h40 * bit0 is for rb swap.
H A Domapdss.h380 s16 rr, rg, rb; member in struct:omap_dss_cpr_coefs
/linux-4.1.27/tools/perf/util/
H A Dlzma.c37 infile = fopen(input, "rb"); lzma_decompress_to_file()
H A Dkvm-stat.h25 struct rb_node rb; member in struct:kvm_event
H A Dmachine.c388 * after rb tree is updated. __machine__findnew_thread()
392 * leader and that would screw the rb tree. __machine__findnew_thread()
/linux-4.1.27/drivers/media/platform/exynos4-is/
H A Dfimc-isp-video.c534 struct v4l2_requestbuffers *rb) isp_video_reqbufs()
539 ret = vb2_ioctl_reqbufs(file, priv, rb); isp_video_reqbufs()
543 if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) { isp_video_reqbufs()
544 rb->count = 0; isp_video_reqbufs()
545 vb2_ioctl_reqbufs(file, priv, rb); isp_video_reqbufs()
549 isp->video_capture.reqbufs_count = rb->count; isp_video_reqbufs()
533 isp_video_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) isp_video_reqbufs() argument
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_gpu.c618 gpu->rb = msm_ringbuffer_new(gpu, ringsz); msm_gpu_init()
620 if (IS_ERR(gpu->rb)) { msm_gpu_init()
621 ret = PTR_ERR(gpu->rb); msm_gpu_init()
622 gpu->rb = NULL; msm_gpu_init()
643 if (gpu->rb) { msm_gpu_cleanup()
645 msm_gem_put_iova(gpu->rb->bo, gpu->id); msm_gpu_cleanup()
646 msm_ringbuffer_destroy(gpu->rb); msm_gpu_cleanup()
H A Dmsm_gpu.h80 struct msm_ringbuffer *rb; member in struct:msm_gpu
/linux-4.1.27/drivers/mtd/
H A Dmtdswap.c83 struct rb_node rb; member in struct:swap_eb
93 rb)->erase_count)
95 rb)->erase_count)
214 rb_erase(&eb->rb, eb->root); mtdswap_eb_detach()
226 cur = rb_entry(parent, struct swap_eb, rb); __mtdswap_rb_add()
233 rb_link_node(&eb->rb, parent, p); __mtdswap_rb_add()
234 rb_insert_color(&eb->rb, root); __mtdswap_rb_add()
453 median = rb_entry(medrb, struct swap_eb, rb)->erase_count; mtdswap_check_counts()
466 rb_erase(&eb->rb, &hist_root); mtdswap_check_counts()
625 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); mtdswap_map_free_block()
626 rb_erase(&eb->rb, clean_root); mtdswap_map_free_block()
910 eb = rb_entry(rb_first(rp), struct swap_eb, rb); mtdswap_pick_gc_eblk()
912 rb_erase(&eb->rb, rp); mtdswap_pick_gc_eblk()
1239 rb)->erase_count; mtdswap_show()
1241 rb)->erase_count; mtdswap_show()
/linux-4.1.27/drivers/i2c/busses/
H A Di2c-cpm.c194 u_char *rb; cpm_i2c_parse_message() local
205 rb = cpm->rxbuf[rx]; cpm_i2c_parse_message()
208 rb = (u_char *) (((ulong) rb + 1) & ~1); cpm_i2c_parse_message()
254 u_char *rb; cpm_i2c_check_message() local
261 rb = cpm->rxbuf[rx]; cpm_i2c_check_message()
264 rb = (u_char *) (((uint) rb + 1) & ~1); cpm_i2c_check_message()
284 memcpy(pmsg->buf, rb, pmsg->len); cpm_i2c_check_message()
/linux-4.1.27/drivers/usb/class/
H A Dcdc-acm.c417 struct acm_rb *rb = urb->context; acm_read_bulk_callback() local
418 struct acm *acm = rb->instance; acm_read_bulk_callback()
423 rb->index, urb->actual_length); acm_read_bulk_callback()
426 set_bit(rb->index, &acm->read_urbs_free); acm_read_bulk_callback()
432 set_bit(rb->index, &acm->read_urbs_free); acm_read_bulk_callback()
447 set_bit(rb->index, &acm->read_urbs_free); acm_read_bulk_callback()
454 acm_submit_read_urb(acm, rb->index, GFP_ATOMIC); acm_read_bulk_callback()
1371 struct acm_rb *rb = &(acm->read_buffers[i]); acm_probe() local
1374 rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL, acm_probe()
1375 &rb->dma); acm_probe()
1376 if (!rb->base) acm_probe()
1378 rb->index = i; acm_probe()
1379 rb->instance = acm; acm_probe()
1386 urb->transfer_dma = rb->dma; acm_probe()
1390 rb->base, acm_probe()
1392 acm_read_bulk_callback, rb, acm_probe()
1397 rb->base, acm_probe()
1399 acm_read_bulk_callback, rb); acm_probe()
/linux-4.1.27/arch/mips/alchemy/common/
H A Dusb.c391 static inline int au1000_usb_init(unsigned long rb, int reg) au1000_usb_init() argument
393 void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg); au1000_usb_init()
424 static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg) __au1xx0_ohci_control() argument
426 void __iomem *base = (void __iomem *)KSEG1ADDR(rb); __au1xx0_ohci_control()
457 static inline int au1000_usb_control(int block, int enable, unsigned long rb, au1000_usb_control() argument
464 __au1xx0_ohci_control(enable, rb, creg); au1000_usb_control()
/linux-4.1.27/crypto/
H A Dtgr192.c401 static void tgr192_round(u64 * ra, u64 * rb, u64 * rc, u64 x, int mul) tgr192_round() argument
404 u64 b = *rb; tgr192_round()
415 *rb = b; tgr192_round()
420 static void tgr192_pass(u64 * ra, u64 * rb, u64 * rc, u64 * x, int mul) tgr192_pass() argument
423 u64 b = *rb; tgr192_pass()
436 *rb = b; tgr192_pass()
/linux-4.1.27/drivers/iio/
H A Dindustrialio-buffer.c101 struct iio_buffer *rb = indio_dev->buffer; iio_buffer_read_first_n_outer() local
110 if (!rb || !rb->access->read_first_n) iio_buffer_read_first_n_outer()
113 datum_size = rb->bytes_per_datum; iio_buffer_read_first_n_outer()
122 to_read = min_t(size_t, n / datum_size, rb->watermark); iio_buffer_read_first_n_outer()
128 ret = wait_event_interruptible(rb->pollq, iio_buffer_read_first_n_outer()
129 iio_buffer_ready(indio_dev, rb, to_wait, to_read)); iio_buffer_read_first_n_outer()
136 ret = rb->access->read_first_n(rb, n, buf); iio_buffer_read_first_n_outer()
151 struct iio_buffer *rb = indio_dev->buffer; iio_buffer_poll() local
156 poll_wait(filp, &rb->pollq, wait); iio_buffer_poll()
157 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) iio_buffer_poll()
/linux-4.1.27/drivers/crypto/vmx/
H A Dppc-xlate.pl144 my ($f, $vrt, $ra, $rb, $op) = @_;
145 " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
/linux-4.1.27/arch/x86/crypto/
H A Daes-x86_64-asm_64.S83 #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
94 xorl OFFSET+4(r8),rb ## E; \
/linux-4.1.27/fs/xfs/
H A Dxfs_rtalloc.h127 # define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb) (ENOSYS)
129 # define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
/linux-4.1.27/drivers/media/dvb-frontends/
H A Ddib3000mb.c56 u8 rb[2]; dib3000_read_reg() local
59 { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 }, dib3000_read_reg()
66 (rb[0] << 8) | rb[1],(rb[0] << 8) | rb[1]); dib3000_read_reg()
68 return (rb[0] << 8) | rb[1]; dib3000_read_reg()
H A Ddib3000mc.c53 u8 rb[2]; dib3000mc_read_word() local
56 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, dib3000mc_read_word()
62 return (rb[0] << 8) | rb[1]; dib3000mc_read_word()
/linux-4.1.27/arch/m68k/include/asm/
H A Dm68360_pram.h90 unsigned short rbptr; /* rb BD Pointer */
150 unsigned short rbptr; /* rb BD Pointer */
202 unsigned short rbptr; /* rb BD Pointer */
253 unsigned short rbptr; /* rb BD Pointer */
271 unsigned short rbptr; /* rb BD Pointer */
295 unsigned short rbptr; /* rb BD Pointer */
324 unsigned short rbptr; /* rb BD Pointer */
401 unsigned short rbptr; /* rb BD Pointer */
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_uiom_interval_tree.h25 struct rb_node rb; member in struct:usnic_uiom_interval_node
H A Dusnic_uiom_interval_tree.c252 INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
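INTERVAL_TREE_DEFINE() (include/linux/interval_tree_generic.h) turns an embedded rb_node plus start/last accessors into an augmented interval tree, generating <prefix>_insert(), <prefix>_remove(), <prefix>_iter_first() and <prefix>_iter_next(), as used by the usnic and umem_odp hits in this listing. Below is a hedged sketch of one instantiation for a hypothetical range structure; the struct, accessor macros and prefix are illustrative only.

    #include <linux/interval_tree_generic.h>

    struct my_range {
            struct rb_node rb;              /* ITRB: embedded rb node */
            unsigned long start, last;      /* inclusive interval bounds */
            unsigned long __subtree_last;   /* ITSUBTREE: augmented field */
    };

    #define RANGE_START(n)  ((n)->start)
    #define RANGE_LAST(n)   ((n)->last)

    INTERVAL_TREE_DEFINE(struct my_range, rb, unsigned long, __subtree_last,
                         RANGE_START, RANGE_LAST, static, my_range)

    /* generated: my_range_insert(node, root), my_range_remove(node, root),
     * my_range_iter_first(root, start, last), my_range_iter_next(node, start, last)
     */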
/linux-4.1.27/drivers/input/misc/
H A Drb532_button.c12 #include <asm/mach-rc32434/rb.h>
/linux-4.1.27/arch/arm/boot/compressed/
H A Dhead.S30 .macro loadsp, rb, tmp
32 .macro writeb, ch, rb
36 .macro loadsp, rb, tmp
38 .macro writeb, ch, rb
42 .macro loadsp, rb, tmp
44 .macro writeb, ch, rb
53 .macro writeb, ch, rb
54 senduart \ch, \rb
58 .macro loadsp, rb, tmp
59 mov \rb, #0x80000000 @ physical base address
61 add \rb, \rb, #0x00050000 @ Ser3
63 add \rb, \rb, #0x00010000 @ Ser1
67 .macro loadsp, rb, tmp
68 addruart \rb, \tmp
/linux-4.1.27/drivers/video/fbdev/core/
H A Dfbcvt.c295 * @rb: compute with reduced blanking (for flatpanels)
305 int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb) fb_find_mode_cvt() argument
314 if (rb) fb_find_mode_cvt()
H A Dmodedb.c700 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0; fb_find_mode() local
713 if (cvt || rb) fb_find_mode()
724 if (cvt || rb) fb_find_mode()
745 rb = 1; fb_find_mode()
770 (rb) ? " reduced blanking" : "", fb_find_mode()
784 ret = fb_find_mode_cvt(&cvt_mode, margins, rb); fb_find_mode()
/linux-4.1.27/drivers/media/pci/ngene/
H A Dngene-core.c777 static void free_ringbuffer(struct ngene *dev, struct SRingBufferDescriptor *rb) free_ringbuffer() argument
779 struct SBufferHeader *Cur = rb->Head; free_ringbuffer()
785 for (j = 0; j < rb->NumBuffers; j++, Cur = Cur->Next) { free_ringbuffer()
788 rb->Buffer1Length, free_ringbuffer()
794 rb->Buffer2Length, free_ringbuffer()
799 if (rb->SCListMem) free_ringbuffer()
800 pci_free_consistent(dev->pci_dev, rb->SCListMemSize, free_ringbuffer()
801 rb->SCListMem, rb->PASCListMem); free_ringbuffer()
803 pci_free_consistent(dev->pci_dev, rb->MemSize, rb->Head, rb->PAHead); free_ringbuffer()
807 struct SRingBufferDescriptor *rb, free_idlebuffer()
813 if (!rb->Head) free_idlebuffer()
815 free_ringbuffer(dev, rb); free_idlebuffer()
806 free_idlebuffer(struct ngene *dev, struct SRingBufferDescriptor *rb, struct SRingBufferDescriptor *tb) free_idlebuffer() argument
/linux-4.1.27/fs/kernfs/
H A Ddir.c25 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
247 node = &pos->rb.rb_left; kernfs_link_sibling()
249 node = &pos->rb.rb_right; kernfs_link_sibling()
255 rb_link_node(&kn->rb, parent, node); kernfs_link_sibling()
256 rb_insert_color(&kn->rb, &kn->parent->dir.children); kernfs_link_sibling()
278 if (RB_EMPTY_NODE(&kn->rb)) kernfs_unlink_sibling()
284 rb_erase(&kn->rb, &kn->parent->dir.children); kernfs_unlink_sibling()
285 RB_CLEAR_NODE(&kn->rb); kernfs_unlink_sibling()
535 RB_CLEAR_NODE(&kn->rb); __kernfs_new_node()
985 rbn = rb_next(&pos->rb); kernfs_next_descendant_post()
1017 WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb)); kernfs_activate()
1038 if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb))) __kernfs_remove()
1223 WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb)); kernfs_remove_self()
1384 struct rb_node *node = rb_next(&pos->rb); kernfs_dir_pos()
1399 struct rb_node *node = rb_next(&pos->rb); kernfs_dir_next_pos()
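kernfs_unlink_sibling() above pairs rb_erase() with RB_CLEAR_NODE(), so that RB_EMPTY_NODE() can later tell whether a node is still linked instead of keeping a separate flag. A minimal sketch of that removal idiom, again using the hypothetical my_node type from the earlier sketches:

    static void my_remove(struct rb_root *root, struct my_node *node)
    {
            if (RB_EMPTY_NODE(&node->rb))   /* already unlinked */
                    return;

            rb_erase(&node->rb, root);
            RB_CLEAR_NODE(&node->rb);       /* mark as not on any tree */
    }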
/linux-4.1.27/sound/pci/lola/
H A Dlola.c367 PAGE_SIZE, &chip->rb); setup_corb_rirb()
371 chip->corb.addr = chip->rb.addr; setup_corb_rirb()
372 chip->corb.buf = (u32 *)chip->rb.area; setup_corb_rirb()
373 chip->rirb.addr = chip->rb.addr + 2048; setup_corb_rirb()
374 chip->rirb.buf = (u32 *)(chip->rb.area + 2048); setup_corb_rirb()
556 if (chip->rb.area) lola_free()
557 snd_dma_free_pages(&chip->rb); lola_free()
H A Dlola.h344 struct snd_dma_buffer rb; member in struct:lola
/linux-4.1.27/drivers/media/platform/s3c-camif/
H A Dcamif-capture.c908 struct v4l2_requestbuffers *rb) s3c_camif_reqbufs()
913 pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n", s3c_camif_reqbufs()
914 vp->id, rb->count, vp->owner, priv); s3c_camif_reqbufs()
919 if (rb->count) s3c_camif_reqbufs()
920 rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count); s3c_camif_reqbufs()
924 ret = vb2_reqbufs(&vp->vb_queue, rb); s3c_camif_reqbufs()
928 if (rb->count && rb->count < CAMIF_REQ_BUFS_MIN) { s3c_camif_reqbufs()
929 rb->count = 0; s3c_camif_reqbufs()
930 vb2_reqbufs(&vp->vb_queue, rb); s3c_camif_reqbufs()
934 vp->reqbufs_count = rb->count; s3c_camif_reqbufs()
935 if (vp->owner == NULL && rb->count > 0) s3c_camif_reqbufs()
907 s3c_camif_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) s3c_camif_reqbufs() argument
/linux-4.1.27/arch/sparc/kernel/
H A Dbtext.c22 static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
23 static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
24 static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
197 int rb = dispDeviceRowBytes; draw_byte() local
202 draw_byte_32(font, (unsigned int *)base, rb); draw_byte()
206 draw_byte_16(font, (unsigned int *)base, rb); draw_byte()
209 draw_byte_8(font, (unsigned int *)base, rb); draw_byte()
241 static void draw_byte_32(unsigned char *font, unsigned int *base, int rb) draw_byte_32() argument
258 base = (unsigned int *) ((char *)base + rb); draw_byte_32()
262 static void draw_byte_16(unsigned char *font, unsigned int *base, int rb) draw_byte_16() argument
276 base = (unsigned int *) ((char *)base + rb); draw_byte_16()
280 static void draw_byte_8(unsigned char *font, unsigned int *base, int rb) draw_byte_8() argument
292 base = (unsigned int *) ((char *)base + rb); draw_byte_8()
/linux-4.1.27/arch/powerpc/sysdev/
H A Dmpic.c174 struct mpic_reg_bank *rb, _mpic_read()
180 return dcr_read(rb->dhost, reg); _mpic_read()
183 return in_be32(rb->base + (reg >> 2)); _mpic_read()
186 return in_le32(rb->base + (reg >> 2)); _mpic_read()
191 struct mpic_reg_bank *rb, _mpic_write()
197 dcr_write(rb->dhost, reg, value); _mpic_write()
201 out_be32(rb->base + (reg >> 2), value); _mpic_write()
205 out_le32(rb->base + (reg >> 2), value); _mpic_write()
315 struct mpic_reg_bank *rb, unsigned int offset, _mpic_map_mmio()
318 rb->base = ioremap(phys_addr + offset, size); _mpic_map_mmio()
319 BUG_ON(rb->base == NULL); _mpic_map_mmio()
323 static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, _mpic_map_dcr() argument
327 rb->dhost = dcr_map(mpic->node, phys_addr + offset, size); _mpic_map_dcr()
328 BUG_ON(!DCR_MAP_OK(rb->dhost)); _mpic_map_dcr()
332 phys_addr_t phys_addr, struct mpic_reg_bank *rb, mpic_map()
336 _mpic_map_dcr(mpic, rb, offset, size); mpic_map()
338 _mpic_map_mmio(mpic, phys_addr, rb, offset, size); mpic_map()
173 _mpic_read(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg) _mpic_read() argument
190 _mpic_write(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg, u32 value) _mpic_write() argument
314 _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) _mpic_map_mmio() argument
331 mpic_map(struct mpic *mpic, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) mpic_map() argument
H A Dfsl_pci.c864 unsigned int rd, ra, rb, d; mcheck_handle_load() local
868 rb = get_rb(inst); mcheck_handle_load()
881 regs->gpr[ra] += regs->gpr[rb]; mcheck_handle_load()
890 regs->gpr[ra] += regs->gpr[rb]; mcheck_handle_load()
900 regs->gpr[ra] += regs->gpr[rb]; mcheck_handle_load()
909 regs->gpr[ra] += regs->gpr[rb]; mcheck_handle_load()
/linux-4.1.27/drivers/media/usb/uvc/
H A Duvc_queue.c228 struct v4l2_requestbuffers *rb) uvc_request_buffers()
233 ret = vb2_reqbufs(&queue->queue, rb); uvc_request_buffers()
236 return ret ? ret : rb->count; uvc_request_buffers()
227 uvc_request_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) uvc_request_buffers() argument
H A Duvc_v4l2.c681 struct v4l2_requestbuffers *rb) uvc_ioctl_reqbufs()
692 ret = uvc_request_buffers(&stream->queue, rb); uvc_ioctl_reqbufs()
680 uvc_ioctl_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) uvc_ioctl_reqbufs() argument
H A Duvcvideo.h631 struct v4l2_requestbuffers *rb);
/linux-4.1.27/drivers/video/fbdev/omap2/dss/
H A Dmanager-sysfs.c382 info.cpr_coefs.rb, manager_cpr_coef_show()
403 &coefs.rr, &coefs.rg, &coefs.rb, manager_cpr_coef_store()
408 arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb, manager_cpr_coef_store()
/linux-4.1.27/drivers/infiniband/core/
H A Dumem_rbtree.c69 INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
/linux-4.1.27/drivers/media/usb/stkwebcam/
H A Dstk-webcam.c1041 void *priv, struct v4l2_requestbuffers *rb) stk_vidioc_reqbufs()
1047 if (rb->memory != V4L2_MEMORY_MMAP) stk_vidioc_reqbufs()
1053 if (rb->count == 0) { stk_vidioc_reqbufs()
1062 if (rb->count < 3) stk_vidioc_reqbufs()
1063 rb->count = 3; stk_vidioc_reqbufs()
1065 else if (rb->count > 5) stk_vidioc_reqbufs()
1066 rb->count = 5; stk_vidioc_reqbufs()
1068 stk_allocate_buffers(dev, rb->count); stk_vidioc_reqbufs()
1069 rb->count = dev->n_sbufs; stk_vidioc_reqbufs()
1040 stk_vidioc_reqbufs(struct file *filp, void *priv, struct v4l2_requestbuffers *rb) stk_vidioc_reqbufs() argument
/linux-4.1.27/drivers/media/usb/gspca/
H A Dgspca.c1371 struct v4l2_requestbuffers *rb) vidioc_reqbufs()
1376 i = rb->memory; /* (avoid compilation warning) */ vidioc_reqbufs()
1390 && gspca_dev->memory != rb->memory) { vidioc_reqbufs()
1424 if (rb->count == 0) /* unrequest */ vidioc_reqbufs()
1426 ret = frame_alloc(gspca_dev, file, rb->memory, rb->count); vidioc_reqbufs()
1428 rb->count = gspca_dev->nframes; vidioc_reqbufs()
1434 PDEBUG(D_STREAM, "reqbufs st:%d c:%d", ret, rb->count); vidioc_reqbufs()
1823 struct v4l2_requestbuffers rb; read_alloc() local
1825 memset(&rb, 0, sizeof rb); read_alloc()
1826 rb.count = gspca_dev->nbufread; read_alloc()
1827 rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; read_alloc()
1828 rb.memory = GSPCA_MEMORY_READ; read_alloc()
1829 ret = vidioc_reqbufs(file, gspca_dev, &rb); read_alloc()
1370 vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) vidioc_reqbufs() argument
/linux-4.1.27/drivers/misc/
H A Dsram.c52 struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); sram_reserve_cmp() local
54 return ra->start - rb->start; sram_reserve_cmp()
/linux-4.1.27/arch/x86/platform/uv/
H A Duv_irq.c50 * rb tree for a specific irq.
97 /* Retrieve offset and pnode information from the rb tree for a specific irq */ uv_irq_2_mmr_info()
/linux-4.1.27/drivers/net/ethernet/ti/
H A Dnetcp_ethss.c161 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
162 offsetof(struct gbe##_##rb, rn)
163 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
164 offsetof(struct gbenu##_##rb, rn)
165 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
166 offsetof(struct xgbe##_##rb, rn)
167 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
/linux-4.1.27/arch/arm/common/
H A Ddmabounce.c170 struct safe_buffer *b, *rb = NULL; find_safe_buffer() local
178 rb = b; find_safe_buffer()
183 return rb; find_safe_buffer()
/linux-4.1.27/include/rdma/
H A Dib_umem_odp.h42 struct rb_node rb; member in struct:umem_odp_node
/linux-4.1.27/arch/nios2/kernel/
H A Dmisaligned.c113 pr_debug("sth: ra=%d (%08x) rb=%d (%08x), imm16 %04x addr %08x val %08x\n", handle_unaligned_c()
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_modes.c1231 bool yres_specified = false, cvt = false, rb = false; drm_mode_parse_command_line_for_connector() local
1252 !yres_specified && !cvt && !rb && was_digit) { drm_mode_parse_command_line_for_connector()
1261 !rb && was_digit) { drm_mode_parse_command_line_for_connector()
1285 if (yres_specified || cvt || rb || was_digit) drm_mode_parse_command_line_for_connector()
1287 rb = true; drm_mode_parse_command_line_for_connector()
1364 mode->rb = rb; drm_mode_parse_command_line_for_connector()
1392 cmd->rb, cmd->interlace, drm_mode_create_from_cmdline_mode()
H A Ddrm_vma_manager.c48 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
192 /* internal helper to link @node into the rb-tree */ _drm_vma_offset_add_rb()
H A Ddrm_edid.c539 short rb; member in struct:minimode
1463 * @rb: Mode reduced-blanking-ness
1471 bool rb) drm_mode_find_dmt()
1483 if (rb != mode_is_rb(ptr)) drm_mode_find_dmt()
2080 bool rb = drm_monitor_supports_rb(edid); drm_cvt_modes_for_range() local
2084 newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); drm_cvt_modes_for_range()
2172 est3_modes[m].rb); drm_est3_modes()
1469 drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, bool rb) drm_mode_find_dmt() argument
/linux-4.1.27/sound/pci/hda/
H A Dhda_controller.c989 PAGE_SIZE, &chip->rb); azx_alloc_cmd_io()
998 chip->corb.addr = chip->rb.addr; azx_init_cmd_io()
999 chip->corb.buf = (u32 *)chip->rb.area; azx_init_cmd_io()
1035 chip->rirb.addr = chip->rb.addr + 2048; azx_init_cmd_io()
1036 chip->rirb.buf = (u32 *)(chip->rb.area + 2048); azx_init_cmd_io()
1508 if (chip->rb.area) azx_free_stream_pages()
1509 chip->ops->dma_free_pages(chip, &chip->rb); azx_free_stream_pages()
H A Dhda_controller.h341 struct snd_dma_buffer rb; member in struct:azx
/linux-4.1.27/arch/sh/kernel/
H A Ddisassemble.c304 int rb = 0; print_sh_insn() local
372 rb = nibs[n] & 0x07; print_sh_insn()
424 printk("r%d_bank", rb); print_sh_insn()
/linux-4.1.27/drivers/media/platform/coda/
H A Dcoda-bit.c720 struct v4l2_requestbuffers *rb) coda_encoder_reqbufs()
725 if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) coda_encoder_reqbufs()
728 if (rb->count) { coda_encoder_reqbufs()
1402 struct v4l2_requestbuffers *rb) coda_decoder_reqbufs()
1407 if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) coda_decoder_reqbufs()
1410 if (rb->count) { coda_decoder_reqbufs()
719 coda_encoder_reqbufs(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb) coda_encoder_reqbufs() argument
1401 coda_decoder_reqbufs(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb) coda_decoder_reqbufs() argument
H A Dcoda.h183 int (*reqbufs)(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb);
H A Dcoda-common.c699 struct v4l2_requestbuffers *rb) coda_reqbufs()
704 ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb); coda_reqbufs()
712 if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs) coda_reqbufs()
713 return ctx->ops->reqbufs(ctx, rb); coda_reqbufs()
698 coda_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) coda_reqbufs() argument
/linux-4.1.27/drivers/mmc/host/
H A Dsdricoh_cs.c122 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); sdricoh_readw()
137 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); sdricoh_readb()
/linux-4.1.27/drivers/media/pci/ttpci/
H A Dav7110_av.c443 #define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
448 struct dvb_ringbuffer *rb; ts_play() local
454 rb = (type) ? &av7110->avout : &av7110->aout; ts_play()
467 if (wait_event_interruptible(rb->queue, FREE_COND_TS)) ts_play()
/linux-4.1.27/block/
H A Dcfq-iosched.c85 struct rb_root rb; member in struct:cfq_rb_root
91 #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
1171 root->left = rb_first(&root->rb); cfq_rb_first()
1182 root->left = rb_first(&root->rb); cfq_rb_first_group()
1200 rb_erase_init(n, &root->rb); cfq_rb_erase()
1250 struct rb_node **node = &st->rb.rb_node; __cfq_group_service_tree_add()
1272 rb_insert_color(&cfqg->rb_node, &st->rb); __cfq_group_service_tree_add()
1365 n = rb_last(&st->rb); cfq_group_notify_queue_add()
2035 parent = rb_last(&st->rb); cfq_service_tree_add()
2043 * Get our rb key offset. Subtract any residual slice cfq_service_tree_add()
2072 p = &st->rb.rb_node; cfq_service_tree_add()
2093 rb_insert_color(&cfqq->rb_node, &st->rb); cfq_service_tree_add()
2218 * rb tree support functions
2533 if (RB_EMPTY_ROOT(&st->rb)) cfq_get_next_queue()
3010 if (RB_EMPTY_ROOT(&st->rb)) cfq_get_next_cfqg()
4428 * to add magic to the rb code cfq_init_queue()
/linux-4.1.27/drivers/acpi/acpica/
H A Dutfileio.c304 file = fopen(filename, "rb"); acpi_ut_read_table_from_file()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_mn.c79 it.rb) { radeon_mn_destroy()
/linux-4.1.27/fs/proc/
H A Dinternal.h29 * subdir_node is used to build the rb tree "subdir" of the parent.
/linux-4.1.27/include/media/
H A Dv4l2-mem2mem.h240 struct v4l2_requestbuffers *rb);
/linux-4.1.27/include/drm/
H A Ddrm_modes.h160 bool rb; member in struct:drm_cmdline_mode
/linux-4.1.27/fs/f2fs/
H A Df2fs.h303 struct rb_node rb_node; /* rb node located in rb-tree */
310 struct rb_root root; /* root of extent info rb-tree */
312 rwlock_t lock; /* protect extent info rb-tree */
313 atomic_t refcount; /* reference count of rb-tree */
314 unsigned int count; /* # of extent node in rb-tree*/
/linux-4.1.27/drivers/media/platform/
H A Dtimblogiw.c285 struct v4l2_requestbuffers *rb) timblogiw_reqbufs()
292 return videobuf_reqbufs(&fh->vb_vidq, rb); timblogiw_reqbufs()
284 timblogiw_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) timblogiw_reqbufs() argument
H A Dvia-camera.c998 struct v4l2_requestbuffers *rb) viacam_reqbufs()
1002 return videobuf_reqbufs(&cam->vb_queue, rb); viacam_reqbufs()
997 viacam_reqbufs(struct file *filp, void *priv, struct v4l2_requestbuffers *rb) viacam_reqbufs() argument
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-mem2mem.c762 struct v4l2_requestbuffers *rb) v4l2_m2m_ioctl_reqbufs()
766 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); v4l2_m2m_ioctl_reqbufs()
761 v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) v4l2_m2m_ioctl_reqbufs() argument
/linux-4.1.27/drivers/media/usb/tm6000/
H A Dtm6000.h370 struct v4l2_requestbuffers *rb);
/linux-4.1.27/arch/microblaze/mm/
H A Dfault.c183 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb do_page_fault()
/linux-4.1.27/fs/ext2/
H A Dballoc.c190 * @rb_root: root of per-filesystem reservation rb tree
323 * ext2_rsv_window_add() -- Insert a window to the block reservation rb tree.
360 * rsv_window_remove() -- unlink a window from the reservation rb tree
365 * from the filesystem reservation window rb tree. Must be called with
/linux-4.1.27/net/sched/
H A Dsch_htb.c350 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root) htb_safe_rb_erase() argument
352 if (RB_EMPTY_NODE(rb)) { htb_safe_rb_erase()
355 rb_erase(rb, root); htb_safe_rb_erase()
356 RB_CLEAR_NODE(rb); htb_safe_rb_erase()
H A Dsch_netem.c153 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) netem_rb_to_skb() argument
155 return container_of(rb, struct sk_buff, rbnode); netem_rb_to_skb()
H A Dsch_fq.c33 * rb trees, for performance reasons (its expected to send additional packets,
/linux-4.1.27/tools/lib/lockdep/
H A Dpreload.c15 * @node: rb-tree node used to store the lock in a global tree
/linux-4.1.27/drivers/md/bcache/
H A Dbcache.h233 * Beginning and end of range in rb tree - so that we can skip taking
234 * lock and checking the rb tree when we need to check for overlapping
328 * data to refill the rb tree requires an exclusive lock.
/linux-4.1.27/drivers/isdn/i4l/
H A Disdn_tty.c2676 char rb[100]; isdn_tty_cmd_ATand() local
2678 #define MAXRB (sizeof(rb) - 1) isdn_tty_cmd_ATand()
2812 sprintf(rb, "S%02d=%03d%s", i, isdn_tty_cmd_ATand()
2814 isdn_tty_at_cout(rb, info); isdn_tty_cmd_ATand()
2816 sprintf(rb, "\r\nEAZ/MSN: %.50s\r\n", isdn_tty_cmd_ATand()
2818 isdn_tty_at_cout(rb, info); isdn_tty_cmd_ATand()
/linux-4.1.27/drivers/media/usb/dvb-usb/
H A Ddib0700_devices.c2207 u8 rb[2]; dib01x0_pmu_update() local
2210 {.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2}, dib01x0_pmu_update()
2219 switch (rb[0] << 8 | rb[1]) { dib01x0_pmu_update()
2244 wb[2] |= rb[0]; dib01x0_pmu_update()
2245 wb[3] |= rb[1] & ~(3 << 4); dib01x0_pmu_update()
/linux-4.1.27/drivers/scsi/
H A Dinitio.c369 u8 instr, rb; initio_se2_rd() local
382 rb = inb(base + TUL_NVRAM); initio_se2_rd()
383 rb &= SE2DI; initio_se2_rd()
384 val += (rb << i); initio_se2_rd()
404 u8 rb; initio_se2_wr() local
433 if ((rb = inb(base + TUL_NVRAM)) & SE2DI) initio_se2_wr()
/linux-4.1.27/drivers/xen/
H A Dxen-scsiback.c1586 ssize_t rb; scsiback_tpg_param_show_alias() local
1589 rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias); scsiback_tpg_param_show_alias()
1592 return rb; scsiback_tpg_param_show_alias()
/linux-4.1.27/drivers/staging/android/ion/
H A Dion_priv.h144 * @node: rb node to put the heap on the device's tree of heaps
H A Dion.c47 * @buffers: an rb tree of all the existing buffers
71 * @handles: an rb tree of all the handles in this client
/linux-4.1.27/drivers/pinctrl/mvebu/
H A Dpinctrl-armada-375.c126 MPP_FUNCTION(0x5, "nand", "rb"),
/linux-4.1.27/arch/powerpc/mm/
H A Dfault.c360 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb do_page_fault()
/linux-4.1.27/fs/ext3/
H A Dballoc.c198 * @rb_root: root of per-filesystem reservation rb tree
331 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
369 * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
374 * from the filesystem reservation window rb tree. Must be called with
H A Ddir.c344 * Given a directory entry, enter it into the fname rb tree.
/linux-4.1.27/tools/usb/
H A Dtestusb.c257 fd = fopen(name, "rb"); find_testdev()
/linux-4.1.27/drivers/media/pci/cx18/
H A Dcx18-ioctl.c870 struct v4l2_requestbuffers *rb) cx18_reqbufs()
880 return videobuf_reqbufs(cx18_vb_queue(id), rb); cx18_reqbufs()
869 cx18_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) cx18_reqbufs() argument
/linux-4.1.27/drivers/staging/media/omap4iss/
H A Diss_video.c749 iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) iss_video_reqbufs() argument
753 return vb2_reqbufs(&vfh->queue, rb); iss_video_reqbufs()
/linux-4.1.27/fs/hugetlbfs/
H A Dinode.c478 * an empty rb tree and calls spin_lock_init(), later when we hugetlbfs_get_inode()
480 * the rb tree will still be empty. hugetlbfs_get_inode()
/linux-4.1.27/arch/m68k/ifpsp060/src/
H A Dilsp.S301 mov.w %d6, %d5 # rb + u3
307 mov.w %d6, %d5 # rb + u4
/linux-4.1.27/fs/reiserfs/
H A Dprints.c673 "* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n" store_print_tb()
/linux-4.1.27/fs/ext4/
H A Ddir.c418 * Given a directory entry, enter it into the fname rb tree.
/linux-4.1.27/drivers/media/platform/omap3isp/
H A Dispvideo.c821 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) isp_video_reqbufs() argument
828 ret = vb2_reqbufs(&vfh->queue, rb); isp_video_reqbufs()
