rb               2010 arch/alpha/kernel/smc37c669.c 	return rb(&SMC37c669->data_port);
rb                168 arch/arm/common/dmabounce.c 	struct safe_buffer *b, *rb = NULL;
rb                176 arch/arm/common/dmabounce.c 			rb = b;
rb                181 arch/arm/common/dmabounce.c 	return rb;
rb                392 arch/mips/alchemy/common/usb.c static inline int au1000_usb_init(unsigned long rb, int reg)
rb                394 arch/mips/alchemy/common/usb.c 	void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
rb                425 arch/mips/alchemy/common/usb.c static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
rb                427 arch/mips/alchemy/common/usb.c 	void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
rb                458 arch/mips/alchemy/common/usb.c static inline int au1000_usb_control(int block, int enable, unsigned long rb,
rb                465 arch/mips/alchemy/common/usb.c 		__au1xx0_ohci_control(enable, rb, creg);
rb                304 arch/openrisc/kernel/traps.c 	unsigned int rb, op, jmp;
rb                311 arch/openrisc/kernel/traps.c 		rb = (jmp & 0x0000ffff) >> 11;
rb                335 arch/openrisc/kernel/traps.c 			regs->pc = regs->gpr[rb];
rb                338 arch/openrisc/kernel/traps.c 			regs->pc = regs->gpr[rb];
rb                396 arch/openrisc/kernel/traps.c 	unsigned int ra, rb;
rb                405 arch/openrisc/kernel/traps.c 	rb = (insn >> 11) & 0x1f;
rb                419 arch/openrisc/kernel/traps.c 	if (put_user(regs->gpr[rb], vaddr)) {
rb                431 arch/openrisc/kernel/traps.c 		*((unsigned long *)vaddr) = regs->gpr[rb];
rb                 11 arch/powerpc/include/asm/asm-405.h #define PPC405_ERR77(ra,rb)	stringify_in_c(dcbt	ra, rb;)
rb                 14 arch/powerpc/include/asm/asm-405.h #define PPC405_ERR77(ra,rb)
rb                304 arch/powerpc/include/asm/kvm_book3s_64.h 	unsigned long rb = 0, va_low, sllp;
rb                322 arch/powerpc/include/asm/kvm_book3s_64.h 	rb = (v & ~0x7fUL) << 16;		/* AVA field */
rb                346 arch/powerpc/include/asm/kvm_book3s_64.h 			rb |= sllp << 5;	/*  AP field */
rb                348 arch/powerpc/include/asm/kvm_book3s_64.h 		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
rb                355 arch/powerpc/include/asm/kvm_book3s_64.h 		rb |= (va_low << b_pgshift) & 0x7ff000;
rb                359 arch/powerpc/include/asm/kvm_book3s_64.h 		rb &= ~((1ul << a_pgshift) - 1);
rb                366 arch/powerpc/include/asm/kvm_book3s_64.h 		rb |= ((va_low << aval_shift) & 0xfe);
rb                368 arch/powerpc/include/asm/kvm_book3s_64.h 		rb |= 1;		/* L field */
rb                369 arch/powerpc/include/asm/kvm_book3s_64.h 		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
rb                371 arch/powerpc/include/asm/kvm_book3s_64.h 	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
rb                372 arch/powerpc/include/asm/kvm_book3s_64.h 	return rb;
rb                393 arch/powerpc/include/asm/kvm_host.h 	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
rb               1026 arch/powerpc/include/asm/kvm_ppc.h static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
rb               1031 arch/powerpc/include/asm/kvm_ppc.h 	ea = kvmppc_get_gpr(vcpu, rb);
rb                492 arch/powerpc/include/asm/ppc-opcode.h #define	PPC_TLBIE_5(rb,rs,ric,prs,r) \
rb                494 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RB(rb) | ___PPC_RS(rs) | \
rb                497 arch/powerpc/include/asm/ppc-opcode.h #define	PPC_TLBIEL(rb,rs,ric,prs,r) \
rb                499 arch/powerpc/include/asm/ppc-opcode.h 					___PPC_RB(rb) | ___PPC_RS(rs) | \
rb                 26 arch/powerpc/include/asm/ppc_asm.h #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
rb                 27 arch/powerpc/include/asm/ppc_asm.h #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
rb                 30 arch/powerpc/include/asm/ppc_asm.h #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
rb                 32 arch/powerpc/include/asm/ppc_asm.h 	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
rb                 34 arch/powerpc/include/asm/ppc_asm.h 	subf	rb,rb,ra;		/* subtract start value */	\
rb                 36 arch/powerpc/include/asm/ppc_asm.h 	add	ra,ra,rb;		/* add on to user time */	\
rb                 39 arch/powerpc/include/asm/ppc_asm.h #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
rb                 41 arch/powerpc/include/asm/ppc_asm.h 	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
rb                 43 arch/powerpc/include/asm/ppc_asm.h 	subf	rb,rb,ra;		/* subtract start value */	\
rb                 45 arch/powerpc/include/asm/ppc_asm.h 	add	ra,ra,rb;		/* add on to system time */	\
rb                190 arch/powerpc/include/asm/trace.h 	TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
rb                193 arch/powerpc/include/asm/trace.h 	TP_ARGS(lpid, local, rb, rs, ric, prs, r),
rb                197 arch/powerpc/include/asm/trace.h 		__field(unsigned long, rb)
rb                207 arch/powerpc/include/asm/trace.h 		__entry->rb = rb;
rb                216 arch/powerpc/include/asm/trace.h 		__entry->rb, __entry->rs, __entry->ric, __entry->prs,
rb                418 arch/powerpc/kernel/btext.c static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
rb                435 arch/powerpc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                439 arch/powerpc/kernel/btext.c static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
rb                453 arch/powerpc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                457 arch/powerpc/kernel/btext.c static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
rb                469 arch/powerpc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                477 arch/powerpc/kernel/btext.c 	int rb			= dispDeviceRowBytes;
rb                483 arch/powerpc/kernel/btext.c 		draw_byte_32(font, (unsigned int *)base, rb);
rb                487 arch/powerpc/kernel/btext.c 		draw_byte_16(font, (unsigned int *)base, rb);
rb                490 arch/powerpc/kernel/btext.c 		draw_byte_8(font, (unsigned int *)base, rb);
rb                367 arch/powerpc/kernel/kvm.c static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
rb                392 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
rb               1156 arch/powerpc/kernel/time.c 	u64 ra, rb, rc;
rb               1166 arch/powerpc/kernel/time.c 	rb = ((u64) do_div(ra, divisor) << 32) + c;
rb               1169 arch/powerpc/kernel/time.c 	rc = ((u64) do_div(rb, divisor) << 32) + d;
rb               1170 arch/powerpc/kernel/time.c 	y = rb;
rb                562 arch/powerpc/kernel/traps.c 			unsigned int rb;
rb                565 arch/powerpc/kernel/traps.c 			rb = (*nip >> 11) & 0x1f;
rb                568 arch/powerpc/kernel/traps.c 			       regs->gpr[rb] - _IO_BASE, nip);
rb                878 arch/powerpc/kernel/traps.c 	unsigned int ra, rb, t, i, sel, instr, rc;
rb                914 arch/powerpc/kernel/traps.c 	rb = (instr >> 11) & 0x1f;
rb                922 arch/powerpc/kernel/traps.c 	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
rb                378 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
rb                384 arch/powerpc/kvm/book3s_64_mmu.c 	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
rb                386 arch/powerpc/kvm/book3s_64_mmu.c 	esid = GET_ESID(rb);
rb                387 arch/powerpc/kvm/book3s_64_mmu.c 	esid_1t = GET_ESID_1T(rb);
rb                388 arch/powerpc/kvm/book3s_64_mmu.c 	slb_nr = rb & 0xfff;
rb                399 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
rb                420 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
rb                507 arch/powerpc/kvm/book3s_64_mmu.c 	u64 rb = 0, rs = 0;
rb                530 arch/powerpc/kvm/book3s_64_mmu.c 	rb |= (srnum & 0xf) << 28;
rb                532 arch/powerpc/kvm/book3s_64_mmu.c 	rb |= 1 << 27;
rb                534 arch/powerpc/kvm/book3s_64_mmu.c 	rb |= srnum;
rb                541 arch/powerpc/kvm/book3s_64_mmu.c 	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
rb                301 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long rb;
rb                316 arch/powerpc/kvm/book3s_64_mmu_radix.c 	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
rb                318 arch/powerpc/kvm/book3s_64_mmu_radix.c 				lpid, rb);
rb                245 arch/powerpc/kvm/book3s_emulate.c 	int rb = get_rb(inst);
rb                332 arch/powerpc/kvm/book3s_emulate.c 			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
rb                347 arch/powerpc/kvm/book3s_emulate.c 				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
rb                354 arch/powerpc/kvm/book3s_emulate.c 			ulong addr = kvmppc_get_gpr(vcpu, rb);
rb                394 arch/powerpc/kvm/book3s_emulate.c 					kvmppc_get_gpr(vcpu, rb));
rb                401 arch/powerpc/kvm/book3s_emulate.c 					kvmppc_get_gpr(vcpu, rb));
rb                416 arch/powerpc/kvm/book3s_emulate.c 				b = kvmppc_get_gpr(vcpu, rb);
rb                432 arch/powerpc/kvm/book3s_emulate.c 				rb_val = kvmppc_get_gpr(vcpu, rb);
rb                443 arch/powerpc/kvm/book3s_emulate.c 				rb_val = kvmppc_get_gpr(vcpu, rb);
rb                453 arch/powerpc/kvm/book3s_emulate.c 			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
rb               1047 arch/powerpc/kvm/book3s_emulate.c 	ulong rb = get_rb(inst);
rb               1061 arch/powerpc/kvm/book3s_emulate.c 		dar += kvmppc_get_gpr(vcpu, rb);
rb               1196 arch/powerpc/kvm/book3s_hv.c 	u32 inst, rb, thr;
rb               1205 arch/powerpc/kvm/book3s_hv.c 	rb = get_rb(inst);
rb               1209 arch/powerpc/kvm/book3s_hv.c 		arg = kvmppc_get_gpr(vcpu, rb);
rb               1224 arch/powerpc/kvm/book3s_hv.c 		arg = kvmppc_get_gpr(vcpu, rb);
rb                705 arch/powerpc/kvm/book3s_hv_builtin.c 	unsigned long rb, set;
rb                720 arch/powerpc/kvm/book3s_hv_builtin.c 			rb = TLBIEL_INVAL_SET_LPID +
rb                723 arch/powerpc/kvm/book3s_hv_builtin.c 				     "r" (rb), "r" (0));
rb                808 arch/powerpc/kvm/book3s_hv_builtin.c 	unsigned long rb, set;
rb                810 arch/powerpc/kvm/book3s_hv_builtin.c 	rb = PPC_BIT(52);	/* IS = 2 */
rb                814 arch/powerpc/kvm/book3s_hv_builtin.c 			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
rb                817 arch/powerpc/kvm/book3s_hv_builtin.c 			rb += PPC_BIT(51);	/* increment set number */
rb                820 arch/powerpc/kvm/book3s_hv_builtin.c 				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
rb                829 arch/powerpc/kvm/book3s_hv_builtin.c 				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
rb                831 arch/powerpc/kvm/book3s_hv_builtin.c 			rb += PPC_BIT(51);	/* increment set number */
rb                 56 arch/powerpc/kvm/book3s_hv_ras.c 		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
rb                 59 arch/powerpc/kvm/book3s_hv_ras.c 		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
rb                 60 arch/powerpc/kvm/book3s_hv_ras.c 		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
rb                442 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		unsigned long rb,rs,prs,r,ric;
rb                444 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		rb = PPC_BIT(52); /* IS = 2 */
rb                456 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			     : : "r"(rb), "i"(r), "i"(prs),
rb                503 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long v, r, rb;
rb                531 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		rb = compute_tlbie_rb(v, pte_r, pte_index);
rb                532 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
rb                692 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long v, r, rb, mask, bits;
rb                739 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			rb = compute_tlbie_rb(v, r, pte_index);
rb                742 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
rb               1037 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long rb;
rb               1047 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rb = compute_tlbie_rb(hp0, hp1, pte_index);
rb               1048 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	do_tlbies(kvm, &rb, 1, 1, true);
rb               1055 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long rb;
rb               1065 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rb = compute_tlbie_rb(hp0, hp1, pte_index);
rb               1069 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	do_tlbies(kvm, &rb, 1, 1, false);
rb               1482 arch/powerpc/kvm/book3s_pr.c 			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
rb               1485 arch/powerpc/kvm/book3s_pr.c 			if (rb & SLB_ESID_V)
rb               1486 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
rb                 91 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long v = 0, pteg, rb;
rb                113 arch/powerpc/kvm/book3s_pr_papr.c 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
rb                114 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
rb                155 arch/powerpc/kvm/book3s_pr_papr.c 		unsigned long pteg, rb, flags;
rb                200 arch/powerpc/kvm/book3s_pr_papr.c 			rb = compute_tlbie_rb(pte[0], pte[1],
rb                202 arch/powerpc/kvm/book3s_pr_papr.c 			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
rb                219 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long rb, pteg, r, v;
rb                246 arch/powerpc/kvm/book3s_pr_papr.c 	rb = compute_tlbie_rb(v, r, pte_index);
rb                247 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
rb                 51 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
rb                 53 arch/powerpc/kvm/e500_emulate.c 	ulong param = vcpu->arch.regs.gpr[rb];
rb                 63 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
rb                 65 arch/powerpc/kvm/e500_emulate.c 	ulong param = vcpu->arch.regs.gpr[rb];
rb                 66 arch/powerpc/kvm/e500_emulate.c 	int prio = dbell2prio(rb);
rb                133 arch/powerpc/kvm/e500_emulate.c 	int rb = get_rb(inst);
rb                147 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
rb                151 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
rb                164 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
rb                170 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
rb                176 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
rb                178 arch/powerpc/lib/sstep.c 	int ra, rb;
rb                182 arch/powerpc/lib/sstep.c 	rb = (instr >> 11) & 0x1f;
rb                183 arch/powerpc/lib/sstep.c 	ea = regs->gpr[rb];
rb               1168 arch/powerpc/lib/sstep.c 	unsigned int opcode, ra, rb, rc, rd, spr, u;
rb               1251 arch/powerpc/lib/sstep.c 			rb = (instr >> 11) & 0x1f;
rb               1254 arch/powerpc/lib/sstep.c 			rb = (regs->ccr >> (31 - rb)) & 1;
rb               1255 arch/powerpc/lib/sstep.c 			val = (instr >> (6 + ra * 2 + rb)) & 1;
rb               1290 arch/powerpc/lib/sstep.c 	rb = (instr >> 11) & 0x1f;
rb               1314 arch/powerpc/lib/sstep.c 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
rb               1320 arch/powerpc/lib/sstep.c 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
rb               1326 arch/powerpc/lib/sstep.c 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
rb               1407 arch/powerpc/lib/sstep.c 		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
rb               1414 arch/powerpc/lib/sstep.c 		op->val = ROTATE(val, rb) & MASK32(mb, me);
rb               1420 arch/powerpc/lib/sstep.c 		rb = regs->gpr[rb] & 0x1f;
rb               1422 arch/powerpc/lib/sstep.c 		op->val = ROTATE(val, rb) & MASK32(mb, me);
rb               1459 arch/powerpc/lib/sstep.c 			sh = rb | ((instr & 2) << 4);
rb               1479 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x3f;
rb               1501 arch/powerpc/lib/sstep.c 			op->val = (val) ? val2 : regs->gpr[rb];
rb               1509 arch/powerpc/lib/sstep.c 					       (int)regs->gpr[rb])))
rb               1514 arch/powerpc/lib/sstep.c 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
rb               1595 arch/powerpc/lib/sstep.c 			val2 = regs->gpr[rb];
rb               1608 arch/powerpc/lib/sstep.c 			val2 = regs->gpr[rb];
rb               1620 arch/powerpc/lib/sstep.c 			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
rb               1628 arch/powerpc/lib/sstep.c 				       regs->gpr[rb], 1);
rb               1633 arch/powerpc/lib/sstep.c 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
rb               1638 arch/powerpc/lib/sstep.c 				       regs->gpr[rb], 0);
rb               1643 arch/powerpc/lib/sstep.c 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
rb               1647 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rb] - regs->gpr[ra];
rb               1652 arch/powerpc/lib/sstep.c 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
rb               1657 arch/powerpc/lib/sstep.c 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
rb               1666 arch/powerpc/lib/sstep.c 				       regs->gpr[rb], regs->xer & XER_CA);
rb               1671 arch/powerpc/lib/sstep.c 				       regs->gpr[rb], regs->xer & XER_CA);
rb               1690 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[ra] * regs->gpr[rb];
rb               1700 arch/powerpc/lib/sstep.c 				(int) regs->gpr[rb];
rb               1707 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[ra] % regs->gpr[rb];
rb               1711 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[ra] + regs->gpr[rb];
rb               1718 arch/powerpc/lib/sstep.c 				(unsigned int) regs->gpr[rb];
rb               1722 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[ra] / regs->gpr[rb];
rb               1727 arch/powerpc/lib/sstep.c 				(unsigned int) regs->gpr[rb];
rb               1732 arch/powerpc/lib/sstep.c 				(long int) regs->gpr[rb];
rb               1737 arch/powerpc/lib/sstep.c 				(int) regs->gpr[rb];
rb               1766 arch/powerpc/lib/sstep.c 				(long int) regs->gpr[rb];
rb               1773 arch/powerpc/lib/sstep.c 				(int) regs->gpr[rb];
rb               1791 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rd] & regs->gpr[rb];
rb               1795 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rd] & ~regs->gpr[rb];
rb               1803 arch/powerpc/lib/sstep.c 			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
rb               1815 arch/powerpc/lib/sstep.c 			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
rb               1819 arch/powerpc/lib/sstep.c 			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
rb               1823 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rd] ^ regs->gpr[rb];
rb               1831 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rd] | ~regs->gpr[rb];
rb               1835 arch/powerpc/lib/sstep.c 			op->val = regs->gpr[rd] | regs->gpr[rb];
rb               1839 arch/powerpc/lib/sstep.c 			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
rb               1877 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x3f;
rb               1885 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x3f;
rb               1894 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x3f;
rb               1907 arch/powerpc/lib/sstep.c 			sh = rb;
rb               1920 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x7f;
rb               1928 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x7f;
rb               1937 arch/powerpc/lib/sstep.c 			sh = regs->gpr[rb] & 0x7f;
rb               1951 arch/powerpc/lib/sstep.c 			sh = rb | ((instr & 2) << 4);
rb               1967 arch/powerpc/lib/sstep.c 			sh = rb | ((instr & 2) << 4);
rb               2064 arch/powerpc/lib/sstep.c 			if (!((rd & 1) || rd == ra || rd == rb))
rb               2190 arch/powerpc/lib/sstep.c 			if (rb == 0)
rb               2191 arch/powerpc/lib/sstep.c 				rb = 32;	/* # bytes to load */
rb               2192 arch/powerpc/lib/sstep.c 			op->type = MKOP(LOAD_MULTI, 0, rb);
rb               2257 arch/powerpc/lib/sstep.c 			if (rb == 0)
rb               2258 arch/powerpc/lib/sstep.c 				rb = 32;	/* # bytes to store */
rb               2259 arch/powerpc/lib/sstep.c 			op->type = MKOP(STORE_MULTI, 0, rb);
rb               2303 arch/powerpc/lib/sstep.c 			nb = regs->gpr[rb] & 0xff;
rb               2338 arch/powerpc/lib/sstep.c 			nb = regs->gpr[rb] & 0xff;
rb                 48 arch/powerpc/mm/book3s64/hash_native.c 	unsigned long rb;
rb                 50 arch/powerpc/mm/book3s64/hash_native.c 	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
rb                 52 arch/powerpc/mm/book3s64/hash_native.c 	asm volatile("tlbiel %0" : : "r" (rb));
rb                 63 arch/powerpc/mm/book3s64/hash_native.c 	unsigned long rb;
rb                 67 arch/powerpc/mm/book3s64/hash_native.c 	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
rb                 71 arch/powerpc/mm/book3s64/hash_native.c 		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
rb                206 arch/powerpc/mm/book3s64/hash_native.c 		unsigned long rb,rs,prs,r,ric;
rb                208 arch/powerpc/mm/book3s64/hash_native.c 		rb = PPC_BIT(52); /* IS = 2 */
rb                220 arch/powerpc/mm/book3s64/hash_native.c 			     : : "r"(rb), "i"(r), "i"(prs),
rb                234 arch/powerpc/mm/book3s64/hash_native.c 	unsigned long rb;
rb                236 arch/powerpc/mm/book3s64/hash_native.c 	rb = ___tlbie(vpn, psize, apsize, ssize);
rb                237 arch/powerpc/mm/book3s64/hash_native.c 	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
rb                 32 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb;
rb                 35 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
rb                 39 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
rb                 96 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                 98 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = PPC_BIT(53); /* IS = 1 */
rb                 99 arch/powerpc/mm/book3s64/radix_tlb.c 	rb |= set << PPC_BITLSHIFT(51);
rb                105 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                106 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(0, 1, rb, rs, ric, prs, r);
rb                111 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                113 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = PPC_BIT(53); /* IS = 1 */
rb                119 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                120 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
rb                125 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                127 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = PPC_BIT(52); /* IS = 2 */
rb                133 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                134 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
rb                139 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                141 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = PPC_BIT(52); /* IS = 2 */
rb                147 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                148 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
rb                154 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                156 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = va & ~(PPC_BITMASK(52, 63));
rb                157 arch/powerpc/mm/book3s64/radix_tlb.c 	rb |= ap << PPC_BITLSHIFT(58);
rb                163 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                164 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(0, 1, rb, rs, ric, prs, r);
rb                170 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                172 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = va & ~(PPC_BITMASK(52, 63));
rb                173 arch/powerpc/mm/book3s64/radix_tlb.c 	rb |= ap << PPC_BITLSHIFT(58);
rb                179 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                180 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
rb                186 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,rs,prs,r;
rb                188 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = va & ~(PPC_BITMASK(52, 63));
rb                189 arch/powerpc/mm/book3s64/radix_tlb.c 	rb |= ap << PPC_BITLSHIFT(58);
rb                195 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
rb                196 arch/powerpc/mm/book3s64/radix_tlb.c 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
rb               1194 arch/powerpc/mm/book3s64/radix_tlb.c 	unsigned long rb,prs,r,rs;
rb               1197 arch/powerpc/mm/book3s64/radix_tlb.c 	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
rb               1207 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
rb               1212 arch/powerpc/mm/book3s64/radix_tlb.c 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
rb                935 arch/powerpc/sysdev/fsl_pci.c 	unsigned int rd, ra, rb, d;
rb                939 arch/powerpc/sysdev/fsl_pci.c 	rb = get_rb(inst);
rb                952 arch/powerpc/sysdev/fsl_pci.c 			regs->gpr[ra] += regs->gpr[rb];
rb                961 arch/powerpc/sysdev/fsl_pci.c 			regs->gpr[ra] += regs->gpr[rb];
rb                971 arch/powerpc/sysdev/fsl_pci.c 			regs->gpr[ra] += regs->gpr[rb];
rb                980 arch/powerpc/sysdev/fsl_pci.c 			regs->gpr[ra] += regs->gpr[rb];
rb                174 arch/powerpc/sysdev/mpic.c 			     struct mpic_reg_bank *rb,
rb                180 arch/powerpc/sysdev/mpic.c 		return dcr_read(rb->dhost, reg);
rb                183 arch/powerpc/sysdev/mpic.c 		return in_be32(rb->base + (reg >> 2));
rb                186 arch/powerpc/sysdev/mpic.c 		return in_le32(rb->base + (reg >> 2));
rb                191 arch/powerpc/sysdev/mpic.c 			       struct mpic_reg_bank *rb,
rb                197 arch/powerpc/sysdev/mpic.c 		dcr_write(rb->dhost, reg, value);
rb                201 arch/powerpc/sysdev/mpic.c 		out_be32(rb->base + (reg >> 2), value);
rb                205 arch/powerpc/sysdev/mpic.c 		out_le32(rb->base + (reg >> 2), value);
rb                315 arch/powerpc/sysdev/mpic.c 			   struct mpic_reg_bank *rb, unsigned int offset,
rb                318 arch/powerpc/sysdev/mpic.c 	rb->base = ioremap(phys_addr + offset, size);
rb                319 arch/powerpc/sysdev/mpic.c 	BUG_ON(rb->base == NULL);
rb                323 arch/powerpc/sysdev/mpic.c static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
rb                327 arch/powerpc/sysdev/mpic.c 	rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
rb                328 arch/powerpc/sysdev/mpic.c 	BUG_ON(!DCR_MAP_OK(rb->dhost));
rb                332 arch/powerpc/sysdev/mpic.c 			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
rb                336 arch/powerpc/sysdev/mpic.c 		_mpic_map_dcr(mpic, rb, offset, size);
rb                338 arch/powerpc/sysdev/mpic.c 		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
rb                 19 arch/s390/boot/ipl_report.c #define for_each_rb_entry(entry, rb) \
rb                 20 arch/s390/boot/ipl_report.c 	for (entry = rb->entries; \
rb                 21 arch/s390/boot/ipl_report.c 	     (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
rb                303 arch/sh/kernel/disassemble.c 		int rb = 0;
rb                371 arch/sh/kernel/disassemble.c 				rb = nibs[n] & 0x07;
rb                423 arch/sh/kernel/disassemble.c 				printk("r%d_bank", rb);
rb                 23 arch/sparc/kernel/btext.c static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
rb                 24 arch/sparc/kernel/btext.c static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
rb                 25 arch/sparc/kernel/btext.c static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
rb                198 arch/sparc/kernel/btext.c 	int rb			= dispDeviceRowBytes;
rb                203 arch/sparc/kernel/btext.c 		draw_byte_32(font, (unsigned int *)base, rb);
rb                207 arch/sparc/kernel/btext.c 		draw_byte_16(font, (unsigned int *)base, rb);
rb                210 arch/sparc/kernel/btext.c 		draw_byte_8(font, (unsigned int *)base, rb);
rb                242 arch/sparc/kernel/btext.c static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
rb                259 arch/sparc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                263 arch/sparc/kernel/btext.c static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
rb                277 arch/sparc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                281 arch/sparc/kernel/btext.c static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
rb                293 arch/sparc/kernel/btext.c 		base = (unsigned int *) ((char *)base + rb);
rb                 15 arch/x86/mm/pat_internal.h 	struct rb_node		rb;
rb                 51 arch/x86/mm/pat_rbtree.c 		struct memtype *data = rb_entry(node, struct memtype, rb);
rb                 60 arch/x86/mm/pat_rbtree.c 			 struct memtype, rb, u64, subtree_max_end, NODE_END)
rb                 70 arch/x86/mm/pat_rbtree.c 		struct memtype *data = rb_entry(node, struct memtype, rb);
rb                110 arch/x86/mm/pat_rbtree.c 		node = rb_next(&match->rb);
rb                112 arch/x86/mm/pat_rbtree.c 			match = rb_entry(node, struct memtype, rb);
rb                139 arch/x86/mm/pat_rbtree.c 	node = rb_next(&match->rb);
rb                141 arch/x86/mm/pat_rbtree.c 		match = rb_entry(node, struct memtype, rb);
rb                151 arch/x86/mm/pat_rbtree.c 		node = rb_next(&match->rb);
rb                172 arch/x86/mm/pat_rbtree.c 		struct memtype *data = rb_entry(*node, struct memtype, rb);
rb                184 arch/x86/mm/pat_rbtree.c 	rb_link_node(&newdata->rb, parent, node);
rb                185 arch/x86/mm/pat_rbtree.c 	rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
rb                228 arch/x86/mm/pat_rbtree.c 		rb_erase_augmented(&data->rb, &memtype_rbroot,
rb                232 arch/x86/mm/pat_rbtree.c 		rb_erase_augmented(&data->rb, &memtype_rbroot,
rb                261 arch/x86/mm/pat_rbtree.c 		struct memtype *this = rb_entry(node, struct memtype, rb);
rb                397 crypto/tgr192.c static void tgr192_round(u64 * ra, u64 * rb, u64 * rc, u64 x, int mul)
rb                400 crypto/tgr192.c 	u64 b = *rb;
rb                411 crypto/tgr192.c 	*rb = b;
rb                416 crypto/tgr192.c static void tgr192_pass(u64 * ra, u64 * rb, u64 * rc, u64 * x, int mul)
rb                419 crypto/tgr192.c 	u64 b = *rb;
rb                432 crypto/tgr192.c 	*rb = b;
rb                 12 drivers/block/drbd/drbd_interval.c 	struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
rb                 19 drivers/block/drbd/drbd_interval.c 			 struct drbd_interval, rb, sector_t, end, NODE_END);
rb                 34 drivers/block/drbd/drbd_interval.c 			rb_entry(*new, struct drbd_interval, rb);
rb                 52 drivers/block/drbd/drbd_interval.c 	rb_link_node(&this->rb, parent, new);
rb                 53 drivers/block/drbd/drbd_interval.c 	rb_insert_augmented(&this->rb, root, &augment_callbacks);
rb                 75 drivers/block/drbd/drbd_interval.c 			rb_entry(node, struct drbd_interval, rb);
rb                 97 drivers/block/drbd/drbd_interval.c 	rb_erase_augmented(&this->rb, root, &augment_callbacks);
rb                122 drivers/block/drbd/drbd_interval.c 			rb_entry(node, struct drbd_interval, rb);
rb                148 drivers/block/drbd/drbd_interval.c 		node = rb_next(&i->rb);
rb                151 drivers/block/drbd/drbd_interval.c 		i = rb_entry(node, struct drbd_interval, rb);
rb                  9 drivers/block/drbd/drbd_interval.h 	struct rb_node rb;
rb                 21 drivers/block/drbd/drbd_interval.h 	RB_CLEAR_NODE(&i->rb);
rb                 26 drivers/block/drbd/drbd_interval.h 	return RB_EMPTY_NODE(&i->rb);
rb                 84 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 					     &amn->objects.rb_root, it.rb) {
rb                 51 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	struct rb_node			rb;
rb                 63 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
rb               2964 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 					     &vm->va.rb_root, rb) {
rb                146 drivers/gpu/drm/drm_client_modeset.c 	if (cmdline_mode->rb || cmdline_mode->margins)
rb                147 drivers/gpu/drm/drm_connector.c 		      mode->rb ? " reduced blanking" : "",
rb                647 drivers/gpu/drm/drm_edid.c 	short rb;
rb               1938 drivers/gpu/drm/drm_edid.c 					   bool rb)
rb               1950 drivers/gpu/drm/drm_edid.c 		if (rb != mode_is_rb(ptr))
rb               2549 drivers/gpu/drm/drm_edid.c 	bool rb = drm_monitor_supports_rb(edid);
rb               2553 drivers/gpu/drm/drm_edid.c 		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
rb               2641 drivers/gpu/drm/drm_edid.c 							 est3_modes[m].rb);
rb                155 drivers/gpu/drm/drm_mm.c INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
rb                171 drivers/gpu/drm/drm_mm.c 	struct rb_node **link, *rb;
rb                178 drivers/gpu/drm/drm_mm.c 		rb = &hole_node->rb;
rb                179 drivers/gpu/drm/drm_mm.c 		while (rb) {
rb                180 drivers/gpu/drm/drm_mm.c 			parent = rb_entry(rb, struct drm_mm_node, rb);
rb                185 drivers/gpu/drm/drm_mm.c 			rb = rb_parent(rb);
rb                188 drivers/gpu/drm/drm_mm.c 		rb = &hole_node->rb;
rb                189 drivers/gpu/drm/drm_mm.c 		link = &hole_node->rb.rb_right;
rb                192 drivers/gpu/drm/drm_mm.c 		rb = NULL;
rb                198 drivers/gpu/drm/drm_mm.c 		rb = *link;
rb                199 drivers/gpu/drm/drm_mm.c 		parent = rb_entry(rb, struct drm_mm_node, rb);
rb                203 drivers/gpu/drm/drm_mm.c 			link = &parent->rb.rb_left;
rb                205 drivers/gpu/drm/drm_mm.c 			link = &parent->rb.rb_right;
rb                210 drivers/gpu/drm/drm_mm.c 	rb_link_node(&node->rb, rb, link);
rb                211 drivers/gpu/drm/drm_mm.c 	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
rb                216 drivers/gpu/drm/drm_mm.c 	struct rb_node **link = &root.rb_node, *rb = NULL; \
rb                219 drivers/gpu/drm/drm_mm.c 		rb = *link; \
rb                220 drivers/gpu/drm/drm_mm.c 		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
rb                221 drivers/gpu/drm/drm_mm.c 			link = &rb->rb_left; \
rb                223 drivers/gpu/drm/drm_mm.c 			link = &rb->rb_right; \
rb                225 drivers/gpu/drm/drm_mm.c 	rb_link_node(&node->member, rb, link); \
rb                232 drivers/gpu/drm/drm_mm.c static u64 rb_to_hole_size(struct rb_node *rb)
rb                234 drivers/gpu/drm/drm_mm.c 	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
rb                240 drivers/gpu/drm/drm_mm.c 	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
rb                245 drivers/gpu/drm/drm_mm.c 		rb = *link;
rb                246 drivers/gpu/drm/drm_mm.c 		if (x > rb_to_hole_size(rb)) {
rb                247 drivers/gpu/drm/drm_mm.c 			link = &rb->rb_left;
rb                249 drivers/gpu/drm/drm_mm.c 			link = &rb->rb_right;
rb                254 drivers/gpu/drm/drm_mm.c 	rb_link_node(&node->rb_hole_size, rb, link);
rb                284 drivers/gpu/drm/drm_mm.c static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
rb                286 drivers/gpu/drm/drm_mm.c 	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
rb                289 drivers/gpu/drm/drm_mm.c static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
rb                291 drivers/gpu/drm/drm_mm.c 	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
rb                294 drivers/gpu/drm/drm_mm.c static inline u64 rb_hole_size(struct rb_node *rb)
rb                296 drivers/gpu/drm/drm_mm.c 	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
rb                301 drivers/gpu/drm/drm_mm.c 	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
rb                306 drivers/gpu/drm/drm_mm.c 			rb_entry(rb, struct drm_mm_node, rb_hole_size);
rb                310 drivers/gpu/drm/drm_mm.c 			rb = rb->rb_right;
rb                312 drivers/gpu/drm/drm_mm.c 			rb = rb->rb_left;
rb                314 drivers/gpu/drm/drm_mm.c 	} while (rb);
rb                321 drivers/gpu/drm/drm_mm.c 	struct rb_node *rb = mm->holes_addr.rb_node;
rb                324 drivers/gpu/drm/drm_mm.c 	while (rb) {
rb                327 drivers/gpu/drm/drm_mm.c 		node = rb_hole_addr_to_node(rb);
rb                331 drivers/gpu/drm/drm_mm.c 			rb = node->rb_hole_addr.rb_left;
rb                333 drivers/gpu/drm/drm_mm.c 			rb = node->rb_hole_addr.rb_right;
rb                443 drivers/gpu/drm/drm_mm.c static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
rb                445 drivers/gpu/drm/drm_mm.c 	return rb ? rb_to_hole_size(rb) : 0;
rb                613 drivers/gpu/drm/drm_mm.c 	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
rb               1513 drivers/gpu/drm/drm_modes.c 	bool rb = false, cvt = false;
rb               1541 drivers/gpu/drm/drm_modes.c 			rb = true;
rb               1566 drivers/gpu/drm/drm_modes.c 	mode->rb = rb;
rb               1900 drivers/gpu/drm/drm_modes.c 				    cmd->rb, cmd->interlace,
rb                102 drivers/gpu/drm/drm_prime.c 	struct rb_node **p, *rb;
rb                112 drivers/gpu/drm/drm_prime.c 	rb = NULL;
rb                117 drivers/gpu/drm/drm_prime.c 		rb = *p;
rb                118 drivers/gpu/drm/drm_prime.c 		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
rb                120 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_right;
rb                122 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_left;
rb                124 drivers/gpu/drm/drm_prime.c 	rb_link_node(&member->dmabuf_rb, rb, p);
rb                127 drivers/gpu/drm/drm_prime.c 	rb = NULL;
rb                132 drivers/gpu/drm/drm_prime.c 		rb = *p;
rb                133 drivers/gpu/drm/drm_prime.c 		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
rb                135 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_right;
rb                137 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_left;
rb                139 drivers/gpu/drm/drm_prime.c 	rb_link_node(&member->handle_rb, rb, p);
rb                148 drivers/gpu/drm/drm_prime.c 	struct rb_node *rb;
rb                150 drivers/gpu/drm/drm_prime.c 	rb = prime_fpriv->handles.rb_node;
rb                151 drivers/gpu/drm/drm_prime.c 	while (rb) {
rb                154 drivers/gpu/drm/drm_prime.c 		member = rb_entry(rb, struct drm_prime_member, handle_rb);
rb                158 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_right;
rb                160 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_left;
rb                170 drivers/gpu/drm/drm_prime.c 	struct rb_node *rb;
rb                172 drivers/gpu/drm/drm_prime.c 	rb = prime_fpriv->dmabufs.rb_node;
rb                173 drivers/gpu/drm/drm_prime.c 	while (rb) {
rb                176 drivers/gpu/drm/drm_prime.c 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
rb                181 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_right;
rb                183 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_left;
rb                193 drivers/gpu/drm/drm_prime.c 	struct rb_node *rb;
rb                195 drivers/gpu/drm/drm_prime.c 	rb = prime_fpriv->dmabufs.rb_node;
rb                196 drivers/gpu/drm/drm_prime.c 	while (rb) {
rb                199 drivers/gpu/drm/drm_prime.c 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
rb                208 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_right;
rb                210 drivers/gpu/drm/drm_prime.c 			rb = rb->rb_left;
rb                152 drivers/gpu/drm/drm_vma_manager.c 		node = rb_entry(iter, struct drm_mm_node, rb);
rb                 48 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
rb                 54 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (RB_EMPTY_NODE(&mo->it.rb))
rb                 58 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	RB_CLEAR_NODE(&mo->it.rb);
rb                279 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	RB_CLEAR_NODE(&mo->it.rb);
rb                202 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct rb_node rb;
rb                264 drivers/gpu/drm/i915/gt/intel_lrc.c static inline struct i915_priolist *to_priolist(struct rb_node *rb)
rb                266 drivers/gpu/drm/i915/gt/intel_lrc.c 	return rb_entry(rb, struct i915_priolist, node);
rb                306 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct rb_node *rb;
rb                308 drivers/gpu/drm/i915/gt/intel_lrc.c 	rb = rb_first_cached(&execlists->queue);
rb                309 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!rb)
rb                316 drivers/gpu/drm/i915/gt/intel_lrc.c 	p = to_priolist(rb);
rb                322 drivers/gpu/drm/i915/gt/intel_lrc.c 				struct rb_node *rb)
rb                354 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (rb) {
rb                356 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb               1044 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct rb_node *rb;
rb               1069 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
rb               1071 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb               1075 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_erase_cached(rb, &execlists->virtual);
rb               1076 drivers/gpu/drm/i915/gt/intel_lrc.c 			RB_CLEAR_NODE(rb);
rb               1077 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb = rb_first_cached(&execlists->virtual);
rb               1082 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb = rb_next(rb);
rb               1099 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (need_preempt(engine, last, rb)) {
rb               1167 drivers/gpu/drm/i915/gt/intel_lrc.c 	while (rb) { /* XXX virtual is always taking precedence */
rb               1169 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb               1177 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_erase_cached(rb, &execlists->virtual);
rb               1178 drivers/gpu/drm/i915/gt/intel_lrc.c 			RB_CLEAR_NODE(rb);
rb               1179 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb = rb_first_cached(&execlists->virtual);
rb               1190 drivers/gpu/drm/i915/gt/intel_lrc.c 				rb = rb_next(rb);
rb               1210 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_erase_cached(rb, &execlists->virtual);
rb               1211 drivers/gpu/drm/i915/gt/intel_lrc.c 			RB_CLEAR_NODE(rb);
rb               1259 drivers/gpu/drm/i915/gt/intel_lrc.c 				rb = rb_first_cached(&execlists->virtual);
rb               1268 drivers/gpu/drm/i915/gt/intel_lrc.c 	while ((rb = rb_first_cached(&execlists->queue))) {
rb               1269 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = to_priolist(rb);
rb               2571 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct rb_node *rb;
rb               2599 drivers/gpu/drm/i915/gt/intel_lrc.c 	while ((rb = rb_first_cached(&execlists->queue))) {
rb               2600 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = to_priolist(rb);
rb               2613 drivers/gpu/drm/i915/gt/intel_lrc.c 	while ((rb = rb_first_cached(&execlists->virtual))) {
rb               2615 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb               2617 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_erase_cached(rb, &execlists->virtual);
rb               2618 drivers/gpu/drm/i915/gt/intel_lrc.c 		RB_CLEAR_NODE(rb);
rb               3430 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct rb_node *node = &ve->nodes[sibling->id].rb;
rb               3567 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct rb_node **parent, *rb;
rb               3571 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (!RB_EMPTY_NODE(&node->rb)) {
rb               3573 drivers/gpu/drm/i915/gt/intel_lrc.c 				rb_erase_cached(&node->rb,
rb               3575 drivers/gpu/drm/i915/gt/intel_lrc.c 				RB_CLEAR_NODE(&node->rb);
rb               3583 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (!RB_EMPTY_NODE(&node->rb)) {
rb               3589 drivers/gpu/drm/i915/gt/intel_lrc.c 				&node->rb;
rb               3593 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_erase_cached(&node->rb, &sibling->execlists.virtual);
rb               3596 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb = NULL;
rb               3602 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb = *parent;
rb               3603 drivers/gpu/drm/i915/gt/intel_lrc.c 			other = rb_entry(rb, typeof(*other), rb);
rb               3605 drivers/gpu/drm/i915/gt/intel_lrc.c 				parent = &rb->rb_left;
rb               3607 drivers/gpu/drm/i915/gt/intel_lrc.c 				parent = &rb->rb_right;
rb               3612 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_link_node(&node->rb, rb, parent);
rb               3613 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_insert_color_cached(&node->rb,
rb               3618 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
rb               3794 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
rb               3795 drivers/gpu/drm/i915/gt/intel_lrc.c 		RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
rb               3925 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct rb_node *rb;
rb               3951 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
rb               3952 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
rb               3973 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
rb               3975 drivers/gpu/drm/i915/gt/intel_lrc.c 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb                 75 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static inline struct i915_priolist *to_priolist(struct rb_node *rb)
rb                 77 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	return rb_entry(rb, struct i915_priolist, node);
rb                545 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct rb_node *rb;
rb                562 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	while ((rb = rb_first_cached(&execlists->queue))) {
rb                563 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_priolist *p = to_priolist(rb);
rb                588 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		rb ? to_priolist(rb)->priority : INT_MIN;
rb                683 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct rb_node *rb;
rb                716 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	while ((rb = rb_first_cached(&execlists->queue))) {
rb                717 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_priolist *p = to_priolist(rb);
rb               1805 drivers/gpu/drm/i915/i915_drv.h #define rb_to_uabi_engine(rb) \
rb               1806 drivers/gpu/drm/i915/i915_drv.h 	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
rb                 38 drivers/gpu/drm/i915/i915_scheduler.c static inline struct i915_priolist *to_priolist(struct rb_node *rb)
rb                 40 drivers/gpu/drm/i915/i915_scheduler.c 	return rb_entry(rb, struct i915_priolist, node);
rb                 45 drivers/gpu/drm/i915/i915_scheduler.c 	struct rb_node *rb;
rb                 55 drivers/gpu/drm/i915/i915_scheduler.c 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
rb                 56 drivers/gpu/drm/i915/i915_scheduler.c 		const struct i915_priolist *p = to_priolist(rb);
rb                 76 drivers/gpu/drm/i915/i915_scheduler.c 	struct rb_node **parent, *rb;
rb                 91 drivers/gpu/drm/i915/i915_scheduler.c 	rb = NULL;
rb                 94 drivers/gpu/drm/i915/i915_scheduler.c 		rb = *parent;
rb                 95 drivers/gpu/drm/i915/i915_scheduler.c 		p = to_priolist(rb);
rb                 97 drivers/gpu/drm/i915/i915_scheduler.c 			parent = &rb->rb_left;
rb                 99 drivers/gpu/drm/i915/i915_scheduler.c 			parent = &rb->rb_right;
rb                130 drivers/gpu/drm/i915/i915_scheduler.c 	rb_link_node(&p->node, rb, parent);
rb                104 drivers/gpu/drm/i915/i915_vma.c 	struct rb_node *rb, **p;
rb                179 drivers/gpu/drm/i915/i915_vma.c 	rb = NULL;
rb                185 drivers/gpu/drm/i915/i915_vma.c 		rb = *p;
rb                186 drivers/gpu/drm/i915/i915_vma.c 		pos = rb_entry(rb, struct i915_vma, obj_node);
rb                201 drivers/gpu/drm/i915/i915_vma.c 			p = &rb->rb_right;
rb                203 drivers/gpu/drm/i915/i915_vma.c 			p = &rb->rb_left;
rb                205 drivers/gpu/drm/i915/i915_vma.c 	rb_link_node(&vma->obj_node, rb, p);
rb                237 drivers/gpu/drm/i915/i915_vma.c 	struct rb_node *rb;
rb                239 drivers/gpu/drm/i915/i915_vma.c 	rb = obj->vma.tree.rb_node;
rb                240 drivers/gpu/drm/i915/i915_vma.c 	while (rb) {
rb                241 drivers/gpu/drm/i915/i915_vma.c 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
rb                249 drivers/gpu/drm/i915/i915_vma.c 			rb = rb->rb_right;
rb                251 drivers/gpu/drm/i915/i915_vma.c 			rb = rb->rb_left;
rb                 15 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                237 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	if (!adreno_idle(gpu, gpu->rb[0]))
rb                 37 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                332 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	if (!adreno_idle(gpu, gpu->rb[0]))
rb                111 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                332 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	if (!adreno_idle(gpu, gpu->rb[0]))
rb                333 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                372 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                706 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
rb                707 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_RING(gpu->rb[0], 0x0F);
rb                709 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		gpu->funcs->flush(gpu, gpu->rb[0]);
rb                710 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		if (!a5xx_idle(gpu, gpu->rb[0]))
rb                723 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
rb                724 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_RING(gpu->rb[0], 0x00000000);
rb                726 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		gpu->funcs->flush(gpu, gpu->rb[0]);
rb                727 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		if (!a5xx_idle(gpu, gpu->rb[0]))
rb                224 drivers/gpu/drm/msm/adreno/a5xx_power.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                 63 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		struct msm_ringbuffer *ring = gpu->rb[i];
rb                203 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->cur_ring = gpu->rb[0];
rb                212 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
rb                278 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
rb                294 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
rb                516 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	a6xx_gpu->cur_ring = gpu->rb[0];
rb                534 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
rb                535 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		OUT_RING(gpu->rb[0], 0x00000000);
rb                537 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		a6xx_flush(gpu, gpu->rb[0]);
rb                538 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		if (!a6xx_idle(gpu, gpu->rb[0]))
rb                344 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
rb                369 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
rb                374 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			rbmemptr(gpu->rb[0], rptr));
rb                393 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	return gpu->rb[0];
rb                535 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
rb                536 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].iova = gpu->rb[i]->iova;
rb                537 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].seqno = gpu->rb[i]->seqno;
rb                538 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
rb                539 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].wptr = get_wptr(gpu->rb[i]);
rb                546 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			if (gpu->rb[i]->start[j])
rb                552 drivers/gpu/drm/msm/adreno/adreno_gpu.c 				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
rb                768 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
rb                891 drivers/gpu/drm/msm/msm_drv.c 	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
rb                 49 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->ring = gpu->rb[queue->prio];
rb                438 drivers/gpu/drm/msm/msm_gem_submit.c 	ring = gpu->rb[queue->prio];
rb                472 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
rb                499 drivers/gpu/drm/msm/msm_gpu.c 			struct msm_ringbuffer *ring = gpu->rb[i];
rb                697 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
rb                713 drivers/gpu/drm/msm/msm_gpu.c 		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
rb                946 drivers/gpu/drm/msm/msm_gpu.c 	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
rb                948 drivers/gpu/drm/msm/msm_gpu.c 			ARRAY_SIZE(gpu->rb));
rb                949 drivers/gpu/drm/msm/msm_gpu.c 		nr_rings = ARRAY_SIZE(gpu->rb);
rb                954 drivers/gpu/drm/msm/msm_gpu.c 		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
rb                956 drivers/gpu/drm/msm/msm_gpu.c 		if (IS_ERR(gpu->rb[i])) {
rb                957 drivers/gpu/drm/msm/msm_gpu.c 			ret = PTR_ERR(gpu->rb[i]);
rb                972 drivers/gpu/drm/msm/msm_gpu.c 	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)  {
rb                973 drivers/gpu/drm/msm/msm_gpu.c 		msm_ringbuffer_destroy(gpu->rb[i]);
rb                974 drivers/gpu/drm/msm/msm_gpu.c 		gpu->rb[i] = NULL;
rb                991 drivers/gpu/drm/msm/msm_gpu.c 	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
rb                992 drivers/gpu/drm/msm/msm_gpu.c 		msm_ringbuffer_destroy(gpu->rb[i]);
rb                993 drivers/gpu/drm/msm/msm_gpu.c 		gpu->rb[i] = NULL;
rb                 87 drivers/gpu/drm/msm/msm_gpu.h 	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
rb                149 drivers/gpu/drm/msm/msm_gpu.h 		struct msm_ringbuffer *ring = gpu->rb[i];
rb               1321 drivers/gpu/drm/omapdrm/dss/dispc.c 		FLD_VAL(coefs->rb, 9, 0);
rb                224 drivers/gpu/drm/omapdrm/dss/omapdss.h 	s16 rr, rg, rb;
rb               1240 drivers/gpu/drm/radeon/radeon_vm.c 					     &vm->va.rb_root, it.rb) {
rb                 31 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                 51 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                 75 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                 99 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                119 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                165 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                233 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                257 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(!mode.rb);
rb                281 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(!mode.rb);
rb                306 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                342 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                379 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                405 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                431 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                457 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                494 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                520 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                549 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                575 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                599 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                623 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                775 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                800 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                825 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                850 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                908 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                933 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                961 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
rb                986 drivers/gpu/drm/selftests/test-drm_cmdline_parser.c 	FAIL_ON(mode.rb);
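
In the test-drm_cmdline_parser.c checks above, mode.rb is the reduced-blanking flag that a video= mode string can request with a trailing 'R' (e.g. 1920x1080R); most of the tests expect it to stay false. A toy parser covering only that flag, offered as a hedged sketch rather than the real drm_mode_parse_command_line_for_connector() logic:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy parser for the "<xres>x<yres>[R]" part of a mode string,
     * only to show what the mode.rb checks above are testing.  The
     * real parser handles many more options; names are illustrative. */
    struct toy_mode {
        int xres, yres;
        bool rb;    /* reduced blanking requested with a trailing 'R' */
    };

    static bool toy_parse(const char *s, struct toy_mode *m)
    {
        char *end;

        m->xres = (int)strtol(s, &end, 10);
        if (end == s || *end != 'x')
            return false;
        s = end + 1;
        m->yres = (int)strtol(s, &end, 10);
        if (end == s)
            return false;
        m->rb = (*end == 'R');
        return true;
    }

    int main(void)
    {
        struct toy_mode m;

        if (toy_parse("1920x1080R", &m))
            printf("%dx%d rb=%d\n", m.xres, m.yres, m.rb);
        return 0;
    }
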
rb                 23 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	struct ishtp_cl_rb *rb;
rb                 28 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = ishtp_io_rb_init(cl);
rb                 29 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		if (!rb) {
rb                 33 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		ret = ishtp_io_rb_alloc_buf(rb, len);
rb                 37 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_add_tail(&rb->list, &cl->free_rb_list.list);
rb                 99 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	struct ishtp_cl_rb *rb;
rb                105 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
rb                107 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_del(&rb->list);
rb                108 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		kfree(rb->buffer.data);
rb                109 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		kfree(rb);
rb                115 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = list_entry(cl->in_process_list.list.next,
rb                117 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_del(&rb->list);
rb                118 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		kfree(rb->buffer.data);
rb                119 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		kfree(rb);
rb                165 drivers/hid/intel-ish-hid/ishtp/client-buffers.c void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
rb                167 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (rb == NULL)
rb                170 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	kfree(rb->buffer.data);
rb                171 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	kfree(rb);
rb                184 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	struct ishtp_cl_rb *rb;
rb                186 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
rb                187 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!rb)
rb                190 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	INIT_LIST_HEAD(&rb->list);
rb                191 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb->cl = cl;
rb                192 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb->buf_idx = 0;
rb                193 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	return rb;
rb                205 drivers/hid/intel-ish-hid/ishtp/client-buffers.c int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
rb                207 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!rb)
rb                213 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb->buffer.data = kmalloc(length, GFP_KERNEL);
rb                214 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!rb->buffer.data)
rb                217 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb->buffer.size = length;
rb                229 drivers/hid/intel-ish-hid/ishtp/client-buffers.c int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
rb                235 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!rb || !rb->cl)
rb                238 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	cl = rb->cl;
rb                240 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	list_add_tail(&rb->list, &cl->free_rb_list.list);
rb                286 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	struct ishtp_cl_rb *rb;
rb                289 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb = list_first_entry_or_null(&cl->in_process_list.list,
rb                291 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (rb)
rb                292 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_del_init(&rb->list);
rb                295 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	return rb;
rb                 43 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl_rb *rb;
rb                 48 drivers/hid/intel-ish-hid/ishtp/client.c 	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
rb                 49 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
rb                 50 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                 51 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_io_rb_free(rb);
rb                447 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl_rb *rb;
rb                477 drivers/hid/intel-ish-hid/ishtp/client.c 		rb = NULL;
rb                481 drivers/hid/intel-ish-hid/ishtp/client.c 	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
rb                482 drivers/hid/intel-ish-hid/ishtp/client.c 	list_del_init(&rb->list);
rb                485 drivers/hid/intel-ish-hid/ishtp/client.c 	rb->cl = cl;
rb                486 drivers/hid/intel-ish-hid/ishtp/client.c 	rb->buf_idx = 0;
rb                488 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&rb->list);
rb                496 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&rb->list, &dev->read_list.list);
rb                504 drivers/hid/intel-ish-hid/ishtp/client.c 	if (rets && rb) {
rb                506 drivers/hid/intel-ish-hid/ishtp/client.c 		list_del(&rb->list);
rb                510 drivers/hid/intel-ish-hid/ishtp/client.c 		list_add_tail(&rb->list, &cl->free_rb_list.list);
rb                609 drivers/hid/intel-ish-hid/ishtp/client.c static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
rb                613 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl	*cl = rb->cl;
rb                621 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&rb->list, &cl->in_process_list.list);
rb                821 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl_rb *rb;
rb                841 drivers/hid/intel-ish-hid/ishtp/client.c 	list_for_each_entry(rb, &dev->read_list.list, list) {
rb                843 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = rb->cl;
rb                850 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
rb                854 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                855 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_io_rb_free(rb);
rb                866 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
rb                870 drivers/hid/intel-ish-hid/ishtp/client.c 				rb->buffer.size, ishtp_hdr->length,
rb                871 drivers/hid/intel-ish-hid/ishtp/client.c 				rb->buf_idx);
rb                872 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                873 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_cl_io_rb_recycle(rb);
rb                878 drivers/hid/intel-ish-hid/ishtp/client.c 		buffer = rb->buffer.data + rb->buf_idx;
rb                881 drivers/hid/intel-ish-hid/ishtp/client.c 		rb->buf_idx += ishtp_hdr->length;
rb                885 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                886 drivers/hid/intel-ish-hid/ishtp/client.c 			complete_rb = rb;
rb                954 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl_rb *rb;
rb                962 drivers/hid/intel-ish-hid/ishtp/client.c 	list_for_each_entry(rb, &dev->read_list.list, list) {
rb                963 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = rb->cl;
rb                972 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
rb                976 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                977 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_io_rb_free(rb);
rb                988 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->buffer.size < hbm->msg_length) {
rb                992 drivers/hid/intel-ish-hid/ishtp/client.c 				rb->buffer.size, hbm->msg_length, rb->buf_idx);
rb                993 drivers/hid/intel-ish-hid/ishtp/client.c 			list_del(&rb->list);
rb                994 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_cl_io_rb_recycle(rb);
rb                999 drivers/hid/intel-ish-hid/ishtp/client.c 		buffer = rb->buffer.data;
rb               1001 drivers/hid/intel-ish-hid/ishtp/client.c 		rb->buf_idx = hbm->msg_length;
rb               1005 drivers/hid/intel-ish-hid/ishtp/client.c 		list_del(&rb->list);
rb               1006 drivers/hid/intel-ish-hid/ishtp/client.c 		complete_rb = rb;
rb                141 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
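
The ishtp_cl_rb entries above cycle through three lists: buffers are pre-allocated onto free_rb_list, moved to the device read_list when a read is started, parked on in_process_list once data arrives, and finally recycled back to the free list. A minimal user-space sketch of that allocate/hand-out/recycle life cycle, using a singly linked pool instead of list_head; names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified read-buffer pool: not the ISHTP code, just the same
     * allocate -> hand out -> recycle shape with illustrative names. */
    struct read_buf {
        struct read_buf *next;
        size_t size;
        void *data;
    };

    static struct read_buf *free_list;

    static int pool_add(size_t size)
    {
        struct read_buf *rb = calloc(1, sizeof(*rb));

        if (!rb)
            return -1;
        rb->data = malloc(size);
        if (!rb->data) {
            free(rb);
            return -1;
        }
        rb->size = size;
        rb->next = free_list;
        free_list = rb;
        return 0;
    }

    /* Take a buffer for an in-flight read. */
    static struct read_buf *pool_get(void)
    {
        struct read_buf *rb = free_list;

        if (rb)
            free_list = rb->next;
        return rb;
    }

    /* Return a consumed buffer to the pool, like ishtp_cl_io_rb_recycle(). */
    static void pool_recycle(struct read_buf *rb)
    {
        rb->next = free_list;
        free_list = rb;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            pool_add(512);

        struct read_buf *rb = pool_get();    /* buffer now "in process" */

        if (rb) {
            printf("got %zu-byte buffer\n", rb->size);
            pool_recycle(rb);                /* consumer done with it */
        }
        return 0;
    }
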
rb                185 drivers/i2c/busses/i2c-cpm.c 	u_char *rb;
rb                194 drivers/i2c/busses/i2c-cpm.c 	rb = cpm->rxbuf[rx];
rb                197 drivers/i2c/busses/i2c-cpm.c 	rb = (u_char *) (((ulong) rb + 1) & ~1);
rb                243 drivers/i2c/busses/i2c-cpm.c 	u_char *rb;
rb                250 drivers/i2c/busses/i2c-cpm.c 	rb = cpm->rxbuf[rx];
rb                253 drivers/i2c/busses/i2c-cpm.c 	rb = (u_char *) (((uint) rb + 1) & ~1);
rb                273 drivers/i2c/busses/i2c-cpm.c 		memcpy(pmsg->buf, rb, pmsg->len);
rb                 94 drivers/i2c/i2c-stub.c 	struct smbus_block_data *b, *rb = NULL;
rb                 98 drivers/i2c/i2c-stub.c 			rb = b;
rb                102 drivers/i2c/i2c-stub.c 	if (rb == NULL && create) {
rb                103 drivers/i2c/i2c-stub.c 		rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL);
rb                104 drivers/i2c/i2c-stub.c 		if (rb == NULL)
rb                105 drivers/i2c/i2c-stub.c 			return rb;
rb                106 drivers/i2c/i2c-stub.c 		rb->command = command;
rb                107 drivers/i2c/i2c-stub.c 		list_add(&rb->node, &chip->smbus_blocks);
rb                109 drivers/i2c/i2c-stub.c 	return rb;
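
The i2c-stub lines above use a common find-or-create idiom: search a list for an existing block record and, if nothing matches and creation is requested, allocate and link a new one. A small user-space sketch of that idiom, with plain calloc instead of devm_kzalloc; names and types are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Find-or-create lookup over a singly linked list, mirroring the
     * shape of the stub lookup above; not the driver's code. */
    struct block {
        struct block *next;
        unsigned char command;
        unsigned char len;
    };

    static struct block *blocks;

    static struct block *find_block(unsigned char command, int create)
    {
        struct block *b, *rb = NULL;

        for (b = blocks; b; b = b->next)
            if (b->command == command) {
                rb = b;
                break;
            }

        if (!rb && create) {
            rb = calloc(1, sizeof(*rb));
            if (!rb)
                return NULL;
            rb->command = command;
            rb->next = blocks;
            blocks = rb;
        }
        return rb;
    }

    int main(void)
    {
        struct block *b = find_block(0x10, 1);   /* created */

        printf("%s\n", b && b == find_block(0x10, 0) ? "found" : "missing");
        return 0;
    }
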
rb                106 drivers/iio/industrialio-buffer.c 	struct iio_buffer *rb = indio_dev->buffer;
rb                115 drivers/iio/industrialio-buffer.c 	if (!rb || !rb->access->read_first_n)
rb                118 drivers/iio/industrialio-buffer.c 	datum_size = rb->bytes_per_datum;
rb                130 drivers/iio/industrialio-buffer.c 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
rb                132 drivers/iio/industrialio-buffer.c 	add_wait_queue(&rb->pollq, &wait);
rb                139 drivers/iio/industrialio-buffer.c 		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
rb                150 drivers/iio/industrialio-buffer.c 		ret = rb->access->read_first_n(rb, n, buf);
rb                154 drivers/iio/industrialio-buffer.c 	remove_wait_queue(&rb->pollq, &wait);
rb                172 drivers/iio/industrialio-buffer.c 	struct iio_buffer *rb = indio_dev->buffer;
rb                174 drivers/iio/industrialio-buffer.c 	if (!indio_dev->info || rb == NULL)
rb                177 drivers/iio/industrialio-buffer.c 	poll_wait(filp, &rb->pollq, wait);
rb                178 drivers/iio/industrialio-buffer.c 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
rb                 91 drivers/infiniband/core/umem_odp.c 			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
rb               1002 drivers/infiniband/hw/hfi1/user_sdma.c 	node->rb.len = iovec->iov.iov_len;
rb               1032 drivers/infiniband/hw/hfi1/user_sdma.c 		node = container_of(rb_node, struct sdma_mmu_node, rb);
rb               1047 drivers/infiniband/hw/hfi1/user_sdma.c 		node->rb.addr = (unsigned long)iovec->iov.iov_base;
rb               1067 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
rb               1464 drivers/infiniband/hw/hfi1/user_sdma.c 					   &node->rb);
rb               1495 drivers/infiniband/hw/hfi1/user_sdma.c 		container_of(mnode, struct sdma_mmu_node, rb);
rb               1510 drivers/infiniband/hw/hfi1/user_sdma.c 		container_of(mnode, struct sdma_mmu_node, rb);
rb               1530 drivers/infiniband/hw/hfi1/user_sdma.c 		container_of(mnode, struct sdma_mmu_node, rb);
rb               1539 drivers/infiniband/hw/hfi1/user_sdma.c 		container_of(mnode, struct sdma_mmu_node, rb);
rb                145 drivers/infiniband/hw/hfi1/user_sdma.h 	struct mmu_rb_node rb;
rb                116 drivers/infiniband/hw/mlx5/odp.c 	struct rb_node *rb;
rb                120 drivers/infiniband/hw/mlx5/odp.c 		rb = rb_next(&odp->interval_tree.rb);
rb                121 drivers/infiniband/hw/mlx5/odp.c 		if (!rb)
rb                123 drivers/infiniband/hw/mlx5/odp.c 		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
rb                139 drivers/infiniband/hw/mlx5/odp.c 	struct rb_node *rb;
rb                149 drivers/infiniband/hw/mlx5/odp.c 		rb = rb_next(&odp->interval_tree.rb);
rb                150 drivers/infiniband/hw/mlx5/odp.c 		if (!rb)
rb                152 drivers/infiniband/hw/mlx5/odp.c 		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
rb                584 drivers/infiniband/hw/mlx5/odp.c 			rb_entry(node, struct ib_umem_odp, interval_tree.rb);
rb                268 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
rb                 40 drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h 	struct rb_node			rb;
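
Several entries above (struct sdma_mmu_node, ib_umem_odp's interval_tree.rb, usnic_uiom_interval_node) embed a struct rb_node and recover the containing object with rb_entry()/container_of(). A stand-alone sketch of that recovery, with a locally defined container_of so it runs in user space; the node type and field names are illustrative:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Local stand-ins so the pattern runs outside the kernel; this is
     * not the real rbtree API, only the pointer arithmetic behind
     * rb_entry(). */
    struct rb_node_stub { struct rb_node_stub *left, *right; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* An outer object embedding the tree node, like struct sdma_mmu_node. */
    struct mmu_node {
        unsigned long addr;
        unsigned long len;
        struct rb_node_stub rb;              /* embedded tree linkage */
    };

    int main(void)
    {
        struct mmu_node n = { .addr = 0x1000, .len = 4096 };
        struct rb_node_stub *found = &n.rb;  /* e.g. returned by a tree search */

        /* rb_entry() in the kernel expands to exactly this container_of(). */
        struct mmu_node *outer = container_of(found, struct mmu_node, rb);

        assert(outer == &n);
        printf("addr=%#lx len=%lu\n", outer->addr, outer->len);
        return 0;
    }
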
rb                 26 drivers/lightnvm/pblk-rb.c static void pblk_rb_data_free(struct pblk_rb *rb)
rb                 31 drivers/lightnvm/pblk-rb.c 	list_for_each_entry_safe(p, t, &rb->pages, list) {
rb                 39 drivers/lightnvm/pblk-rb.c void pblk_rb_free(struct pblk_rb *rb)
rb                 41 drivers/lightnvm/pblk-rb.c 	pblk_rb_data_free(rb);
rb                 42 drivers/lightnvm/pblk-rb.c 	vfree(rb->entries);
rb                 72 drivers/lightnvm/pblk-rb.c int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
rb                 75 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                 92 drivers/lightnvm/pblk-rb.c 	rb->entries = entries;
rb                 93 drivers/lightnvm/pblk-rb.c 	rb->seg_size = (1 << power_seg_sz);
rb                 94 drivers/lightnvm/pblk-rb.c 	rb->nr_entries = (1 << power_size);
rb                 95 drivers/lightnvm/pblk-rb.c 	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
rb                 96 drivers/lightnvm/pblk-rb.c 	rb->back_thres = threshold;
rb                 97 drivers/lightnvm/pblk-rb.c 	rb->flush_point = EMPTY_ENTRY;
rb                 99 drivers/lightnvm/pblk-rb.c 	spin_lock_init(&rb->w_lock);
rb                100 drivers/lightnvm/pblk-rb.c 	spin_lock_init(&rb->s_lock);
rb                102 drivers/lightnvm/pblk-rb.c 	INIT_LIST_HEAD(&rb->pages);
rb                131 drivers/lightnvm/pblk-rb.c 			pblk_rb_data_free(rb);
rb                138 drivers/lightnvm/pblk-rb.c 		entry = &rb->entries[init_entry];
rb                145 drivers/lightnvm/pblk-rb.c 			entry = &rb->entries[init_entry];
rb                147 drivers/lightnvm/pblk-rb.c 			entry->data = kaddr + (i * rb->seg_size);
rb                152 drivers/lightnvm/pblk-rb.c 		list_add_tail(&page_set->list, &rb->pages);
rb                158 drivers/lightnvm/pblk-rb.c 	atomic_set(&rb->inflight_flush_point, 0);
rb                165 drivers/lightnvm/pblk-rb.c 	pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);
rb                185 drivers/lightnvm/pblk-rb.c #define pblk_rb_ring_space(rb, head, tail, size) \
rb                192 drivers/lightnvm/pblk-rb.c static unsigned int pblk_rb_space(struct pblk_rb *rb)
rb                194 drivers/lightnvm/pblk-rb.c 	unsigned int mem = READ_ONCE(rb->mem);
rb                195 drivers/lightnvm/pblk-rb.c 	unsigned int sync = READ_ONCE(rb->sync);
rb                197 drivers/lightnvm/pblk-rb.c 	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
rb                200 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
rb                203 drivers/lightnvm/pblk-rb.c 	return (p + nr_entries) & (rb->nr_entries - 1);
rb                210 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_read_count(struct pblk_rb *rb)
rb                212 drivers/lightnvm/pblk-rb.c 	unsigned int mem = READ_ONCE(rb->mem);
rb                213 drivers/lightnvm/pblk-rb.c 	unsigned int subm = READ_ONCE(rb->subm);
rb                215 drivers/lightnvm/pblk-rb.c 	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
rb                218 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
rb                220 drivers/lightnvm/pblk-rb.c 	unsigned int mem = READ_ONCE(rb->mem);
rb                221 drivers/lightnvm/pblk-rb.c 	unsigned int sync = READ_ONCE(rb->sync);
rb                223 drivers/lightnvm/pblk-rb.c 	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
rb                226 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
rb                230 drivers/lightnvm/pblk-rb.c 	subm = READ_ONCE(rb->subm);
rb                232 drivers/lightnvm/pblk-rb.c 	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
rb                237 drivers/lightnvm/pblk-rb.c static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
rb                239 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                248 drivers/lightnvm/pblk-rb.c 		entry = &rb->entries[rb->l2p_update];
rb                266 drivers/lightnvm/pblk-rb.c 		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
rb                279 drivers/lightnvm/pblk-rb.c static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
rb                285 drivers/lightnvm/pblk-rb.c 	lockdep_assert_held(&rb->w_lock);
rb                288 drivers/lightnvm/pblk-rb.c 	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
rb                294 drivers/lightnvm/pblk-rb.c 	ret = __pblk_rb_update_l2p(rb, count);
rb                305 drivers/lightnvm/pblk-rb.c void pblk_rb_sync_l2p(struct pblk_rb *rb)
rb                310 drivers/lightnvm/pblk-rb.c 	spin_lock(&rb->w_lock);
rb                313 drivers/lightnvm/pblk-rb.c 	sync = smp_load_acquire(&rb->sync);
rb                315 drivers/lightnvm/pblk-rb.c 	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
rb                316 drivers/lightnvm/pblk-rb.c 	__pblk_rb_update_l2p(rb, to_update);
rb                318 drivers/lightnvm/pblk-rb.c 	spin_unlock(&rb->w_lock);
rb                327 drivers/lightnvm/pblk-rb.c static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
rb                331 drivers/lightnvm/pblk-rb.c 	memcpy(entry->data, data, rb->seg_size);
rb                337 drivers/lightnvm/pblk-rb.c void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
rb                340 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                344 drivers/lightnvm/pblk-rb.c 	entry = &rb->entries[ring_pos];
rb                351 drivers/lightnvm/pblk-rb.c 	__pblk_rb_write_entry(rb, data, w_ctx, entry);
rb                360 drivers/lightnvm/pblk-rb.c void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
rb                364 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                368 drivers/lightnvm/pblk-rb.c 	entry = &rb->entries[ring_pos];
rb                375 drivers/lightnvm/pblk-rb.c 	__pblk_rb_write_entry(rb, data, w_ctx, entry);
rb                386 drivers/lightnvm/pblk-rb.c static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
rb                392 drivers/lightnvm/pblk-rb.c 	pblk_rb_sync_init(rb, NULL);
rb                393 drivers/lightnvm/pblk-rb.c 	sync = READ_ONCE(rb->sync);
rb                396 drivers/lightnvm/pblk-rb.c 		pblk_rb_sync_end(rb, NULL);
rb                401 drivers/lightnvm/pblk-rb.c 	atomic_inc(&rb->inflight_flush_point);
rb                404 drivers/lightnvm/pblk-rb.c 	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
rb                405 drivers/lightnvm/pblk-rb.c 	entry = &rb->entries[flush_point];
rb                408 drivers/lightnvm/pblk-rb.c 	smp_store_release(&rb->flush_point, flush_point);
rb                413 drivers/lightnvm/pblk-rb.c 	pblk_rb_sync_end(rb, NULL);
rb                418 drivers/lightnvm/pblk-rb.c static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
rb                425 drivers/lightnvm/pblk-rb.c 	sync = READ_ONCE(rb->sync);
rb                426 drivers/lightnvm/pblk-rb.c 	mem = READ_ONCE(rb->mem);
rb                428 drivers/lightnvm/pblk-rb.c 	threshold = nr_entries + rb->back_thres;
rb                430 drivers/lightnvm/pblk-rb.c 	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
rb                433 drivers/lightnvm/pblk-rb.c 	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
rb                441 drivers/lightnvm/pblk-rb.c static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
rb                444 drivers/lightnvm/pblk-rb.c 	if (!__pblk_rb_may_write(rb, nr_entries, pos))
rb                448 drivers/lightnvm/pblk-rb.c 	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
rb                452 drivers/lightnvm/pblk-rb.c void pblk_rb_flush(struct pblk_rb *rb)
rb                454 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                455 drivers/lightnvm/pblk-rb.c 	unsigned int mem = READ_ONCE(rb->mem);
rb                457 drivers/lightnvm/pblk-rb.c 	if (pblk_rb_flush_point_set(rb, NULL, mem))
rb                463 drivers/lightnvm/pblk-rb.c static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
rb                469 drivers/lightnvm/pblk-rb.c 	if (!__pblk_rb_may_write(rb, nr_entries, pos))
rb                472 drivers/lightnvm/pblk-rb.c 	mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
rb                476 drivers/lightnvm/pblk-rb.c 		struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                484 drivers/lightnvm/pblk-rb.c 	smp_store_release(&rb->mem, mem);
rb                494 drivers/lightnvm/pblk-rb.c int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
rb                497 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                500 drivers/lightnvm/pblk-rb.c 	spin_lock(&rb->w_lock);
rb                503 drivers/lightnvm/pblk-rb.c 		spin_unlock(&rb->w_lock);
rb                507 drivers/lightnvm/pblk-rb.c 	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
rb                508 drivers/lightnvm/pblk-rb.c 		spin_unlock(&rb->w_lock);
rb                513 drivers/lightnvm/pblk-rb.c 	spin_unlock(&rb->w_lock);
rb                521 drivers/lightnvm/pblk-rb.c int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
rb                524 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                526 drivers/lightnvm/pblk-rb.c 	spin_lock(&rb->w_lock);
rb                528 drivers/lightnvm/pblk-rb.c 		spin_unlock(&rb->w_lock);
rb                532 drivers/lightnvm/pblk-rb.c 	if (!pblk_rb_may_write(rb, nr_entries, pos)) {
rb                533 drivers/lightnvm/pblk-rb.c 		spin_unlock(&rb->w_lock);
rb                538 drivers/lightnvm/pblk-rb.c 	spin_unlock(&rb->w_lock);
rb                550 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
rb                554 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                577 drivers/lightnvm/pblk-rb.c 		entry = &rb->entries[pos];
rb                599 drivers/lightnvm/pblk-rb.c 		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
rb                600 drivers/lightnvm/pblk-rb.c 								rb->seg_size) {
rb                615 drivers/lightnvm/pblk-rb.c 		pos = pblk_rb_ptr_wrap(rb, pos, 1);
rb                644 drivers/lightnvm/pblk-rb.c int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
rb                647 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                659 drivers/lightnvm/pblk-rb.c 	BUG_ON(pos >= rb->nr_entries);
rb                661 drivers/lightnvm/pblk-rb.c 	entry = &rb->entries[pos];
rb                665 drivers/lightnvm/pblk-rb.c 	spin_lock(&rb->w_lock);
rb                677 drivers/lightnvm/pblk-rb.c 	memcpy(data, entry->data, rb->seg_size);
rb                680 drivers/lightnvm/pblk-rb.c 	spin_unlock(&rb->w_lock);
rb                684 drivers/lightnvm/pblk-rb.c struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
rb                686 drivers/lightnvm/pblk-rb.c 	unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);
rb                688 drivers/lightnvm/pblk-rb.c 	return &rb->entries[entry].w_ctx;
rb                691 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
rb                692 drivers/lightnvm/pblk-rb.c 	__acquires(&rb->s_lock)
rb                695 drivers/lightnvm/pblk-rb.c 		spin_lock_irqsave(&rb->s_lock, *flags);
rb                697 drivers/lightnvm/pblk-rb.c 		spin_lock_irq(&rb->s_lock);
rb                699 drivers/lightnvm/pblk-rb.c 	return rb->sync;
rb                702 drivers/lightnvm/pblk-rb.c void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
rb                703 drivers/lightnvm/pblk-rb.c 	__releases(&rb->s_lock)
rb                705 drivers/lightnvm/pblk-rb.c 	lockdep_assert_held(&rb->s_lock);
rb                708 drivers/lightnvm/pblk-rb.c 		spin_unlock_irqrestore(&rb->s_lock, *flags);
rb                710 drivers/lightnvm/pblk-rb.c 		spin_unlock_irq(&rb->s_lock);
rb                713 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
rb                716 drivers/lightnvm/pblk-rb.c 	lockdep_assert_held(&rb->s_lock);
rb                718 drivers/lightnvm/pblk-rb.c 	sync = READ_ONCE(rb->sync);
rb                719 drivers/lightnvm/pblk-rb.c 	flush_point = READ_ONCE(rb->flush_point);
rb                725 drivers/lightnvm/pblk-rb.c 					rb->nr_entries);
rb                728 drivers/lightnvm/pblk-rb.c 			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
rb                732 drivers/lightnvm/pblk-rb.c 	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
rb                735 drivers/lightnvm/pblk-rb.c 	smp_store_release(&rb->sync, sync);
rb                741 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
rb                747 drivers/lightnvm/pblk-rb.c 	flush_point = smp_load_acquire(&rb->flush_point);
rb                752 drivers/lightnvm/pblk-rb.c 	sync = smp_load_acquire(&rb->sync);
rb                754 drivers/lightnvm/pblk-rb.c 	subm = READ_ONCE(rb->subm);
rb                755 drivers/lightnvm/pblk-rb.c 	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
rb                758 drivers/lightnvm/pblk-rb.c 	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
rb                763 drivers/lightnvm/pblk-rb.c int pblk_rb_tear_down_check(struct pblk_rb *rb)
rb                769 drivers/lightnvm/pblk-rb.c 	spin_lock(&rb->w_lock);
rb                770 drivers/lightnvm/pblk-rb.c 	spin_lock_irq(&rb->s_lock);
rb                772 drivers/lightnvm/pblk-rb.c 	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
rb                773 drivers/lightnvm/pblk-rb.c 				(rb->sync == rb->l2p_update) &&
rb                774 drivers/lightnvm/pblk-rb.c 				(rb->flush_point == EMPTY_ENTRY)) {
rb                778 drivers/lightnvm/pblk-rb.c 	if (!rb->entries) {
rb                783 drivers/lightnvm/pblk-rb.c 	for (i = 0; i < rb->nr_entries; i++) {
rb                784 drivers/lightnvm/pblk-rb.c 		entry = &rb->entries[i];
rb                793 drivers/lightnvm/pblk-rb.c 	spin_unlock_irq(&rb->s_lock);
rb                794 drivers/lightnvm/pblk-rb.c 	spin_unlock(&rb->w_lock);
rb                799 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
rb                801 drivers/lightnvm/pblk-rb.c 	return (pos & (rb->nr_entries - 1));
rb                804 drivers/lightnvm/pblk-rb.c int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
rb                806 drivers/lightnvm/pblk-rb.c 	return (pos >= rb->nr_entries);
rb                809 drivers/lightnvm/pblk-rb.c ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb                811 drivers/lightnvm/pblk-rb.c 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
rb                816 drivers/lightnvm/pblk-rb.c 	spin_lock_irq(&rb->s_lock);
rb                819 drivers/lightnvm/pblk-rb.c 	spin_unlock_irq(&rb->s_lock);
rb                821 drivers/lightnvm/pblk-rb.c 	if (rb->flush_point != EMPTY_ENTRY)
rb                824 drivers/lightnvm/pblk-rb.c 			rb->nr_entries,
rb                825 drivers/lightnvm/pblk-rb.c 			rb->mem,
rb                826 drivers/lightnvm/pblk-rb.c 			rb->subm,
rb                827 drivers/lightnvm/pblk-rb.c 			rb->sync,
rb                828 drivers/lightnvm/pblk-rb.c 			rb->l2p_update,
rb                830 drivers/lightnvm/pblk-rb.c 			atomic_read(&rb->inflight_flush_point),
rb                834 drivers/lightnvm/pblk-rb.c 			rb->flush_point,
rb                835 drivers/lightnvm/pblk-rb.c 			pblk_rb_read_count(rb),
rb                836 drivers/lightnvm/pblk-rb.c 			pblk_rb_space(rb),
rb                837 drivers/lightnvm/pblk-rb.c 			pblk_rb_flush_point_count(rb),
rb                842 drivers/lightnvm/pblk-rb.c 			rb->nr_entries,
rb                843 drivers/lightnvm/pblk-rb.c 			rb->mem,
rb                844 drivers/lightnvm/pblk-rb.c 			rb->subm,
rb                845 drivers/lightnvm/pblk-rb.c 			rb->sync,
rb                846 drivers/lightnvm/pblk-rb.c 			rb->l2p_update,
rb                848 drivers/lightnvm/pblk-rb.c 			atomic_read(&rb->inflight_flush_point),
rb                852 drivers/lightnvm/pblk-rb.c 			pblk_rb_read_count(rb),
rb                853 drivers/lightnvm/pblk-rb.c 			pblk_rb_space(rb),
rb                854 drivers/lightnvm/pblk-rb.c 			pblk_rb_flush_point_count(rb),
rb                150 drivers/lightnvm/pblk-write.c 	struct pblk_rb *rb = &pblk->rwb;
rb                160 drivers/lightnvm/pblk-write.c 		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
rb                723 drivers/lightnvm/pblk.h int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
rb                725 drivers/lightnvm/pblk.h int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
rb                727 drivers/lightnvm/pblk.h int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
rb                729 drivers/lightnvm/pblk.h void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
rb                731 drivers/lightnvm/pblk.h void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
rb                734 drivers/lightnvm/pblk.h struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
rb                735 drivers/lightnvm/pblk.h void pblk_rb_flush(struct pblk_rb *rb);
rb                737 drivers/lightnvm/pblk.h void pblk_rb_sync_l2p(struct pblk_rb *rb);
rb                738 drivers/lightnvm/pblk.h unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
rb                741 drivers/lightnvm/pblk.h int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
rb                743 drivers/lightnvm/pblk.h unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
rb                745 drivers/lightnvm/pblk.h unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
rb                746 drivers/lightnvm/pblk.h unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
rb                747 drivers/lightnvm/pblk.h unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
rb                749 drivers/lightnvm/pblk.h void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
rb                750 drivers/lightnvm/pblk.h unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
rb                752 drivers/lightnvm/pblk.h unsigned int pblk_rb_read_count(struct pblk_rb *rb);
rb                753 drivers/lightnvm/pblk.h unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
rb                754 drivers/lightnvm/pblk.h unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
rb                756 drivers/lightnvm/pblk.h int pblk_rb_tear_down_check(struct pblk_rb *rb);
rb                757 drivers/lightnvm/pblk.h int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
rb                758 drivers/lightnvm/pblk.h void pblk_rb_free(struct pblk_rb *rb);
rb                759 drivers/lightnvm/pblk.h ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
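
pblk sizes its write buffer as a power of two (rb->nr_entries = 1 << power_size) so positions wrap with a mask rather than a modulo, as pblk_rb_ptr_wrap() and pblk_rb_wrap_pos() above show. A minimal user-space sketch of that index arithmetic; the wrap/count/space helpers here are illustrative and not pblk's exact formulas:

    #include <assert.h>
    #include <stdio.h>

    /* Simplified power-of-two ring-buffer index math; all names are
     * illustrative, this is not the pblk code. */
    struct ring {
        unsigned int size;   /* must be a power of two */
        unsigned int head;   /* producer (write) position */
        unsigned int tail;   /* consumer (read) position */
    };

    static unsigned int ring_wrap(const struct ring *r, unsigned int pos,
                                  unsigned int nr)
    {
        return (pos + nr) & (r->size - 1);
    }

    /* Entries currently queued between tail and head. */
    static unsigned int ring_count(const struct ring *r)
    {
        return (r->head - r->tail) & (r->size - 1);
    }

    /* Free slots; one slot is sacrificed so "full" differs from "empty". */
    static unsigned int ring_space(const struct ring *r)
    {
        return (r->tail - r->head - 1) & (r->size - 1);
    }

    int main(void)
    {
        struct ring r = { .size = 8, .head = 0, .tail = 0 };

        assert(ring_space(&r) == 7 && ring_count(&r) == 0);
        r.head = ring_wrap(&r, r.head, 5);   /* produce 5 entries */
        assert(ring_count(&r) == 5);
        r.tail = ring_wrap(&r, r.tail, 3);   /* consume 3 entries */
        printf("count=%u space=%u\n", ring_count(&r), ring_space(&r));
        return 0;
    }
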
rb               1461 drivers/md/dm-cache-target.c 			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
rb               1462 drivers/md/dm-cache-target.c 			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
rb               1699 drivers/md/dm-cache-target.c 	bool rb, background_queued;
rb               1704 drivers/md/dm-cache-target.c 	rb = bio_detain_shared(cache, block, bio);
rb               1705 drivers/md/dm-cache-target.c 	if (!rb) {
rb                 54 drivers/media/dvb-frontends/dib3000mb.c 	u8 rb[2];
rb                 57 drivers/media/dvb-frontends/dib3000mb.c 		{ .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 },
rb                 64 drivers/media/dvb-frontends/dib3000mb.c 			(rb[0] << 8) | rb[1],(rb[0] << 8) | rb[1]);
rb                 66 drivers/media/dvb-frontends/dib3000mb.c 	return (rb[0] << 8) | rb[1];
rb                856 drivers/media/pci/cx18/cx18-ioctl.c 	struct v4l2_requestbuffers *rb)
rb                866 drivers/media/pci/cx18/cx18-ioctl.c 	return videobuf_reqbufs(cx18_vb_queue(id), rb);
rb                756 drivers/media/pci/ngene/ngene-core.c static void free_ringbuffer(struct ngene *dev, struct SRingBufferDescriptor *rb)
rb                758 drivers/media/pci/ngene/ngene-core.c 	struct SBufferHeader *Cur = rb->Head;
rb                764 drivers/media/pci/ngene/ngene-core.c 	for (j = 0; j < rb->NumBuffers; j++, Cur = Cur->Next) {
rb                767 drivers/media/pci/ngene/ngene-core.c 					    rb->Buffer1Length,
rb                773 drivers/media/pci/ngene/ngene-core.c 					    rb->Buffer2Length,
rb                778 drivers/media/pci/ngene/ngene-core.c 	if (rb->SCListMem)
rb                779 drivers/media/pci/ngene/ngene-core.c 		pci_free_consistent(dev->pci_dev, rb->SCListMemSize,
rb                780 drivers/media/pci/ngene/ngene-core.c 				    rb->SCListMem, rb->PASCListMem);
rb                782 drivers/media/pci/ngene/ngene-core.c 	pci_free_consistent(dev->pci_dev, rb->MemSize, rb->Head, rb->PAHead);
rb                786 drivers/media/pci/ngene/ngene-core.c 		     struct SRingBufferDescriptor *rb,
rb                792 drivers/media/pci/ngene/ngene-core.c 	if (!rb->Head)
rb                794 drivers/media/pci/ngene/ngene-core.c 	free_ringbuffer(dev, rb);
rb                428 drivers/media/pci/ttpci/av7110_av.c #define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
rb                433 drivers/media/pci/ttpci/av7110_av.c 	struct dvb_ringbuffer *rb;
rb                439 drivers/media/pci/ttpci/av7110_av.c 	rb = (type) ? &av7110->avout : &av7110->aout;
rb                452 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(rb->queue, FREE_COND_TS))
rb               1039 drivers/media/platform/coda/coda-bit.c 				struct v4l2_requestbuffers *rb)
rb               1044 drivers/media/platform/coda/coda-bit.c 	if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
rb               1047 drivers/media/platform/coda/coda-bit.c 	if (rb->count) {
rb               1781 drivers/media/platform/coda/coda-bit.c 				struct v4l2_requestbuffers *rb)
rb               1786 drivers/media/platform/coda/coda-bit.c 	if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
rb               1789 drivers/media/platform/coda/coda-bit.c 	if (rb->count) {
rb                855 drivers/media/platform/coda/coda-common.c 			struct v4l2_requestbuffers *rb)
rb                860 drivers/media/platform/coda/coda-common.c 	ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
rb                868 drivers/media/platform/coda/coda-common.c 	if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs)
rb                869 drivers/media/platform/coda/coda-common.c 		return ctx->ops->reqbufs(ctx, rb);
rb                190 drivers/media/platform/coda/coda.h 	int (*reqbufs)(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb);
rb                529 drivers/media/platform/exynos4-is/fimc-isp-video.c 				struct v4l2_requestbuffers *rb)
rb                534 drivers/media/platform/exynos4-is/fimc-isp-video.c 	ret = vb2_ioctl_reqbufs(file, priv, rb);
rb                538 drivers/media/platform/exynos4-is/fimc-isp-video.c 	if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
rb                539 drivers/media/platform/exynos4-is/fimc-isp-video.c 		rb->count = 0;
rb                540 drivers/media/platform/exynos4-is/fimc-isp-video.c 		vb2_ioctl_reqbufs(file, priv, rb);
rb                544 drivers/media/platform/exynos4-is/fimc-isp-video.c 	isp->video_capture.reqbufs_count = rb->count;
rb                901 drivers/media/platform/omap3isp/ispvideo.c isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
rb                908 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_reqbufs(&vfh->queue, rb);
rb                 50 drivers/media/platform/rockchip/rga/rga-hw.c 	struct rga_addr_offset *lt, *lb, *rt, *rb;
rb                 57 drivers/media/platform/rockchip/rga/rga-hw.c 	rb = &offsets.right_bottom;
rb                 78 drivers/media/platform/rockchip/rga/rga-hw.c 	rb->y_off = lb->y_off + (w - 1) * pixel_width;
rb                 79 drivers/media/platform/rockchip/rga/rga-hw.c 	rb->u_off = lb->u_off + w / x_div - 1;
rb                 80 drivers/media/platform/rockchip/rga/rga-hw.c 	rb->v_off = lb->v_off + w / x_div - 1;
rb                886 drivers/media/platform/s3c-camif/camif-capture.c 			     struct v4l2_requestbuffers *rb)
rb                892 drivers/media/platform/s3c-camif/camif-capture.c 		 vp->id, rb->count, vp->owner, priv);
rb                897 drivers/media/platform/s3c-camif/camif-capture.c 	if (rb->count)
rb                898 drivers/media/platform/s3c-camif/camif-capture.c 		rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
rb                902 drivers/media/platform/s3c-camif/camif-capture.c 	ret = vb2_reqbufs(&vp->vb_queue, rb);
rb                906 drivers/media/platform/s3c-camif/camif-capture.c 	if (rb->count && rb->count < CAMIF_REQ_BUFS_MIN) {
rb                907 drivers/media/platform/s3c-camif/camif-capture.c 		rb->count = 0;
rb                908 drivers/media/platform/s3c-camif/camif-capture.c 		vb2_reqbufs(&vp->vb_queue, rb);
rb                912 drivers/media/platform/s3c-camif/camif-capture.c 	vp->reqbufs_count = rb->count;
rb                913 drivers/media/platform/s3c-camif/camif-capture.c 	if (vp->owner == NULL && rb->count > 0)
rb               1649 drivers/media/usb/cx231xx/cx231xx-video.c 			  struct v4l2_requestbuffers *rb)
rb               1659 drivers/media/usb/cx231xx/cx231xx-video.c 	return videobuf_reqbufs(&fh->vb_vidq, rb);
rb               2229 drivers/media/usb/dvb-usb/dib0700_devices.c 	u8 rb[2];
rb               2232 drivers/media/usb/dvb-usb/dib0700_devices.c 		{.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
rb               2241 drivers/media/usb/dvb-usb/dib0700_devices.c 	switch (rb[0] << 8 | rb[1]) {
rb               2266 drivers/media/usb/dvb-usb/dib0700_devices.c 			wb[2] |= rb[0];
rb               2267 drivers/media/usb/dvb-usb/dib0700_devices.c 			wb[3] |= rb[1] & ~(3 << 4);
rb               1032 drivers/media/usb/stkwebcam/stk-webcam.c 		void *priv, struct v4l2_requestbuffers *rb)
rb               1038 drivers/media/usb/stkwebcam/stk-webcam.c 	if (rb->memory != V4L2_MEMORY_MMAP)
rb               1044 drivers/media/usb/stkwebcam/stk-webcam.c 	if (rb->count == 0) {
rb               1053 drivers/media/usb/stkwebcam/stk-webcam.c 	if (rb->count < 3)
rb               1054 drivers/media/usb/stkwebcam/stk-webcam.c 		rb->count = 3;
rb               1056 drivers/media/usb/stkwebcam/stk-webcam.c 	else if (rb->count > 5)
rb               1057 drivers/media/usb/stkwebcam/stk-webcam.c 		rb->count = 5;
rb               1059 drivers/media/usb/stkwebcam/stk-webcam.c 	stk_allocate_buffers(dev, rb->count);
rb               1060 drivers/media/usb/stkwebcam/stk-webcam.c 	rb->count = dev->n_sbufs;
rb                357 drivers/media/usb/tm6000/tm6000.h 			  struct v4l2_requestbuffers *rb);
rb                267 drivers/media/usb/uvc/uvc_queue.c 			struct v4l2_requestbuffers *rb)
rb                272 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_reqbufs(&queue->queue, rb);
rb                275 drivers/media/usb/uvc/uvc_queue.c 	return ret ? ret : rb->count;
rb                705 drivers/media/usb/uvc/uvc_v4l2.c 			     struct v4l2_requestbuffers *rb)
rb                716 drivers/media/usb/uvc/uvc_v4l2.c 	ret = uvc_request_buffers(&stream->queue, rb);
rb                751 drivers/media/usb/uvc/uvcvideo.h 			struct v4l2_requestbuffers *rb);
rb               1048 drivers/media/v4l2-core/v4l2-mem2mem.c 				struct v4l2_requestbuffers *rb)
rb               1052 drivers/media/v4l2-core/v4l2-mem2mem.c 	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
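
Several of the reqbufs handlers above (fimc-isp-video, s3c-camif) enforce a minimum buffer count: if the videobuf2 core grants fewer buffers than the pipeline needs, they release them again by re-requesting a count of zero and return -ENOMEM. A hedged sketch of that fallback with a stub standing in for vb2_reqbufs(); the REQ_BUFS_MIN value and function names are illustrative:

    #include <errno.h>
    #include <stdio.h>

    #define REQ_BUFS_MIN 3   /* illustrative minimum, like FIMC_ISP_REQ_BUFS_MIN */

    /* Stub standing in for vb2_reqbufs(): pretend the core can only
     * ever grant two buffers, so the minimum-count check below fires. */
    static int stub_reqbufs(unsigned int *count)
    {
        if (*count > 2)
            *count = 2;
        return 0;
    }

    static int driver_reqbufs(unsigned int *count)
    {
        int ret = stub_reqbufs(count);

        if (ret < 0)
            return ret;

        /* Not enough buffers for the pipeline: give them all back. */
        if (*count && *count < REQ_BUFS_MIN) {
            *count = 0;
            stub_reqbufs(count);
            return -ENOMEM;
        }
        return 0;
    }

    int main(void)
    {
        unsigned int count = 8;
        int ret = driver_reqbufs(&count);

        printf("ret=%d count=%u\n", ret, count);
        return 0;
    }
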
rb                 27 drivers/misc/mic/scif/scif_rb.c void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
rb                 30 drivers/misc/mic/scif/scif_rb.c 	rb->rb_base = rb_base;
rb                 31 drivers/misc/mic/scif/scif_rb.c 	rb->size = (1 << size);
rb                 32 drivers/misc/mic/scif/scif_rb.c 	rb->read_ptr = read_ptr;
rb                 33 drivers/misc/mic/scif/scif_rb.c 	rb->write_ptr = write_ptr;
rb                 34 drivers/misc/mic/scif/scif_rb.c 	rb->current_read_offset = *read_ptr;
rb                 35 drivers/misc/mic/scif/scif_rb.c 	rb->current_write_offset = *write_ptr;
rb                 39 drivers/misc/mic/scif/scif_rb.c static void memcpy_torb(struct scif_rb *rb, void *header,
rb                 44 drivers/misc/mic/scif/scif_rb.c 	if (header + size >= rb->rb_base + rb->size) {
rb                 46 drivers/misc/mic/scif/scif_rb.c 		size1 = (u32)(rb->rb_base + rb->size - header);
rb                 49 drivers/misc/mic/scif/scif_rb.c 		memcpy_toio((void __iomem __force *)rb->rb_base,
rb                 57 drivers/misc/mic/scif/scif_rb.c static void memcpy_fromrb(struct scif_rb *rb, void *header,
rb                 62 drivers/misc/mic/scif/scif_rb.c 	if (header + size >= rb->rb_base + rb->size) {
rb                 64 drivers/misc/mic/scif/scif_rb.c 		size1 = (u32)(rb->rb_base + rb->size - header);
rb                 68 drivers/misc/mic/scif/scif_rb.c 			      (void __iomem __force *)rb->rb_base, size2);
rb                 80 drivers/misc/mic/scif/scif_rb.c u32 scif_rb_space(struct scif_rb *rb)
rb                 82 drivers/misc/mic/scif/scif_rb.c 	rb->current_read_offset = *rb->read_ptr;
rb                 89 drivers/misc/mic/scif/scif_rb.c 	return scif_rb_ring_space(rb->current_write_offset,
rb                 90 drivers/misc/mic/scif/scif_rb.c 				  rb->current_read_offset, rb->size);
rb                102 drivers/misc/mic/scif/scif_rb.c int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
rb                106 drivers/misc/mic/scif/scif_rb.c 	if (scif_rb_space(rb) < size)
rb                108 drivers/misc/mic/scif/scif_rb.c 	header = rb->rb_base + rb->current_write_offset;
rb                109 drivers/misc/mic/scif/scif_rb.c 	memcpy_torb(rb, header, msg, size);
rb                114 drivers/misc/mic/scif/scif_rb.c 	rb->current_write_offset =
rb                115 drivers/misc/mic/scif/scif_rb.c 		(rb->current_write_offset + size) & (rb->size - 1);
rb                123 drivers/misc/mic/scif/scif_rb.c void scif_rb_commit(struct scif_rb *rb)
rb                132 drivers/misc/mic/scif/scif_rb.c 	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
rb                141 drivers/misc/mic/scif/scif_rb.c 	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
rb                153 drivers/misc/mic/scif/scif_rb.c static void *scif_rb_get(struct scif_rb *rb, u32 size)
rb                157 drivers/misc/mic/scif/scif_rb.c 	if (scif_rb_count(rb, size) >= size)
rb                158 drivers/misc/mic/scif/scif_rb.c 		header = rb->rb_base + rb->current_read_offset;
rb                171 drivers/misc/mic/scif/scif_rb.c u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
rb                176 drivers/misc/mic/scif/scif_rb.c 	header = scif_rb_get(rb, size);
rb                179 drivers/misc/mic/scif/scif_rb.c 			(rb->current_read_offset + size) & (rb->size - 1);
rb                182 drivers/misc/mic/scif/scif_rb.c 		rb->current_read_offset = next_cmd_offset;
rb                183 drivers/misc/mic/scif/scif_rb.c 		memcpy_fromrb(rb, header, msg, size);
rb                192 drivers/misc/mic/scif/scif_rb.c void scif_rb_update_read_ptr(struct scif_rb *rb)
rb                196 drivers/misc/mic/scif/scif_rb.c 	new_offset = rb->current_read_offset;
rb                204 drivers/misc/mic/scif/scif_rb.c 	WRITE_ONCE(*rb->read_ptr, new_offset);
rb                213 drivers/misc/mic/scif/scif_rb.c 	WRITE_ONCE(*rb->read_ptr, new_offset);
rb                224 drivers/misc/mic/scif/scif_rb.c u32 scif_rb_count(struct scif_rb *rb, u32 size)
rb                226 drivers/misc/mic/scif/scif_rb.c 	if (scif_rb_ring_cnt(rb->current_write_offset,
rb                227 drivers/misc/mic/scif/scif_rb.c 			     rb->current_read_offset,
rb                228 drivers/misc/mic/scif/scif_rb.c 			     rb->size) < size) {
rb                229 drivers/misc/mic/scif/scif_rb.c 		rb->current_write_offset = *rb->write_ptr;
rb                237 drivers/misc/mic/scif/scif_rb.c 	return scif_rb_ring_cnt(rb->current_write_offset,
rb                238 drivers/misc/mic/scif/scif_rb.c 				rb->current_read_offset,
rb                239 drivers/misc/mic/scif/scif_rb.c 				rb->size);
rb                 83 drivers/misc/mic/scif/scif_rb.h void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
rb                 87 drivers/misc/mic/scif/scif_rb.h int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
rb                 89 drivers/misc/mic/scif/scif_rb.h void scif_rb_commit(struct scif_rb *rb);
rb                 91 drivers/misc/mic/scif/scif_rb.h u32 scif_rb_space(struct scif_rb *rb);
rb                 95 drivers/misc/mic/scif/scif_rb.h u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
rb                 97 drivers/misc/mic/scif/scif_rb.h void scif_rb_update_read_ptr(struct scif_rb *rb);
rb                 99 drivers/misc/mic/scif/scif_rb.h u32 scif_rb_count(struct scif_rb *rb, u32 size);
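
memcpy_torb() and memcpy_fromrb() above handle messages that straddle the end of the SCIF ring by splitting the copy into a tail piece and a head piece. A small user-space sketch of the same split, using plain memcpy instead of memcpy_toio/memcpy_fromio; names are illustrative:

    #include <assert.h>
    #include <string.h>

    /* Copy 'len' bytes into a circular buffer of 'size' bytes starting
     * at 'off', splitting the copy when it would run past the end.
     * This mirrors the shape of memcpy_torb() but is not the SCIF code. */
    static void ring_copy_in(char *ring, size_t size, size_t off,
                             const void *msg, size_t len)
    {
        size_t first = size - off;

        if (len > first) {
            memcpy(ring + off, msg, first);
            memcpy(ring, (const char *)msg + first, len - first);
        } else {
            memcpy(ring + off, msg, len);
        }
    }

    int main(void)
    {
        char ring[8] = { 0 };

        ring_copy_in(ring, sizeof(ring), 6, "ABCD", 4);
        assert(ring[6] == 'A' && ring[7] == 'B');
        assert(ring[0] == 'C' && ring[1] == 'D');
        return 0;
    }
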
rb                151 drivers/misc/sram.c 	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
rb                153 drivers/misc/sram.c 	return ra->start - rb->start;
rb                 70 drivers/mtd/mtdswap.c 	struct rb_node rb;
rb                 80 drivers/mtd/mtdswap.c 				rb)->erase_count)
rb                 82 drivers/mtd/mtdswap.c 				rb)->erase_count)
rb                199 drivers/mtd/mtdswap.c 		rb_erase(&eb->rb, eb->root);
rb                211 drivers/mtd/mtdswap.c 		cur = rb_entry(parent, struct swap_eb, rb);
rb                218 drivers/mtd/mtdswap.c 	rb_link_node(&eb->rb, parent, p);
rb                219 drivers/mtd/mtdswap.c 	rb_insert_color(&eb->rb, root);
rb                438 drivers/mtd/mtdswap.c 	median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
rb                451 drivers/mtd/mtdswap.c 		rb_erase(&eb->rb, &hist_root);
rb                576 drivers/mtd/mtdswap.c 			eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
rb                577 drivers/mtd/mtdswap.c 			rb_erase(&eb->rb, clean_root);
rb                861 drivers/mtd/mtdswap.c 	eb = rb_entry(rb_first(rp), struct swap_eb, rb);
rb                863 drivers/mtd/mtdswap.c 	rb_erase(&eb->rb, rp);
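
mtdswap keeps erase blocks in rb-trees ordered by erase count, so rb_first() on a tree hands back the least-worn block for reuse. As a simplified stand-in, a sorted list with a take-minimum operation shows the same wear-leveling selection; this is not the kernel rbtree API and the names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified wear-leveling pool: blocks kept sorted by erase count
     * so the head is always the least-worn one, like rb_first() on the
     * mtdswap trees above.  A list replaces the rb-tree for brevity. */
    struct eb {
        struct eb *next;
        unsigned int erase_count;
    };

    static void pool_insert(struct eb **pool, struct eb *e)
    {
        struct eb **p = pool;

        while (*p && (*p)->erase_count <= e->erase_count)
            p = &(*p)->next;
        e->next = *p;
        *p = e;
    }

    /* Pop the least-worn block, as mtdswap does before reusing one. */
    static struct eb *pool_take_min(struct eb **pool)
    {
        struct eb *e = *pool;

        if (e)
            *pool = e->next;
        return e;
    }

    int main(void)
    {
        struct eb blocks[3] = { { .erase_count = 7 },
                                { .erase_count = 2 },
                                { .erase_count = 5 } };
        struct eb *pool = NULL;

        for (int i = 0; i < 3; i++)
            pool_insert(&pool, &blocks[i]);

        printf("least worn: %u erases\n", pool_take_min(&pool)->erase_count);
        return 0;
    }
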
rb                150 drivers/mtd/nand/raw/atmel/nand-controller.c 	struct atmel_nand_rb rb;
rb                486 drivers/mtd/nand/raw/atmel/nand-controller.c 	return gpiod_get_value(nand->activecs->rb.gpio);
rb                501 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
rb                515 drivers/mtd/nand/raw/atmel/nand-controller.c 	return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
rb                534 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
rb                991 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
rb               1446 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
rb               1447 drivers/mtd/nand/raw/atmel/nand-controller.c 		cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
rb               1624 drivers/mtd/nand/raw/atmel/nand-controller.c 			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
rb               1625 drivers/mtd/nand/raw/atmel/nand-controller.c 			nand->cs[i].rb.id = val;
rb               1638 drivers/mtd/nand/raw/atmel/nand-controller.c 				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
rb               1639 drivers/mtd/nand/raw/atmel/nand-controller.c 				nand->cs[i].rb.gpio = gpio;
rb               1758 drivers/mtd/nand/raw/atmel/nand-controller.c 		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
rb               1759 drivers/mtd/nand/raw/atmel/nand-controller.c 		nand->cs[0].rb.gpio = gpio;
rb                172 drivers/mtd/nand/raw/marvell_nand.c #define NDSR_RDY(rb)		BIT(11 + rb)
rb                311 drivers/mtd/nand/raw/marvell_nand.c 	unsigned int rb;
rb               2522 drivers/mtd/nand/raw/marvell_nand.c 	u32 cs, rb;
rb               2608 drivers/mtd/nand/raw/marvell_nand.c 			rb = 0;
rb               2611 drivers/mtd/nand/raw/marvell_nand.c 							 &rb);
rb               2620 drivers/mtd/nand/raw/marvell_nand.c 		if (rb >= nfc->caps->max_rb_nb) {
rb               2622 drivers/mtd/nand/raw/marvell_nand.c 				rb, nfc->caps->max_rb_nb);
rb               2626 drivers/mtd/nand/raw/marvell_nand.c 		marvell_nand->sels[i].rb = rb;
rb                168 drivers/mtd/nand/raw/sunxi_nand.c 	s8 rb;
rb                421 drivers/mtd/nand/raw/sunxi_nand.c 	if (sel->rb >= 0)
rb                422 drivers/mtd/nand/raw/sunxi_nand.c 		ctl |= NFC_RB_SEL(sel->rb);
rb               1912 drivers/mtd/nand/raw/sunxi_nand.c 	if (sunxi_nand->sels[op->cs].rb >= 0)
rb               1979 drivers/mtd/nand/raw/sunxi_nand.c 			sunxi_nand->sels[i].rb = tmp;
rb               1981 drivers/mtd/nand/raw/sunxi_nand.c 			sunxi_nand->sels[i].rb = -1;
rb                113 drivers/mtd/ubi/attach.c 		av = rb_entry(parent, struct ubi_ainf_volume, rb);
rb                143 drivers/mtd/ubi/attach.c 	rb_link_node(&av->rb, parent, p);
rb                144 drivers/mtd/ubi/attach.c 	rb_insert_color(&av->rb, &ai->volumes);
rb                595 drivers/mtd/ubi/attach.c 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
rb                704 drivers/mtd/ubi/attach.c 	rb_link_node(&aeb->u.rb, parent, p);
rb                705 drivers/mtd/ubi/attach.c 	rb_insert_color(&aeb->u.rb, &av->root);
rb                753 drivers/mtd/ubi/attach.c 	rb_erase(&av->rb, &ai->volumes);
rb               1289 drivers/mtd/ubi/attach.c 			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
rb               1292 drivers/mtd/ubi/attach.c 				if (this->rb_left == &aeb->u.rb)
rb               1315 drivers/mtd/ubi/attach.c 	struct rb_node *rb;
rb               1339 drivers/mtd/ubi/attach.c 	rb = ai->volumes.rb_node;
rb               1340 drivers/mtd/ubi/attach.c 	while (rb) {
rb               1341 drivers/mtd/ubi/attach.c 		if (rb->rb_left)
rb               1342 drivers/mtd/ubi/attach.c 			rb = rb->rb_left;
rb               1343 drivers/mtd/ubi/attach.c 		else if (rb->rb_right)
rb               1344 drivers/mtd/ubi/attach.c 			rb = rb->rb_right;
rb               1346 drivers/mtd/ubi/attach.c 			av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb               1348 drivers/mtd/ubi/attach.c 			rb = rb_parent(rb);
rb               1349 drivers/mtd/ubi/attach.c 			if (rb) {
rb               1350 drivers/mtd/ubi/attach.c 				if (rb->rb_left == &av->rb)
rb               1351 drivers/mtd/ubi/attach.c 					rb->rb_left = NULL;
rb               1353 drivers/mtd/ubi/attach.c 					rb->rb_right = NULL;
rb               1415 drivers/mtd/ubi/attach.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
rb               1416 drivers/mtd/ubi/attach.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
rb               1674 drivers/mtd/ubi/attach.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
rb               1717 drivers/mtd/ubi/attach.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
rb               1788 drivers/mtd/ubi/attach.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
rb               1790 drivers/mtd/ubi/attach.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
rb               1876 drivers/mtd/ubi/attach.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
rb               1877 drivers/mtd/ubi/attach.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
rb                220 drivers/mtd/ubi/eba.c 		le = rb_entry(p, struct ubi_ltree_entry, rb);
rb                286 drivers/mtd/ubi/eba.c 			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
rb                301 drivers/mtd/ubi/eba.c 		rb_link_node(&le->rb, parent, p);
rb                302 drivers/mtd/ubi/eba.c 		rb_insert_color(&le->rb, &ubi->ltree);
rb                347 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
rb                399 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
rb                423 drivers/mtd/ubi/eba.c 		rb_erase(&le->rb, &ubi->ltree);
rb               1522 drivers/mtd/ubi/eba.c 	struct rb_node *rb;
rb               1564 drivers/mtd/ubi/eba.c 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
rb               1571 drivers/mtd/ubi/eba.c 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
rb               1616 drivers/mtd/ubi/eba.c 	struct rb_node *rb;
rb               1648 drivers/mtd/ubi/eba.c 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
rb                 32 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb) {
rb                 65 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb)
rb                100 drivers/mtd/ubi/fastmap-wl.c 	rb_erase(&e->u.rb, &ubi->free);
rb                147 drivers/mtd/ubi/fastmap-wl.c 			rb_erase(&e->u.rb, &ubi->free);
rb                380 drivers/mtd/ubi/fastmap-wl.c 			     struct ubi_wl_entry, u.rb);
rb                213 drivers/mtd/ubi/fastmap.c 		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
rb                228 drivers/mtd/ubi/fastmap.c 	rb_link_node(&aeb->u.rb, parent, p);
rb                229 drivers/mtd/ubi/fastmap.c 	rb_insert_color(&aeb->u.rb, &av->root);
rb                252 drivers/mtd/ubi/fastmap.c 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
rb                321 drivers/mtd/ubi/fastmap.c 	rb_link_node(&new_aeb->u.rb, parent, p);
rb                322 drivers/mtd/ubi/fastmap.c 	rb_insert_color(&new_aeb->u.rb, &av->root);
rb                376 drivers/mtd/ubi/fastmap.c 	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
rb                377 drivers/mtd/ubi/fastmap.c 		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
rb                379 drivers/mtd/ubi/fastmap.c 				rb_erase(&aeb->u.rb, &av->root);
rb                530 drivers/mtd/ubi/fastmap.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
rb                531 drivers/mtd/ubi/fastmap.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
rb                180 drivers/mtd/ubi/ubi.h 		struct rb_node rb;
rb                202 drivers/mtd/ubi/ubi.h 	struct rb_node rb;
rb                682 drivers/mtd/ubi/ubi.h 		struct rb_node rb;
rb                717 drivers/mtd/ubi/ubi.h 	struct rb_node rb;
rb               1001 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
rb               1010 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
rb               1019 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
rb               1038 drivers/mtd/ubi/ubi.h #define ubi_rb_for_each_entry(rb, pos, root, member)                         \
rb               1039 drivers/mtd/ubi/ubi.h 	for (rb = rb_first(root),                                            \
rb               1040 drivers/mtd/ubi/ubi.h 	     pos = (rb ? container_of(rb, typeof(*pos), member) : NULL);     \
rb               1041 drivers/mtd/ubi/ubi.h 	     rb;                                                             \
rb               1042 drivers/mtd/ubi/ubi.h 	     rb = rb_next(rb),                                               \
rb               1043 drivers/mtd/ubi/ubi.h 	     pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
rb               1056 drivers/mtd/ubi/ubi.h 		rb_erase(&aeb->u.rb, &av->root);
rb                366 drivers/mtd/ubi/vtbl.c 	struct rb_node *rb;
rb                399 drivers/mtd/ubi/vtbl.c 	ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
rb                148 drivers/mtd/ubi/wl.c 		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
rb                163 drivers/mtd/ubi/wl.c 	rb_link_node(&e->u.rb, parent, p);
rb                164 drivers/mtd/ubi/wl.c 	rb_insert_color(&e->u.rb, root);
rb                244 drivers/mtd/ubi/wl.c 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
rb                325 drivers/mtd/ubi/wl.c 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
rb                332 drivers/mtd/ubi/wl.c 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
rb                366 drivers/mtd/ubi/wl.c 	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
rb                367 drivers/mtd/ubi/wl.c 	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
rb                370 drivers/mtd/ubi/wl.c 		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
rb                406 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
rb                720 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->used);
rb                731 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
rb                746 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->used);
rb                752 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
rb                758 drivers/mtd/ubi/wl.c 		rb_erase(&e1->u.rb, &ubi->scrub);
rb               1036 drivers/mtd/ubi/wl.c 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
rb               1275 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
rb               1278 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->scrub);
rb               1281 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->erroneous);
rb               1350 drivers/mtd/ubi/wl.c 		rb_erase(&e->u.rb, &ubi->used);
rb               1546 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
rb               1552 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->free);
rb               1587 drivers/mtd/ubi/wl.c 	struct rb_node *rb;
rb               1590 drivers/mtd/ubi/wl.c 	rb = root->rb_node;
rb               1591 drivers/mtd/ubi/wl.c 	while (rb) {
rb               1592 drivers/mtd/ubi/wl.c 		if (rb->rb_left)
rb               1593 drivers/mtd/ubi/wl.c 			rb = rb->rb_left;
rb               1594 drivers/mtd/ubi/wl.c 		else if (rb->rb_right)
rb               1595 drivers/mtd/ubi/wl.c 			rb = rb->rb_right;
rb               1597 drivers/mtd/ubi/wl.c 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
rb               1599 drivers/mtd/ubi/wl.c 			rb = rb_parent(rb);
rb               1600 drivers/mtd/ubi/wl.c 			if (rb) {
rb               1601 drivers/mtd/ubi/wl.c 				if (rb->rb_left == &e->u.rb)
rb               1602 drivers/mtd/ubi/wl.c 					rb->rb_left = NULL;
rb               1604 drivers/mtd/ubi/wl.c 					rb->rb_right = NULL;
rb               1793 drivers/mtd/ubi/wl.c 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
rb               1794 drivers/mtd/ubi/wl.c 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
rb               2029 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
rb                195 drivers/net/ethernet/brocade/bna/bfa_ioc.h 	enum bfa_status (*ioc_pll_init) (void __iomem *rb,
rb                 49 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
rb                 51 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
rb                251 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb;
rb                254 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	rb = bfa_ioc_bar0(ioc);
rb                256 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
rb                257 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
rb                258 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
rb                261 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
rb                262 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
rb                263 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
rb                264 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
rb                265 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
rb                266 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
rb                267 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
rb                269 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
rb                270 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
rb                271 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
rb                272 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
rb                273 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
rb                274 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
rb                275 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
rb                281 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
rb                282 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
rb                283 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
rb                284 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
rb                289 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
rb                290 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
rb                291 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
rb                292 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
rb                293 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
rb                298 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
rb                304 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
rb                310 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb;
rb                313 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	rb = bfa_ioc_bar0(ioc);
rb                315 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
rb                316 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
rb                317 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
rb                318 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
rb                319 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
rb                320 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
rb                323 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
rb                324 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
rb                325 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
rb                326 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
rb                327 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
rb                329 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
rb                330 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
rb                331 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
rb                332 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
rb                333 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
rb                339 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
rb                340 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
rb                341 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
rb                342 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
rb                347 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
rb                348 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
rb                349 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
rb                350 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
rb                351 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
rb                356 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
rb                362 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
rb                371 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                377 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + FNC_PERS_REG);
rb                386 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                389 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
rb                397 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                400 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + FNC_PERS_REG);
rb                419 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, rb + FNC_PERS_REG);
rb                447 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                450 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
rb                453 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
rb                459 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			rb + HOSTFN_MSIX_VT_OFST_NUMVT);
rb                461 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
rb                601 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
rb                616 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(0, (rb + OP_MODE));
rb                620 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 				(rb + ETH_MAC_SER_REG));
rb                622 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
rb                624 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 				(rb + ETH_MAC_SER_REG));
rb                626 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
rb                627 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
rb                628 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                629 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                630 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                631 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                632 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                633 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                636 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_SCLK_CTL_REG);
rb                639 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_LCLK_CTL_REG);
rb                642 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_SCLK_CTL_REG);
rb                645 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_LCLK_CTL_REG);
rb                646 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	readl(rb + HOSTFN0_INT_MSK);
rb                648 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                649 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                652 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_SCLK_CTL_REG);
rb                655 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		rb + APP_PLL_LCLK_CTL_REG);
rb                658 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
rb                659 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
rb                661 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + PSS_CTL_REG);
rb                663 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, (rb + PSS_CTL_REG));
rb                666 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(0, (rb + PMM_1T_RESET_REG_P0));
rb                667 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(0, (rb + PMM_1T_RESET_REG_P1));
rb                670 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
rb                672 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + MBIST_STAT_REG);
rb                673 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0, (rb + MBIST_CTL_REG));
rb                678 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_sclk_init(void __iomem *rb)
rb                685 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                689 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                695 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                697 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                702 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_CHIP_MISC_PRG);
rb                704 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_CHIP_MISC_PRG);
rb                706 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_PCIE_MISC_REG);
rb                708 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_PCIE_MISC_REG);
rb                713 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                716 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                730 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_lclk_init(void __iomem *rb)
rb                737 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                741 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                746 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_CHIP_MISC_PRG);
rb                747 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, (rb + CT2_CHIP_MISC_PRG));
rb                752 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                753 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                758 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                761 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                770 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_mem_init(void __iomem *rb)
rb                774 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + PSS_CTL_REG);
rb                776 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(r32, rb + PSS_CTL_REG);
rb                779 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
rb                781 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(0, rb + CT2_MBIST_CTL_REG);
rb                785 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_mac_reset(void __iomem *rb)
rb                789 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	bfa_ioc_ct2_sclk_init(rb);
rb                790 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	bfa_ioc_ct2_lclk_init(rb);
rb                795 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                797 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                802 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                804 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                808 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_CSI_MAC_CONTROL_REG(0));
rb                810 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	       rb + CT2_CSI_MAC_CONTROL_REG(1));
rb                818 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_nfc_halted(void __iomem *rb)
rb                822 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
rb                830 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_nfc_resume(void __iomem *rb)
rb                835 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
rb                837 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
rb                846 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
rb                851 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	wgn = readl(rb + CT2_WGN_STATUS);
rb                853 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
rb                857 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		if (bfa_ioc_ct2_nfc_halted(rb))
rb                858 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			bfa_ioc_ct2_nfc_resume(rb);
rb                860 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 				rb + CT2_CSI_FW_CTL_SET_REG);
rb                863 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                870 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                877 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
rb                880 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
rb                882 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
rb                888 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		bfa_ioc_ct2_mac_reset(rb);
rb                889 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		bfa_ioc_ct2_sclk_init(rb);
rb                890 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		bfa_ioc_ct2_lclk_init(rb);
rb                893 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                895 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 				rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                896 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                898 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 				rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                903 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + PSS_GPIO_OUT_REG);
rb                904 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
rb                905 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + PSS_GPIO_OE_REG);
rb                906 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
rb                913 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
rb                914 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);
rb                917 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	r32 = readl(rb + HOST_SEM5_REG);
rb                919 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
rb                921 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
rb                922 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
rb                924 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
rb                926 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
rb                927 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
rb                931 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	bfa_ioc_ct2_mem_init(rb);
rb                933 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
rb                934 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
rb                310 drivers/net/ethernet/brocade/bna/bnad_debugfs.c 	void __iomem *rb, *reg_addr;
rb                335 drivers/net/ethernet/brocade/bna/bnad_debugfs.c 	rb = bfa_ioc_bar0(ioc);
rb                348 drivers/net/ethernet/brocade/bna/bnad_debugfs.c 	reg_addr = rb + addr;
rb                174 drivers/net/ethernet/ti/netcp_ethss.c #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
rb                175 drivers/net/ethernet/ti/netcp_ethss.c 		offsetof(struct gbe##_##rb, rn)
rb                176 drivers/net/ethernet/ti/netcp_ethss.c #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
rb                177 drivers/net/ethernet/ti/netcp_ethss.c 		offsetof(struct gbenu##_##rb, rn)
rb                178 drivers/net/ethernet/ti/netcp_ethss.c #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
rb                179 drivers/net/ethernet/ti/netcp_ethss.c 		offsetof(struct xgbe##_##rb, rn)
rb                180 drivers/net/ethernet/ti/netcp_ethss.c #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
rb               2980 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_fw_error_dump_rb *rb;
rb               2985 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
rb               2988 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
rb               2989 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		rb = (void *)(*data)->data;
rb               2990 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		rb->index = cpu_to_le32(i);
rb               2991 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		memcpy(rb->data, page_address(rxb->page), max_len);
rb                195 drivers/of/of_reserved_mem.c 	const struct reserved_mem *ra = a, *rb = b;
rb                197 drivers/of/of_reserved_mem.c 	if (ra->base < rb->base)
rb                200 drivers/of/of_reserved_mem.c 	if (ra->base > rb->base)
rb                 44 drivers/pwm/pwm-atmel-tcb.c 	u32 rb;
rb                472 drivers/pwm/pwm-atmel-tcb.c 		chan->rb = readl(base + ATMEL_TC_REG(i, RB));
rb                489 drivers/pwm/pwm-atmel-tcb.c 		writel(chan->rb, base + ATMEL_TC_REG(i, RB));
rb               5511 drivers/scsi/bfa/bfa_ioc.c 	void __iomem	*rb;
rb               5513 drivers/scsi/bfa/bfa_ioc.c 	rb = bfa_ioc_bar0(ioc);
rb               5514 drivers/scsi/bfa/bfa_ioc.c 	return readl(rb + BFA_PHY_LOCK_STATUS);
rb                331 drivers/scsi/bfa/bfa_ioc.h 	bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
rb                857 drivers/scsi/bfa/bfa_ioc.h bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
rb                858 drivers/scsi/bfa/bfa_ioc.h bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
rb                859 drivers/scsi/bfa/bfa_ioc.h bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
rb                138 drivers/scsi/bfa/bfa_ioc_cb.c 	void __iomem *rb;
rb                141 drivers/scsi/bfa/bfa_ioc_cb.c 	rb = bfa_ioc_bar0(ioc);
rb                143 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
rb                144 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
rb                145 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
rb                148 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
rb                149 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
rb                150 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
rb                152 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
rb                153 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
rb                154 drivers/scsi/bfa/bfa_ioc_cb.c 		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
rb                160 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
rb                161 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
rb                166 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
rb                167 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
rb                168 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
rb                169 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
rb                174 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
rb                175 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
rb                180 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
rb                186 drivers/scsi/bfa/bfa_ioc_cb.c 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
rb                357 drivers/scsi/bfa/bfa_ioc_cb.c bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
rb                369 drivers/scsi/bfa/bfa_ioc_cb.c 	join_bits = readl(rb + BFA_IOC0_STATE_REG) &
rb                371 drivers/scsi/bfa/bfa_ioc_cb.c 	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
rb                372 drivers/scsi/bfa/bfa_ioc_cb.c 	join_bits = readl(rb + BFA_IOC1_STATE_REG) &
rb                374 drivers/scsi/bfa/bfa_ioc_cb.c 	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
rb                375 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                376 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                377 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                378 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                379 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                380 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                381 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
rb                383 drivers/scsi/bfa/bfa_ioc_cb.c 			rb + APP_PLL_SCLK_CTL_REG);
rb                384 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
rb                386 drivers/scsi/bfa/bfa_ioc_cb.c 			rb + APP_PLL_LCLK_CTL_REG);
rb                388 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
rb                389 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
rb                391 drivers/scsi/bfa/bfa_ioc_cb.c 			rb + APP_PLL_SCLK_CTL_REG);
rb                393 drivers/scsi/bfa/bfa_ioc_cb.c 			rb + APP_PLL_LCLK_CTL_REG);
rb                395 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                396 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                397 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
rb                398 drivers/scsi/bfa/bfa_ioc_cb.c 	writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
rb                185 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem *rb;
rb                188 drivers/scsi/bfa/bfa_ioc_ct.c 	rb = bfa_ioc_bar0(ioc);
rb                190 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
rb                191 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
rb                192 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
rb                195 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
rb                196 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
rb                197 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
rb                198 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
rb                199 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
rb                200 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
rb                201 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
rb                203 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
rb                204 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
rb                205 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
rb                206 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
rb                207 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
rb                208 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
rb                209 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
rb                215 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
rb                216 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
rb                217 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
rb                218 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
rb                223 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
rb                224 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
rb                225 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
rb                226 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
rb                227 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
rb                232 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
rb                238 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
rb                244 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem *rb;
rb                247 drivers/scsi/bfa/bfa_ioc_ct.c 	rb = bfa_ioc_bar0(ioc);
rb                249 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
rb                250 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
rb                251 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
rb                252 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
rb                253 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
rb                254 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
rb                257 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
rb                258 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
rb                259 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
rb                260 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
rb                261 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
rb                263 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
rb                264 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
rb                265 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
rb                266 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
rb                267 drivers/scsi/bfa/bfa_ioc_ct.c 		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
rb                273 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
rb                274 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
rb                275 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
rb                276 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
rb                281 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
rb                282 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
rb                283 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
rb                284 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
rb                285 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
rb                290 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
rb                296 drivers/scsi/bfa/bfa_ioc_ct.c 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
rb                307 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                313 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + FNC_PERS_REG);
rb                324 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
rb                327 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
rb                340 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                343 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + FNC_PERS_REG);
rb                364 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, rb + FNC_PERS_REG);
rb                564 drivers/scsi/bfa/bfa_ioc_ct.c 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
rb                567 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
rb                570 drivers/scsi/bfa/bfa_ioc_ct.c 			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
rb                576 drivers/scsi/bfa/bfa_ioc_ct.c 		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
rb                578 drivers/scsi/bfa/bfa_ioc_ct.c 		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
rb                582 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
rb                597 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(0, (rb + OP_MODE));
rb                599 drivers/scsi/bfa/bfa_ioc_ct.c 			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
rb                601 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
rb                602 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
rb                604 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
rb                605 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
rb                606 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                607 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                608 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                609 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                610 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
rb                611 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
rb                613 drivers/scsi/bfa/bfa_ioc_ct.c 			rb + APP_PLL_SCLK_CTL_REG);
rb                615 drivers/scsi/bfa/bfa_ioc_ct.c 			rb + APP_PLL_LCLK_CTL_REG);
rb                617 drivers/scsi/bfa/bfa_ioc_ct.c 		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
rb                619 drivers/scsi/bfa/bfa_ioc_ct.c 		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
rb                620 drivers/scsi/bfa/bfa_ioc_ct.c 	readl(rb + HOSTFN0_INT_MSK);
rb                622 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
rb                623 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
rb                624 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
rb                625 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
rb                628 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
rb                629 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
rb                631 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + PSS_CTL_REG));
rb                633 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + PSS_CTL_REG));
rb                636 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(0, (rb + PMM_1T_RESET_REG_P0));
rb                637 drivers/scsi/bfa/bfa_ioc_ct.c 		writel(0, (rb + PMM_1T_RESET_REG_P1));
rb                640 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
rb                642 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + MBIST_STAT_REG));
rb                643 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0, (rb + MBIST_CTL_REG));
rb                648 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_sclk_init(void __iomem *rb)
rb                655 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                659 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                665 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                667 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                672 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_CHIP_MISC_PRG));
rb                673 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
rb                675 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_PCIE_MISC_REG));
rb                676 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
rb                681 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                684 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                693 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_lclk_init(void __iomem *rb)
rb                700 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                704 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                709 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_CHIP_MISC_PRG));
rb                710 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_CHIP_MISC_PRG));
rb                715 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                716 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                721 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                724 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                733 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_mem_init(void __iomem *rb)
rb                737 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + PSS_CTL_REG));
rb                739 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + PSS_CTL_REG));
rb                742 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
rb                744 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(0, (rb + CT2_MBIST_CTL_REG));
rb                748 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_mac_reset(void __iomem *rb)
rb                752 drivers/scsi/bfa/bfa_ioc_ct.c 		rb + CT2_CSI_MAC_CONTROL_REG(0));
rb                754 drivers/scsi/bfa/bfa_ioc_ct.c 		rb + CT2_CSI_MAC_CONTROL_REG(1));
rb                758 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_enable_flash(void __iomem *rb)
rb                762 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + PSS_GPIO_OUT_REG));
rb                763 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
rb                764 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + PSS_GPIO_OE_REG));
rb                765 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
rb                775 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_nfc_halted(void __iomem *rb)
rb                779 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
rb                787 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_nfc_halt(void __iomem *rb)
rb                791 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
rb                793 drivers/scsi/bfa/bfa_ioc_ct.c 		if (bfa_ioc_ct2_nfc_halted(rb))
rb                797 drivers/scsi/bfa/bfa_ioc_ct.c 	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
rb                801 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_nfc_resume(void __iomem *rb)
rb                806 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
rb                808 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
rb                817 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_clk_reset(void __iomem *rb)
rb                821 drivers/scsi/bfa/bfa_ioc_ct.c 	bfa_ioc_ct2_sclk_init(rb);
rb                822 drivers/scsi/bfa/bfa_ioc_ct.c 	bfa_ioc_ct2_lclk_init(rb);
rb                827 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                829 drivers/scsi/bfa/bfa_ioc_ct.c 			(rb + CT2_APP_PLL_SCLK_CTL_REG));
rb                831 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                833 drivers/scsi/bfa/bfa_ioc_ct.c 			(rb + CT2_APP_PLL_LCLK_CTL_REG));
rb                838 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
rb                842 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl((rb + PSS_CTL_REG));
rb                844 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(r32, (rb + PSS_CTL_REG));
rb                846 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
rb                849 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
rb                857 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
rb                864 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
rb                869 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
rb                874 drivers/scsi/bfa/bfa_ioc_ct.c 	if (bfa_ioc_ct2_nfc_halted(rb))
rb                875 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_nfc_resume(rb);
rb                877 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl(rb + CT2_NFC_STS_REG);
rb                883 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + CT2_NFC_STS_REG);
rb                888 drivers/scsi/bfa/bfa_ioc_ct.c bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
rb                892 drivers/scsi/bfa/bfa_ioc_ct.c 	wgn = readl(rb + CT2_WGN_STATUS);
rb                898 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_clk_reset(rb);
rb                899 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_enable_flash(rb);
rb                901 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_mac_reset(rb);
rb                903 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_clk_reset(rb);
rb                904 drivers/scsi/bfa/bfa_ioc_ct.c 		bfa_ioc_ct2_enable_flash(rb);
rb                907 drivers/scsi/bfa/bfa_ioc_ct.c 		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
rb                912 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_wait_till_nfc_running(rb);
rb                914 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_nfc_clk_reset(rb);
rb                916 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_nfc_halt(rb);
rb                918 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_clk_reset(rb);
rb                919 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_mac_reset(rb);
rb                920 drivers/scsi/bfa/bfa_ioc_ct.c 			bfa_ioc_ct2_clk_reset(rb);
rb                932 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + CT2_CHIP_MISC_PRG);
rb                933 drivers/scsi/bfa/bfa_ioc_ct.c 	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
rb                940 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
rb                941 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
rb                944 drivers/scsi/bfa/bfa_ioc_ct.c 	r32 = readl(rb + HOST_SEM5_REG);
rb                946 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
rb                948 drivers/scsi/bfa/bfa_ioc_ct.c 			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
rb                949 drivers/scsi/bfa/bfa_ioc_ct.c 			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
rb                951 drivers/scsi/bfa/bfa_ioc_ct.c 		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
rb                953 drivers/scsi/bfa/bfa_ioc_ct.c 			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
rb                954 drivers/scsi/bfa/bfa_ioc_ct.c 			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
rb                958 drivers/scsi/bfa/bfa_ioc_ct.c 	bfa_ioc_ct2_mem_init(rb);
rb                960 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
rb                961 drivers/scsi/bfa/bfa_ioc_ct.c 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
rb                249 drivers/scsi/bfa/bfad_debugfs.c 	void __iomem *rb, *reg_addr;
rb                279 drivers/scsi/bfa/bfad_debugfs.c 	rb = bfa_ioc_bar0(ioc);
rb                293 drivers/scsi/bfa/bfad_debugfs.c 	reg_addr = rb + addr;
rb                339 drivers/scsi/initio.c 	u8 instr, rb;
rb                352 drivers/scsi/initio.c 		rb = inb(base + TUL_NVRAM);
rb                353 drivers/scsi/initio.c 		rb &= SE2DI;
rb                354 drivers/scsi/initio.c 		val += (rb << i);
rb                374 drivers/scsi/initio.c 	u8 rb;
rb                403 drivers/scsi/initio.c 		if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
rb                132 drivers/spi/spi-mpc52xx-psc.c 	unsigned rb = 0;	/* number of bytes received */
rb                146 drivers/spi/spi-mpc52xx-psc.c 	while (rb < t->len) {
rb                147 drivers/spi/spi-mpc52xx-psc.c 		if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
rb                152 drivers/spi/spi-mpc52xx-psc.c 			rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
rb                174 drivers/spi/spi-mpc52xx-psc.c 		if (t->len - rb == 1) {
rb                187 drivers/spi/spi-mpc52xx-psc.c 			for (; recv_at_once; rb++, recv_at_once--)
rb                188 drivers/spi/spi-mpc52xx-psc.c 				rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
rb                190 drivers/spi/spi-mpc52xx-psc.c 			for (; recv_at_once; rb++, recv_at_once--)
rb                779 drivers/staging/media/omap4iss/iss_video.c iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
rb                783 drivers/staging/media/omap4iss/iss_video.c 	return vb2_reqbufs(&vfh->queue, rb);
rb                207 drivers/staging/wilc1000/wilc_spi.c static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
rb                215 drivers/staging/wilc1000/wilc_spi.c 			.rx_buf = rb,
rb                247 drivers/staging/wilc1000/wilc_spi.c static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
rb                255 drivers/staging/wilc1000/wilc_spi.c 			.rx_buf = rb,
rb                287 drivers/staging/wilc1000/wilc_spi.c 	u8 wb[32], rb[32];
rb                424 drivers/staging/wilc1000/wilc_spi.c 	if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
rb                435 drivers/staging/wilc1000/wilc_spi.c 	rsp = rb[rix++];
rb                447 drivers/staging/wilc1000/wilc_spi.c 	rsp = rb[rix++];
rb                466 drivers/staging/wilc1000/wilc_spi.c 				rsp = rb[rix++];
rb                487 drivers/staging/wilc1000/wilc_spi.c 			b[0] = rb[rix++];
rb                488 drivers/staging/wilc1000/wilc_spi.c 			b[1] = rb[rix++];
rb                489 drivers/staging/wilc1000/wilc_spi.c 			b[2] = rb[rix++];
rb                490 drivers/staging/wilc1000/wilc_spi.c 			b[3] = rb[rix++];
rb                502 drivers/staging/wilc1000/wilc_spi.c 				crc[0] = rb[rix++];
rb                503 drivers/staging/wilc1000/wilc_spi.c 				crc[1] = rb[rix++];
rb                515 drivers/staging/wilc1000/wilc_spi.c 			b[ix++] = rb[rix++];
rb                 44 drivers/target/iscsi/iscsi_target_configfs.c 	ssize_t rb;
rb                 48 drivers/target/iscsi/iscsi_target_configfs.c 		rb = sprintf(page, "1\n");
rb                 50 drivers/target/iscsi/iscsi_target_configfs.c 		rb = sprintf(page, "0\n");
rb                 52 drivers/target/iscsi/iscsi_target_configfs.c 	return rb;
rb                449 drivers/target/iscsi/iscsi_target_configfs.c 	ssize_t rb;							\
rb                454 drivers/target/iscsi/iscsi_target_configfs.c 		rb = snprintf(page, PAGE_SIZE,				\
rb                458 drivers/target/iscsi/iscsi_target_configfs.c 		rb = snprintf(page, PAGE_SIZE, "%u\n",			\
rb                463 drivers/target/iscsi/iscsi_target_configfs.c 	return rb;							\
rb                505 drivers/target/iscsi/iscsi_target_configfs.c 	ssize_t rb = 0;
rb                511 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
rb                516 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "InitiatorName: %s\n",
rb                518 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
rb                521 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb,
rb                524 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "SessionType: %s\n",
rb                527 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "Session State: ");
rb                530 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
rb                533 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
rb                536 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
rb                539 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
rb                542 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
rb                545 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "ERROR: Unknown Session"
rb                550 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "---------------------[iSCSI Session"
rb                552 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
rb                555 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
rb                561 drivers/target/iscsi/iscsi_target_configfs.c 		rb += sprintf(page+rb, "----------------------[iSCSI"
rb                566 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "CID: %hu  Connection"
rb                570 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                574 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                578 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                582 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                586 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                590 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                594 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                598 drivers/target/iscsi/iscsi_target_configfs.c 				rb += sprintf(page+rb,
rb                603 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
rb                606 drivers/target/iscsi/iscsi_target_configfs.c 			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
rb                613 drivers/target/iscsi/iscsi_target_configfs.c 	return rb;
rb                726 drivers/target/iscsi/iscsi_target_configfs.c 	ssize_t rb;							\
rb                731 drivers/target/iscsi/iscsi_target_configfs.c 	rb = sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
rb                733 drivers/target/iscsi/iscsi_target_configfs.c 	return rb;							\
rb                907 drivers/target/iscsi/iscsi_target_configfs.c 	ssize_t rb;							\
rb                918 drivers/target/iscsi/iscsi_target_configfs.c 	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
rb                921 drivers/target/iscsi/iscsi_target_configfs.c 	return rb;							\
rb                222 drivers/tty/hvc/hvc_iucv.c 	struct iucv_tty_buffer *rb;
rb                240 drivers/tty/hvc/hvc_iucv.c 	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
rb                243 drivers/tty/hvc/hvc_iucv.c 	if (!rb->mbuf) { /* message not yet received ... */
rb                246 drivers/tty/hvc/hvc_iucv.c 		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
rb                247 drivers/tty/hvc/hvc_iucv.c 		if (!rb->mbuf)
rb                250 drivers/tty/hvc/hvc_iucv.c 		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
rb                251 drivers/tty/hvc/hvc_iucv.c 					    rb->mbuf, rb->msg.length, NULL);
rb                263 drivers/tty/hvc/hvc_iucv.c 		if (rc || (rb->mbuf->version != MSG_VERSION) ||
rb                264 drivers/tty/hvc/hvc_iucv.c 			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
rb                268 drivers/tty/hvc/hvc_iucv.c 	switch (rb->mbuf->type) {
rb                270 drivers/tty/hvc/hvc_iucv.c 		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
rb                271 drivers/tty/hvc/hvc_iucv.c 		memcpy(buf, rb->mbuf->data + rb->offset, written);
rb                272 drivers/tty/hvc/hvc_iucv.c 		if (written < (rb->mbuf->datalen - rb->offset)) {
rb                273 drivers/tty/hvc/hvc_iucv.c 			rb->offset += written;
rb                280 drivers/tty/hvc/hvc_iucv.c 		if (rb->mbuf->datalen != sizeof(struct winsize))
rb                284 drivers/tty/hvc/hvc_iucv.c 		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
rb                294 drivers/tty/hvc/hvc_iucv.c 	list_del(&rb->list);
rb                295 drivers/tty/hvc/hvc_iucv.c 	destroy_tty_buffer(rb);
rb                907 drivers/tty/hvc/hvc_iucv.c 	struct iucv_tty_buffer *rb;
rb                924 drivers/tty/hvc/hvc_iucv.c 	rb = alloc_tty_buffer(0, GFP_ATOMIC);
rb                925 drivers/tty/hvc/hvc_iucv.c 	if (!rb) {
rb                929 drivers/tty/hvc/hvc_iucv.c 	rb->msg = *msg;
rb                931 drivers/tty/hvc/hvc_iucv.c 	list_add_tail(&rb->list, &priv->tty_inqueue);
rb                474 drivers/usb/class/cdc-acm.c 	struct acm_rb *rb = urb->context;
rb                475 drivers/usb/class/cdc-acm.c 	struct acm *acm = rb->instance;
rb                482 drivers/usb/class/cdc-acm.c 		rb->index, urb->actual_length, status);
rb                511 drivers/usb/class/cdc-acm.c 		set_bit(rb->index, &acm->urbs_in_error_delay);
rb                527 drivers/usb/class/cdc-acm.c 	set_bit(rb->index, &acm->read_urbs_free);
rb                546 drivers/usb/class/cdc-acm.c 	acm_submit_read_urb(acm, rb->index, GFP_ATOMIC);
rb               1389 drivers/usb/class/cdc-acm.c 		struct acm_rb *rb = &(acm->read_buffers[i]);
rb               1392 drivers/usb/class/cdc-acm.c 		rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
rb               1393 drivers/usb/class/cdc-acm.c 								&rb->dma);
rb               1394 drivers/usb/class/cdc-acm.c 		if (!rb->base)
rb               1396 drivers/usb/class/cdc-acm.c 		rb->index = i;
rb               1397 drivers/usb/class/cdc-acm.c 		rb->instance = acm;
rb               1404 drivers/usb/class/cdc-acm.c 		urb->transfer_dma = rb->dma;
rb               1406 drivers/usb/class/cdc-acm.c 			usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
rb               1408 drivers/usb/class/cdc-acm.c 					 acm_read_bulk_callback, rb,
rb               1411 drivers/usb/class/cdc-acm.c 			usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
rb               1413 drivers/usb/class/cdc-acm.c 					  acm_read_bulk_callback, rb);
rb                150 drivers/usb/gadget/function/uvc_queue.c 			      struct v4l2_requestbuffers *rb)
rb                154 drivers/usb/gadget/function/uvc_queue.c 	ret = vb2_reqbufs(&queue->queue, rb);
rb                156 drivers/usb/gadget/function/uvc_queue.c 	return ret ? ret : rb->count;
rb                 68 drivers/usb/gadget/function/uvc_queue.h 		       struct v4l2_requestbuffers *rb);
rb                 53 drivers/vhost/vhost.c 		     rb, __u64, __subtree_last,
rb                 59 drivers/vhost/vhost.h 	struct rb_node rb;
rb                305 drivers/video/fbdev/core/fbcvt.c int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
rb                314 drivers/video/fbdev/core/fbcvt.c 	if (rb)
rb                645 drivers/video/fbdev/core/modedb.c 		int yres_specified = 0, cvt = 0, rb = 0;
rb                659 drivers/video/fbdev/core/modedb.c 					if (cvt || rb)
rb                670 drivers/video/fbdev/core/modedb.c 					if (cvt || rb)
rb                691 drivers/video/fbdev/core/modedb.c 					rb = 1;
rb                724 drivers/video/fbdev/core/modedb.c 				(rb) ? " reduced blanking" : "",
rb                738 drivers/video/fbdev/core/modedb.c 			ret = fb_find_mode_cvt(&cvt_mode, margins, rb);
rb               1098 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 		FLD_VAL(coefs->rb, 9, 0);
rb                365 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 			info.cpr_coefs.rb,
rb                386 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 				&coefs.rr, &coefs.rg, &coefs.rb,
rb                391 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 	arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
rb               1461 drivers/xen/xen-scsiback.c 	ssize_t rb;
rb               1464 drivers/xen/xen-scsiback.c 	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
rb               1467 drivers/xen/xen-scsiback.c 	return rb;
rb                130 drivers/xen/xenbus/xenbus_dev_frontend.c 	struct read_buffer *rb;
rb                148 drivers/xen/xenbus/xenbus_dev_frontend.c 	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
rb                151 drivers/xen/xenbus/xenbus_dev_frontend.c 		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
rb                153 drivers/xen/xenbus/xenbus_dev_frontend.c 		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
rb                156 drivers/xen/xenbus/xenbus_dev_frontend.c 		rb->cons += sz - ret;
rb                165 drivers/xen/xenbus/xenbus_dev_frontend.c 		if (rb->cons == rb->len) {
rb                166 drivers/xen/xenbus/xenbus_dev_frontend.c 			list_del(&rb->list);
rb                167 drivers/xen/xenbus/xenbus_dev_frontend.c 			kfree(rb);
rb                170 drivers/xen/xenbus/xenbus_dev_frontend.c 			rb = list_entry(u->read_buffers.next,
rb                191 drivers/xen/xenbus/xenbus_dev_frontend.c 	struct read_buffer *rb;
rb                198 drivers/xen/xenbus/xenbus_dev_frontend.c 	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
rb                199 drivers/xen/xenbus/xenbus_dev_frontend.c 	if (rb == NULL)
rb                202 drivers/xen/xenbus/xenbus_dev_frontend.c 	rb->cons = 0;
rb                203 drivers/xen/xenbus/xenbus_dev_frontend.c 	rb->len = len;
rb                205 drivers/xen/xenbus/xenbus_dev_frontend.c 	memcpy(rb->msg, data, len);
rb                207 drivers/xen/xenbus/xenbus_dev_frontend.c 	list_add_tail(&rb->list, queue);
rb                217 drivers/xen/xenbus/xenbus_dev_frontend.c 	struct read_buffer *rb;
rb                220 drivers/xen/xenbus/xenbus_dev_frontend.c 		rb = list_entry(list->next, struct read_buffer, list);
rb                222 drivers/xen/xenbus/xenbus_dev_frontend.c 		kfree(rb);
rb                311 drivers/xen/xenbus/xenbus_dev_frontend.c 	struct read_buffer *rb, *tmp_rb;
rb                332 drivers/xen/xenbus/xenbus_dev_frontend.c 	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
rb                333 drivers/xen/xenbus/xenbus_dev_frontend.c 		list_del(&rb->list);
rb                334 drivers/xen/xenbus/xenbus_dev_frontend.c 		kfree(rb);
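
The xenbus_dev_frontend.c entries above show a queued read-buffer pattern: a struct with a flexible payload is allocated with kmalloc(sizeof(*rb) + len), filled, appended to a list, then drained by advancing a cons offset until it reaches len, at which point the buffer is freed. Below is a minimal user-space sketch of the same idea; the names (read_buf, read_buf_new, read_buf_drain) are hypothetical stand-ins, not the kernel API, and the list handling is omitted.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for struct read_buffer: cons/len plus flexible payload. */
    struct read_buf {
            size_t cons;            /* bytes already consumed */
            size_t len;             /* total payload length   */
            char msg[];             /* flexible array member  */
    };

    static struct read_buf *read_buf_new(const void *data, size_t len)
    {
            struct read_buf *rb = malloc(sizeof(*rb) + len);  /* mirrors kmalloc(sizeof(*rb) + len) */

            if (!rb)
                    return NULL;
            rb->cons = 0;
            rb->len = len;
            memcpy(rb->msg, data, len);
            return rb;
    }

    /* Drain up to n bytes into dst; free the buffer once fully consumed. */
    static size_t read_buf_drain(struct read_buf **rbp, void *dst, size_t n)
    {
            struct read_buf *rb = *rbp;
            size_t sz = rb->len - rb->cons;

            if (sz > n)
                    sz = n;
            memcpy(dst, rb->msg + rb->cons, sz);
            rb->cons += sz;
            if (rb->cons == rb->len) {      /* analogous to list_del() + kfree() above */
                    free(rb);
                    *rbp = NULL;
            }
            return sz;
    }

    int main(void)
    {
            struct read_buf *rb = read_buf_new("hello", 5);
            char out[8] = {0};

            while (rb)
                    printf("got %zu bytes\n", read_buf_drain(&rb, out, 2));
            return 0;
    }
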
rb                234 fs/btrfs/extent_map.c 	struct rb_node *rb;
rb                248 fs/btrfs/extent_map.c 		rb = rb_prev(&em->rb_node);
rb                249 fs/btrfs/extent_map.c 		if (rb)
rb                250 fs/btrfs/extent_map.c 			merge = rb_entry(rb, struct extent_map, rb_node);
rb                251 fs/btrfs/extent_map.c 		if (rb && mergable_maps(merge, em)) {
rb                267 fs/btrfs/extent_map.c 	rb = rb_next(&em->rb_node);
rb                268 fs/btrfs/extent_map.c 	if (rb)
rb                269 fs/btrfs/extent_map.c 		merge = rb_entry(rb, struct extent_map, rb_node);
rb                270 fs/btrfs/extent_map.c 	if (rb && mergable_maps(em, merge)) {
rb               1676 fs/btrfs/raid56.c 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
rb               1679 fs/btrfs/raid56.c 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
rb                124 fs/jffs2/nodelist.c 	struct rb_node *parent = &base->rb;
rb                131 fs/jffs2/nodelist.c 		base = rb_entry(parent, struct jffs2_node_frag, rb);
rb                134 fs/jffs2/nodelist.c 			link = &base->rb.rb_right;
rb                136 fs/jffs2/nodelist.c 			link = &base->rb.rb_left;
rb                143 fs/jffs2/nodelist.c 	rb_link_node(&newfrag->rb, &base->rb, link);
rb                189 fs/jffs2/nodelist.c 			rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
rb                193 fs/jffs2/nodelist.c 			rb_link_node(&holefrag->rb, NULL, &root->rb_node);
rb                195 fs/jffs2/nodelist.c 		rb_insert_color(&holefrag->rb, root);
rb                204 fs/jffs2/nodelist.c 		rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
rb                207 fs/jffs2/nodelist.c 		rb_link_node(&newfrag->rb, NULL, &root->rb_node);
rb                209 fs/jffs2/nodelist.c 	rb_insert_color(&newfrag->rb, root);
rb                298 fs/jffs2/nodelist.c 			rb_insert_color(&newfrag->rb, root);
rb                301 fs/jffs2/nodelist.c 			rb_insert_color(&newfrag2->rb, root);
rb                310 fs/jffs2/nodelist.c 		rb_insert_color(&newfrag->rb, root);
rb                317 fs/jffs2/nodelist.c 		rb_replace_node(&this->rb, &newfrag->rb, root);
rb                327 fs/jffs2/nodelist.c 			rb_insert_color(&this->rb, root);
rb                338 fs/jffs2/nodelist.c 		rb_erase(&this->rb, root);
rb                537 fs/jffs2/nodelist.c 		frag = rb_entry(next, struct jffs2_node_frag, rb);
rb                543 fs/jffs2/nodelist.c 			next = frag->rb.rb_right;
rb                545 fs/jffs2/nodelist.c 			next = frag->rb.rb_left;
rb                570 fs/jffs2/nodelist.c 	rbtree_postorder_for_each_entry_safe(frag, next, root, rb) {
rb                230 fs/jffs2/nodelist.h 	struct rb_node rb;
rb                271 fs/jffs2/nodelist.h 	struct rb_node rb;
rb                334 fs/jffs2/nodelist.h 	return rb_entry(node, struct jffs2_node_frag, rb);
rb                344 fs/jffs2/nodelist.h 	return rb_entry(node, struct jffs2_node_frag, rb);
rb                347 fs/jffs2/nodelist.h #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
rb                348 fs/jffs2/nodelist.h #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
rb                349 fs/jffs2/nodelist.h #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
rb                350 fs/jffs2/nodelist.h #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
rb                351 fs/jffs2/nodelist.h #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
rb                352 fs/jffs2/nodelist.h #define frag_erase(frag, list) rb_erase(&frag->rb, list);
rb                354 fs/jffs2/nodelist.h #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
rb                355 fs/jffs2/nodelist.h #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
rb                356 fs/jffs2/nodelist.h #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
rb                357 fs/jffs2/nodelist.h #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
rb                358 fs/jffs2/nodelist.h #define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb)
rb                359 fs/jffs2/nodelist.h #define tn_erase(tn, list) rb_erase(&tn->rb, list);
rb                360 fs/jffs2/nodelist.h #define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
rb                361 fs/jffs2/nodelist.h #define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb)
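
The jffs2 helpers quoted above (frag_next, frag_prev, tn_next, ...) are thin wrappers that combine rb_next()/rb_prev() with rb_entry(), i.e. container_of(): given a pointer to the embedded struct rb_node named rb, they recover the enclosing jffs2_node_frag or jffs2_tmp_dnode_info. A standalone sketch of just that recovery step follows; it uses a local container_of and a dummy struct instead of the kernel headers, so the names here are illustrative only.

    #include <stdio.h>
    #include <stddef.h>

    /* Same idea as the kernel's container_of()/rb_entry(): walk back from a
     * member pointer to the structure that embeds it. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_node { int dummy; };        /* stand-in for struct rb_node */

    struct frag {                           /* stand-in for struct jffs2_node_frag */
            unsigned int ofs;
            unsigned int size;
            struct fake_node rb;            /* embedded link, like ->rb above */
    };

    #define frag_entry(nodep) container_of(nodep, struct frag, rb)

    int main(void)
    {
            struct frag f = { .ofs = 0x1000, .size = 0x200 };
            struct fake_node *n = &f.rb;    /* what an rbtree iterator hands back */
            struct frag *back = frag_entry(n);

            printf("ofs=%#x size=%#x\n", back->ofs, back->size);
            return 0;
    }
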
rb                185 fs/jffs2/readinode.c 		tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);
rb                188 fs/jffs2/readinode.c 			next = tn->rb.rb_right;
rb                190 fs/jffs2/readinode.c 			next = tn->rb.rb_left;
rb                286 fs/jffs2/readinode.c 				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
rb                344 fs/jffs2/readinode.c 			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
rb                346 fs/jffs2/readinode.c 				link = &insert_point->rb.rb_right;
rb                349 fs/jffs2/readinode.c 				link = &insert_point->rb.rb_left;
rb                351 fs/jffs2/readinode.c 				link = &insert_point->rb.rb_right;
rb                353 fs/jffs2/readinode.c 		rb_link_node(&tn->rb, &insert_point->rb, link);
rb                354 fs/jffs2/readinode.c 		rb_insert_color(&tn->rb, &rii->tn_root);
rb                432 fs/jffs2/readinode.c 		this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
rb                440 fs/jffs2/readinode.c 	rb_link_node(&tn->rb, parent, link);
rb                441 fs/jffs2/readinode.c 	rb_insert_color(&tn->rb, ver_root);
rb                476 fs/jffs2/readinode.c 		eat_last(&rii->tn_root, &last->rb);
rb                499 fs/jffs2/readinode.c 			eat_last(&ver_root, &this->rb);
rb                532 fs/jffs2/readinode.c 						eat_last(&ver_root, &vers_next->rb);
rb                548 fs/jffs2/readinode.c 	rbtree_postorder_for_each_entry_safe(tn, next, list, rb) {
rb                 25 fs/kernfs/dir.c #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
rb                 66 fs/kernfs/dir.c 	struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
rb                 68 fs/kernfs/dir.c 	if (ra != rb)
rb                 72 fs/kernfs/dir.c 	db = kernfs_depth(rb->kn, b);
rb                361 fs/kernfs/dir.c 			node = &pos->rb.rb_left;
rb                363 fs/kernfs/dir.c 			node = &pos->rb.rb_right;
rb                369 fs/kernfs/dir.c 	rb_link_node(&kn->rb, parent, node);
rb                370 fs/kernfs/dir.c 	rb_insert_color(&kn->rb, &kn->parent->dir.children);
rb                392 fs/kernfs/dir.c 	if (RB_EMPTY_NODE(&kn->rb))
rb                398 fs/kernfs/dir.c 	rb_erase(&kn->rb, &kn->parent->dir.children);
rb                399 fs/kernfs/dir.c 	RB_CLEAR_NODE(&kn->rb);
rb                655 fs/kernfs/dir.c 	RB_CLEAR_NODE(&kn->rb);
rb               1236 fs/kernfs/dir.c 	rbn = rb_next(&pos->rb);
rb               1268 fs/kernfs/dir.c 		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
rb               1289 fs/kernfs/dir.c 	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
rb               1474 fs/kernfs/dir.c 		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
rb               1635 fs/kernfs/dir.c 		struct rb_node *node = rb_next(&pos->rb);
rb               1650 fs/kernfs/dir.c 			struct rb_node *node = rb_next(&pos->rb);
rb                341 fs/nilfs2/recovery.c 			struct nilfs_recovery_block *rb;
rb                349 fs/nilfs2/recovery.c 			rb = kmalloc(sizeof(*rb), GFP_NOFS);
rb                350 fs/nilfs2/recovery.c 			if (unlikely(!rb)) {
rb                354 fs/nilfs2/recovery.c 			rb->ino = ino;
rb                355 fs/nilfs2/recovery.c 			rb->blocknr = blocknr++;
rb                356 fs/nilfs2/recovery.c 			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
rb                357 fs/nilfs2/recovery.c 			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
rb                359 fs/nilfs2/recovery.c 			list_add_tail(&rb->list, head);
rb                378 fs/nilfs2/recovery.c 		struct nilfs_recovery_block *rb;
rb                380 fs/nilfs2/recovery.c 		rb = list_first_entry(head, struct nilfs_recovery_block, list);
rb                381 fs/nilfs2/recovery.c 		list_del(&rb->list);
rb                382 fs/nilfs2/recovery.c 		kfree(rb);
rb                474 fs/nilfs2/recovery.c 				     struct nilfs_recovery_block *rb,
rb                480 fs/nilfs2/recovery.c 	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
rb                498 fs/nilfs2/recovery.c 	struct nilfs_recovery_block *rb, *n;
rb                504 fs/nilfs2/recovery.c 	list_for_each_entry_safe(rb, n, head, list) {
rb                505 fs/nilfs2/recovery.c 		inode = nilfs_iget(sb, root, rb->ino);
rb                512 fs/nilfs2/recovery.c 		pos = rb->blkoff << inode->i_blkbits;
rb                524 fs/nilfs2/recovery.c 		err = nilfs_recovery_copy_block(nilfs, rb, page);
rb                548 fs/nilfs2/recovery.c 			  err, (unsigned long)rb->ino,
rb                549 fs/nilfs2/recovery.c 			  (unsigned long long)rb->blkoff);
rb                554 fs/nilfs2/recovery.c 		list_del_init(&rb->list);
rb                555 fs/nilfs2/recovery.c 		kfree(rb);
rb                386 fs/ocfs2/alloc.c 	struct ocfs2_refcount_block *rb = et->et_object;
rb                388 fs/ocfs2/alloc.c 	et->et_root_el = &rb->rf_list;
rb                394 fs/ocfs2/alloc.c 	struct ocfs2_refcount_block *rb = et->et_object;
rb                396 fs/ocfs2/alloc.c 	rb->rf_last_eb_blk = cpu_to_le64(blkno);
rb                401 fs/ocfs2/alloc.c 	struct ocfs2_refcount_block *rb = et->et_object;
rb                403 fs/ocfs2/alloc.c 	return le64_to_cpu(rb->rf_last_eb_blk);
rb                409 fs/ocfs2/alloc.c 	struct ocfs2_refcount_block *rb = et->et_object;
rb                411 fs/ocfs2/alloc.c 	le32_add_cpu(&rb->rf_clusters, clusters);
rb                 77 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb                 89 fs/ocfs2/refcounttree.c 	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
rb                 97 fs/ocfs2/refcounttree.c 	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
rb                101 fs/ocfs2/refcounttree.c 				 rb->rf_signature);
rb                105 fs/ocfs2/refcounttree.c 	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
rb                109 fs/ocfs2/refcounttree.c 				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
rb                113 fs/ocfs2/refcounttree.c 	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
rb                117 fs/ocfs2/refcounttree.c 				 le32_to_cpu(rb->rf_fs_generation));
rb                451 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb                477 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
rb                487 fs/ocfs2/refcounttree.c 	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
rb                559 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb                621 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
rb                622 fs/ocfs2/refcounttree.c 	memset(rb, 0, inode->i_sb->s_blocksize);
rb                623 fs/ocfs2/refcounttree.c 	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
rb                624 fs/ocfs2/refcounttree.c 	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb                625 fs/ocfs2/refcounttree.c 	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
rb                626 fs/ocfs2/refcounttree.c 	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb                627 fs/ocfs2/refcounttree.c 	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb                628 fs/ocfs2/refcounttree.c 	rb->rf_blkno = cpu_to_le64(first_blkno);
rb                629 fs/ocfs2/refcounttree.c 	rb->rf_count = cpu_to_le32(1);
rb                630 fs/ocfs2/refcounttree.c 	rb->rf_records.rl_count =
rb                633 fs/ocfs2/refcounttree.c 	rb->rf_generation = osb->s_next_generation++;
rb                652 fs/ocfs2/refcounttree.c 	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
rb                700 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb                733 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
rb                734 fs/ocfs2/refcounttree.c 	le32_add_cpu(&rb->rf_count, 1);
rb                761 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb                780 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
rb                786 fs/ocfs2/refcounttree.c 	if (le32_to_cpu(rb->rf_count) == 1) {
rb                787 fs/ocfs2/refcounttree.c 		blk = le64_to_cpu(rb->rf_blkno);
rb                788 fs/ocfs2/refcounttree.c 		bit = le16_to_cpu(rb->rf_suballoc_bit);
rb                789 fs/ocfs2/refcounttree.c 		if (rb->rf_suballoc_loc)
rb                790 fs/ocfs2/refcounttree.c 			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
rb                796 fs/ocfs2/refcounttree.c 					le16_to_cpu(rb->rf_suballoc_slot));
rb                841 fs/ocfs2/refcounttree.c 	le32_add_cpu(&rb->rf_count, -1);

rb                844 fs/ocfs2/refcounttree.c 	if (!rb->rf_count) {
rb                881 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb                885 fs/ocfs2/refcounttree.c 	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
rb                886 fs/ocfs2/refcounttree.c 		rec = &rb->rf_records.rl_recs[i];
rb                904 fs/ocfs2/refcounttree.c 		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
rb               1070 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1073 fs/ocfs2/refcounttree.c 	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
rb               1081 fs/ocfs2/refcounttree.c 	el = &rb->rf_list;
rb               1148 fs/ocfs2/refcounttree.c 	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
rb               1151 fs/ocfs2/refcounttree.c 	if ((rb->rf_records.rl_recs[index].r_refcount ==
rb               1152 fs/ocfs2/refcounttree.c 	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
rb               1153 fs/ocfs2/refcounttree.c 	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
rb               1154 fs/ocfs2/refcounttree.c 	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
rb               1155 fs/ocfs2/refcounttree.c 	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
rb               1162 fs/ocfs2/refcounttree.c 	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
rb               1167 fs/ocfs2/refcounttree.c 	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
rb               1168 fs/ocfs2/refcounttree.c 		ret = ocfs2_refcount_rec_adjacent(rb, index);
rb               1173 fs/ocfs2/refcounttree.c 		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
rb               1186 fs/ocfs2/refcounttree.c static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
rb               1189 fs/ocfs2/refcounttree.c 	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
rb               1190 fs/ocfs2/refcounttree.c 	       rb->rf_records.rl_recs[index+1].r_refcount);
rb               1192 fs/ocfs2/refcounttree.c 	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
rb               1193 fs/ocfs2/refcounttree.c 		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
rb               1195 fs/ocfs2/refcounttree.c 	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
rb               1196 fs/ocfs2/refcounttree.c 		memmove(&rb->rf_records.rl_recs[index + 1],
rb               1197 fs/ocfs2/refcounttree.c 			&rb->rf_records.rl_recs[index + 2],
rb               1199 fs/ocfs2/refcounttree.c 			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
rb               1201 fs/ocfs2/refcounttree.c 	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
rb               1203 fs/ocfs2/refcounttree.c 	le16_add_cpu(&rb->rf_records.rl_used, -1);
rb               1209 fs/ocfs2/refcounttree.c static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
rb               1213 fs/ocfs2/refcounttree.c 				ocfs2_refcount_rec_contig(rb, index);
rb               1223 fs/ocfs2/refcounttree.c 	ocfs2_rotate_refcount_rec_left(rb, index);
rb               1226 fs/ocfs2/refcounttree.c 		ocfs2_rotate_refcount_rec_left(rb, index);
rb               1239 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1241 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rl = &rb->rf_records;
rb               1267 fs/ocfs2/refcounttree.c 		ocfs2_refcount_rec_merge(rb, index);
rb               1452 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1454 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rl = &rb->rf_records;
rb               1660 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1664 fs/ocfs2/refcounttree.c 	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
rb               1667 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rb               1668 fs/ocfs2/refcounttree.c 	old_cpos = le32_to_cpu(rb->rf_cpos);
rb               1724 fs/ocfs2/refcounttree.c 	rb->rf_cpos = cpu_to_le32(new_cpos);
rb               1743 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1745 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
rb               1748 fs/ocfs2/refcounttree.c 	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
rb               1770 fs/ocfs2/refcounttree.c 		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rb               1771 fs/ocfs2/refcounttree.c 		rf_list = &rb->rf_records;
rb               1797 fs/ocfs2/refcounttree.c 		ocfs2_refcount_rec_merge(rb, index);
rb               1833 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               1835 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
rb               1840 fs/ocfs2/refcounttree.c 	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
rb               1900 fs/ocfs2/refcounttree.c 		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rb               1901 fs/ocfs2/refcounttree.c 		rf_list = &rb->rf_records;
rb               1968 fs/ocfs2/refcounttree.c 			ocfs2_refcount_rec_merge(rb, index);
rb               2081 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               2085 fs/ocfs2/refcounttree.c 	BUG_ON(rb->rf_records.rl_used);
rb               2090 fs/ocfs2/refcounttree.c 		le32_to_cpu(rb->rf_cpos));
rb               2093 fs/ocfs2/refcounttree.c 	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
rb               2107 fs/ocfs2/refcounttree.c 					le16_to_cpu(rb->rf_suballoc_slot),
rb               2108 fs/ocfs2/refcounttree.c 					le64_to_cpu(rb->rf_suballoc_loc),
rb               2109 fs/ocfs2/refcounttree.c 					le64_to_cpu(rb->rf_blkno),
rb               2110 fs/ocfs2/refcounttree.c 					le16_to_cpu(rb->rf_suballoc_bit));
rb               2123 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
rb               2125 fs/ocfs2/refcounttree.c 	le32_add_cpu(&rb->rf_clusters, -1);
rb               2131 fs/ocfs2/refcounttree.c 	if (!rb->rf_list.l_next_free_rec) {
rb               2132 fs/ocfs2/refcounttree.c 		BUG_ON(rb->rf_clusters);
rb               2137 fs/ocfs2/refcounttree.c 		rb->rf_flags = 0;
rb               2138 fs/ocfs2/refcounttree.c 		rb->rf_parent = 0;
rb               2139 fs/ocfs2/refcounttree.c 		rb->rf_cpos = 0;
rb               2140 fs/ocfs2/refcounttree.c 		memset(&rb->rf_records, 0, sb->s_blocksize -
rb               2142 fs/ocfs2/refcounttree.c 		rb->rf_records.rl_count =
rb               2173 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb =
rb               2175 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
rb               2208 fs/ocfs2/refcounttree.c 	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
rb               2378 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb               2398 fs/ocfs2/refcounttree.c 				rb = (struct ocfs2_refcount_block *)
rb               2401 fs/ocfs2/refcounttree.c 				if (le16_to_cpu(rb->rf_records.rl_used) +
rb               2403 fs/ocfs2/refcounttree.c 				    le16_to_cpu(rb->rf_records.rl_count))
rb               2462 fs/ocfs2/refcounttree.c 		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
rb               2464 fs/ocfs2/refcounttree.c 		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
rb               2465 fs/ocfs2/refcounttree.c 		    le16_to_cpu(rb->rf_records.rl_count))
rb               2484 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
rb               2485 fs/ocfs2/refcounttree.c 	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
rb               3544 fs/ocfs2/refcounttree.c 	struct ocfs2_refcount_block *rb;
rb               3571 fs/ocfs2/refcounttree.c 			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rb               3580 fs/ocfs2/refcounttree.c 			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
rb               3581 fs/ocfs2/refcounttree.c 			    le16_to_cpu(rb->rf_records.rl_count))
rb               3600 fs/ocfs2/refcounttree.c 	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
rb               3601 fs/ocfs2/refcounttree.c 	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
rb               6289 fs/ocfs2/xattr.c 	struct ocfs2_refcount_block *rb =
rb               6313 fs/ocfs2/xattr.c 	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
rb               6314 fs/ocfs2/xattr.c 		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
rb               6315 fs/ocfs2/xattr.c 			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
rb               6757 fs/ocfs2/xattr.c 	struct ocfs2_refcount_block *rb;
rb               6781 fs/ocfs2/xattr.c 	rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
rb               6788 fs/ocfs2/xattr.c 	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
rb               6789 fs/ocfs2/xattr.c 		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
rb               6790 fs/ocfs2/xattr.c 			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
rb                622 fs/reiserfs/fix_node.c 			   int rnum, int blk_num, short *s012, int lb, int rb)
rb                639 fs/reiserfs/fix_node.c 		tb->rbytes = rb;
rb                645 fs/reiserfs/fix_node.c 	PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
rb                578 fs/ubifs/debug.c 	struct rb_node *rb;
rb                620 fs/ubifs/debug.c 	for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
rb                621 fs/ubifs/debug.c 		bud = rb_entry(rb, struct ubifs_bud, rb);
rb                646 fs/ubifs/debug.c 	struct rb_node *rb;
rb                708 fs/ubifs/debug.c 	for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
rb                709 fs/ubifs/debug.c 		bud = rb_entry(rb, struct ubifs_bud, rb);
rb               1757 fs/ubifs/debug.c 	struct rb_node rb;
rb               1804 fs/ubifs/debug.c 		fscki = rb_entry(parent, struct fsck_inode, rb);
rb               1860 fs/ubifs/debug.c 	rb_link_node(&fscki->rb, parent, p);
rb               1861 fs/ubifs/debug.c 	rb_insert_color(&fscki->rb, &fsckd->inodes);
rb               1882 fs/ubifs/debug.c 		fscki = rb_entry(p, struct fsck_inode, rb);
rb               2126 fs/ubifs/debug.c 	rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb)
rb               2151 fs/ubifs/debug.c 		fscki = rb_entry(this, struct fsck_inode, rb);
rb                 38 fs/ubifs/log.c 		bud = rb_entry(p, struct ubifs_bud, rb);
rb                 71 fs/ubifs/log.c 		bud = rb_entry(p, struct ubifs_bud, rb);
rb                122 fs/ubifs/log.c 		b = rb_entry(parent, struct ubifs_bud, rb);
rb                130 fs/ubifs/log.c 	rb_link_node(&bud->rb, parent, p);
rb                131 fs/ubifs/log.c 	rb_insert_color(&bud->rb, &c->buds);
rb                311 fs/ubifs/log.c 		bud = rb_entry(p1, struct ubifs_bud, rb);
rb                549 fs/ubifs/log.c 	struct rb_node rb;
rb                568 fs/ubifs/log.c 		dr = rb_entry(parent, struct done_ref, rb);
rb                583 fs/ubifs/log.c 	rb_link_node(&dr->rb, parent, p);
rb                584 fs/ubifs/log.c 	rb_insert_color(&dr->rb, done_tree);
rb                597 fs/ubifs/log.c 	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
rb                 67 fs/ubifs/orphan.c 		o = rb_entry(parent, struct ubifs_orphan, rb);
rb                 81 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
rb                 82 fs/ubifs/orphan.c 	rb_insert_color(&orphan->rb, &c->orph_tree);
rb                103 fs/ubifs/orphan.c 		o = rb_entry(p, struct ubifs_orphan, rb);
rb                117 fs/ubifs/orphan.c 	rb_erase(&o->rb, &c->orph_tree);
rb                514 fs/ubifs/orphan.c 		rb_erase(&orphan->rb, &c->orph_tree);
rb                588 fs/ubifs/orphan.c 		o = rb_entry(parent, struct ubifs_orphan, rb);
rb                600 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
rb                601 fs/ubifs/orphan.c 	rb_insert_color(&orphan->rb, &c->orph_tree);
rb                832 fs/ubifs/orphan.c 	struct rb_node rb;
rb                869 fs/ubifs/orphan.c 		o = rb_entry(parent, struct check_orphan, rb);
rb                879 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
rb                880 fs/ubifs/orphan.c 	rb_insert_color(&orphan->rb, root);
rb                891 fs/ubifs/orphan.c 		o = rb_entry(p, struct check_orphan, rb);
rb                906 fs/ubifs/orphan.c 	rbtree_postorder_for_each_entry_safe(o, n, root, rb)
rb               1230 fs/ubifs/recovery.c 	struct rb_node rb;
rb               1254 fs/ubifs/recovery.c 		e = rb_entry(parent, struct size_entry, rb);
rb               1270 fs/ubifs/recovery.c 	rb_link_node(&e->rb, parent, p);
rb               1271 fs/ubifs/recovery.c 	rb_insert_color(&e->rb, &c->size_tree);
rb               1287 fs/ubifs/recovery.c 		e = rb_entry(p, struct size_entry, rb);
rb               1309 fs/ubifs/recovery.c 	rb_erase(&e->rb, &c->size_tree);
rb               1321 fs/ubifs/recovery.c 	rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) {
rb               1512 fs/ubifs/recovery.c 	rb_erase(&e->rb, &c->size_tree);
rb               1536 fs/ubifs/recovery.c 		e = rb_entry(this, struct size_entry, rb);
rb               1583 fs/ubifs/recovery.c 		rb_erase(&e->rb, &c->size_tree);
rb                305 fs/ubifs/replay.c 	struct replay_entry *ra, *rb;
rb                312 fs/ubifs/replay.c 	rb = list_entry(b, struct replay_entry, list);
rb                313 fs/ubifs/replay.c 	ubifs_assert(c, ra->sqnum != rb->sqnum);
rb                314 fs/ubifs/replay.c 	if (ra->sqnum > rb->sqnum)
rb                896 fs/ubifs/super.c 	rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb)
rb                 84 fs/ubifs/tnc.c 		o = rb_entry(parent, struct ubifs_old_idx, rb);
rb                 99 fs/ubifs/tnc.c 	rb_link_node(&old_idx->rb, parent, p);
rb                100 fs/ubifs/tnc.c 	rb_insert_color(&old_idx->rb, &c->old_idx);
rb                176 fs/ubifs/tnc.c 	rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
rb                170 fs/ubifs/tnc_commit.c 		o = rb_entry(p, struct ubifs_old_idx, rb);
rb                278 fs/ubifs/ubifs.h 	struct rb_node rb;
rb                714 fs/ubifs/ubifs.h 	struct rb_node rb;
rb                925 fs/ubifs/ubifs.h 	struct rb_node rb;
rb                406 fs/xfs/xfs_extfree_item.c 	struct xfs_extent_free_item	*rb;
rb                409 fs/xfs/xfs_extfree_item.c 	rb = container_of(b, struct xfs_extent_free_item, xefi_list);
rb                411 fs/xfs/xfs_extfree_item.c 		XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
rb                279 fs/xfs/xfs_refcount_item.c 	struct xfs_refcount_intent	*rb;
rb                282 fs/xfs/xfs_refcount_item.c 	rb = container_of(b, struct xfs_refcount_intent, ri_list);
rb                284 fs/xfs/xfs_refcount_item.c 		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
rb                345 fs/xfs/xfs_rmap_item.c 	struct xfs_rmap_intent		*rb;
rb                348 fs/xfs/xfs_rmap_item.c 	rb = container_of(b, struct xfs_rmap_intent, ri_list);
rb                350 fs/xfs/xfs_rmap_item.c 		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
rb                139 fs/xfs/xfs_rtalloc.h # define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb)    (ENOSYS)
rb                141 fs/xfs/xfs_rtalloc.h # define xfs_rtpick_extent(m,t,l,rb)                    (ENOSYS)
rb               1025 include/drm/drm_connector.h 	bool rb;
rb                502 include/drm/drm_edid.h 					   bool rb);
rb                166 include/drm/drm_mm.h 	struct rb_node rb;
rb                721 include/linux/fb.h extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
rb                 91 include/linux/intel-ish-client-if.h int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
rb                  8 include/linux/interval_tree.h 	struct rb_node rb;
rb                154 include/linux/interval_tree_generic.h 	struct rb_node *rb = node->ITRB.rb_right, *prev;		      \
rb                164 include/linux/interval_tree_generic.h 		if (rb) {						      \
rb                165 include/linux/interval_tree_generic.h 			ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB);	      \
rb                173 include/linux/interval_tree_generic.h 			rb = rb_parent(&node->ITRB);			      \
rb                174 include/linux/interval_tree_generic.h 			if (!rb)					      \
rb                177 include/linux/interval_tree_generic.h 			node = rb_entry(rb, ITSTRUCT, ITRB);		      \
rb                178 include/linux/interval_tree_generic.h 			rb = node->ITRB.rb_right;			      \
rb                179 include/linux/interval_tree_generic.h 		} while (prev == rb);					      \
rb                146 include/linux/kernfs.h 	struct rb_node		rb;
rb                323 include/linux/mm_types.h 		struct rb_node rb;
rb                672 include/linux/perf_event.h 	struct ring_buffer		*rb;
rb                829 include/linux/perf_event.h 	struct ring_buffer		*rb;
rb                 77 include/linux/rbtree_augmented.h RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
rb                 79 include/linux/rbtree_augmented.h 	while (rb != stop) {						\
rb                 80 include/linux/rbtree_augmented.h 		RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD);	\
rb                 83 include/linux/rbtree_augmented.h 		rb = rb_parent(&node->RBFIELD);				\
rb                153 include/linux/rbtree_augmented.h #define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
rb                154 include/linux/rbtree_augmented.h #define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
rb                155 include/linux/rbtree_augmented.h #define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
rb                157 include/linux/rbtree_augmented.h static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
rb                159 include/linux/rbtree_augmented.h 	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
rb                162 include/linux/rbtree_augmented.h static inline void rb_set_parent_color(struct rb_node *rb,
rb                165 include/linux/rbtree_augmented.h 	rb->__rb_parent_color = (unsigned long)p | color;
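
The rbtree_augmented.h entries above, together with the insertion loops quoted from fs/ubifs/log.c and fs/kernfs/dir.c, all follow the same canonical pattern: descend from root->rb_node comparing keys, then splice the new node in with rb_link_node() and rebalance with rb_insert_color(). A short sketch of that loop is shown below; struct my_item and my_item_insert are hypothetical, and the fragment needs <linux/rbtree.h>, so it builds only in a kernel context rather than as a standalone program.

    #include <linux/rbtree.h>

    struct my_item {
            unsigned long key;
            struct rb_node rb;      /* embedded link, as in ubifs_bud / kernfs_node above */
    };

    /* Descend to the right slot, then splice in and rebalance, mirroring the
     * insertion loops quoted from fs/ubifs/log.c and fs/kernfs/dir.c. */
    static void my_item_insert(struct rb_root *root, struct my_item *new)
    {
            struct rb_node **p = &root->rb_node, *parent = NULL;

            while (*p) {
                    struct my_item *it;

                    parent = *p;
                    it = rb_entry(parent, struct my_item, rb);
                    if (new->key < it->key)
                            p = &parent->rb_left;
                    else
                            p = &parent->rb_right;
            }
            rb_link_node(&new->rb, parent, p);
            rb_insert_color(&new->rb, root);
    }
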
rb                 81 include/linux/rmap.h 	struct rb_node rb;			/* locked by anon_vma->rwsem */
rb               3398 include/linux/skbuff.h #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
rb                654 include/media/v4l2-mem2mem.h 				struct v4l2_requestbuffers *rb);
rb                324 include/sound/hdaudio.h 	struct snd_dma_buffer rb;
rb                289 include/video/omapfb_dss.h 	s16 rr, rg, rb;
rb               4335 kernel/events/core.c 			       struct ring_buffer *rb);
rb               4547 kernel/events/core.c 	if (event->rb) {
rb               5010 kernel/events/core.c 	struct ring_buffer *rb;
rb               5023 kernel/events/core.c 	rb = event->rb;
rb               5024 kernel/events/core.c 	if (rb)
rb               5025 kernel/events/core.c 		events = atomic_xchg(&rb->poll, 0);
rb               5220 kernel/events/core.c 		struct ring_buffer *rb;
rb               5223 kernel/events/core.c 		rb = rcu_dereference(event->rb);
rb               5224 kernel/events/core.c 		if (!rb || !rb->nr_pages) {
rb               5228 kernel/events/core.c 		rb_toggle_paused(rb, !!arg);
rb               5351 kernel/events/core.c 	struct ring_buffer *rb;
rb               5354 kernel/events/core.c 	rb = rcu_dereference(event->rb);
rb               5355 kernel/events/core.c 	if (!rb)
rb               5358 kernel/events/core.c 	userpg = rb->user_page;
rb               5364 kernel/events/core.c 	userpg->data_size = perf_data_size(rb);
rb               5383 kernel/events/core.c 	struct ring_buffer *rb;
rb               5387 kernel/events/core.c 	rb = rcu_dereference(event->rb);
rb               5388 kernel/events/core.c 	if (!rb)
rb               5402 kernel/events/core.c 	userpg = rb->user_page;
rb               5434 kernel/events/core.c 	struct ring_buffer *rb;
rb               5444 kernel/events/core.c 	rb = rcu_dereference(event->rb);
rb               5445 kernel/events/core.c 	if (!rb)
rb               5451 kernel/events/core.c 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
rb               5467 kernel/events/core.c 			       struct ring_buffer *rb)
rb               5472 kernel/events/core.c 	if (event->rb) {
rb               5479 kernel/events/core.c 		old_rb = event->rb;
rb               5488 kernel/events/core.c 	if (rb) {
rb               5494 kernel/events/core.c 		spin_lock_irqsave(&rb->event_lock, flags);
rb               5495 kernel/events/core.c 		list_add_rcu(&event->rb_entry, &rb->event_list);
rb               5496 kernel/events/core.c 		spin_unlock_irqrestore(&rb->event_lock, flags);
rb               5512 kernel/events/core.c 	rcu_assign_pointer(event->rb, rb);
rb               5527 kernel/events/core.c 	struct ring_buffer *rb;
rb               5530 kernel/events/core.c 	rb = rcu_dereference(event->rb);
rb               5531 kernel/events/core.c 	if (rb) {
rb               5532 kernel/events/core.c 		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
rb               5540 kernel/events/core.c 	struct ring_buffer *rb;
rb               5543 kernel/events/core.c 	rb = rcu_dereference(event->rb);
rb               5544 kernel/events/core.c 	if (rb) {
rb               5545 kernel/events/core.c 		if (!refcount_inc_not_zero(&rb->refcount))
rb               5546 kernel/events/core.c 			rb = NULL;
rb               5550 kernel/events/core.c 	return rb;
rb               5553 kernel/events/core.c void ring_buffer_put(struct ring_buffer *rb)
rb               5555 kernel/events/core.c 	if (!refcount_dec_and_test(&rb->refcount))
rb               5558 kernel/events/core.c 	WARN_ON_ONCE(!list_empty(&rb->event_list));
rb               5560 kernel/events/core.c 	call_rcu(&rb->rcu_head, rb_free_rcu);
rb               5568 kernel/events/core.c 	atomic_inc(&event->rb->mmap_count);
rb               5571 kernel/events/core.c 		atomic_inc(&event->rb->aux_mmap_count);
rb               5591 kernel/events/core.c 	struct ring_buffer *rb = ring_buffer_get(event);
rb               5592 kernel/events/core.c 	struct user_struct *mmap_user = rb->mmap_user;
rb               5593 kernel/events/core.c 	int mmap_locked = rb->mmap_locked;
rb               5594 kernel/events/core.c 	unsigned long size = perf_data_size(rb);
rb               5604 kernel/events/core.c 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
rb               5605 kernel/events/core.c 	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
rb               5615 kernel/events/core.c 		atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
rb               5616 kernel/events/core.c 		atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
rb               5619 kernel/events/core.c 		rb_free_aux(rb);
rb               5620 kernel/events/core.c 		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
rb               5625 kernel/events/core.c 	atomic_dec(&rb->mmap_count);
rb               5634 kernel/events/core.c 	if (atomic_read(&rb->mmap_count))
rb               5644 kernel/events/core.c 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
rb               5665 kernel/events/core.c 		if (event->rb == rb)
rb               5694 kernel/events/core.c 	ring_buffer_put(rb); /* could be last */
rb               5710 kernel/events/core.c 	struct ring_buffer *rb = NULL;
rb               5739 kernel/events/core.c 		if (!event->rb)
rb               5747 kernel/events/core.c 		rb = event->rb;
rb               5748 kernel/events/core.c 		if (!rb)
rb               5751 kernel/events/core.c 		aux_offset = READ_ONCE(rb->user_page->aux_offset);
rb               5752 kernel/events/core.c 		aux_size = READ_ONCE(rb->user_page->aux_size);
rb               5754 kernel/events/core.c 		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
rb               5761 kernel/events/core.c 		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
rb               5768 kernel/events/core.c 		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
rb               5774 kernel/events/core.c 		if (!atomic_inc_not_zero(&rb->mmap_count))
rb               5777 kernel/events/core.c 		if (rb_has_aux(rb)) {
rb               5778 kernel/events/core.c 			atomic_inc(&rb->aux_mmap_count);
rb               5783 kernel/events/core.c 		atomic_set(&rb->aux_mmap_count, 1);
rb               5802 kernel/events/core.c 	if (event->rb) {
rb               5803 kernel/events/core.c 		if (event->rb->nr_pages != nr_pages) {
rb               5808 kernel/events/core.c 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
rb               5866 kernel/events/core.c 	WARN_ON(!rb && event->rb);
rb               5871 kernel/events/core.c 	if (!rb) {
rb               5872 kernel/events/core.c 		rb = rb_alloc(nr_pages,
rb               5876 kernel/events/core.c 		if (!rb) {
rb               5881 kernel/events/core.c 		atomic_set(&rb->mmap_count, 1);
rb               5882 kernel/events/core.c 		rb->mmap_user = get_current_user();
rb               5883 kernel/events/core.c 		rb->mmap_locked = extra;
rb               5885 kernel/events/core.c 		ring_buffer_attach(event, rb);
rb               5890 kernel/events/core.c 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
rb               5893 kernel/events/core.c 			rb->aux_mmap_locked = extra;
rb               5902 kernel/events/core.c 	} else if (rb) {
rb               5903 kernel/events/core.c 		atomic_dec(&rb->mmap_count);
rb               6513 kernel/events/core.c 			struct ring_buffer *rb = handle->rb;
rb               6514 kernel/events/core.c 			int events = local_inc_return(&rb->events);
rb               6517 kernel/events/core.c 				local_sub(wakeup_events, &rb->events);
rb               6518 kernel/events/core.c 				local_inc(&rb->wakeup);
rb               6930 kernel/events/core.c 	struct ring_buffer	*rb;
rb               6938 kernel/events/core.c 	struct ring_buffer *rb = ro->rb;
rb               6959 kernel/events/core.c 	if (rcu_dereference(parent->rb) == rb)
rb               6969 kernel/events/core.c 		.rb	= event->rb,
rb               6989 kernel/events/core.c 	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
rb               10749 kernel/events/core.c 	struct ring_buffer *rb = NULL;
rb               10799 kernel/events/core.c 		rb = ring_buffer_get(output_event);
rb               10800 kernel/events/core.c 		if (!rb)
rb               10804 kernel/events/core.c 	ring_buffer_attach(event, rb);
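
ring_buffer_get()/ring_buffer_put() in the kernel/events/core.c entries above implement the usual look-up-then-pin pattern: a reference is taken only if the count is still non-zero (refcount_inc_not_zero), and the last put schedules the buffer for freeing. The user-space analogue below models only the counting half; the kernel version additionally relies on RCU (rcu_dereference/call_rcu), which this sketch deliberately leaves out, and all names here are hypothetical.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
            atomic_int refcount;
            size_t size;
    };

    /* Pin the buffer only if it is still live, like refcount_inc_not_zero(). */
    static struct buf *buf_get(struct buf *b)
    {
            int old = atomic_load(&b->refcount);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&b->refcount, &old, old + 1))
                            return b;       /* won the race, reference taken */
            }
            return NULL;                    /* already on its way to being freed */
    }

    /* Drop a reference; the last put frees, like refcount_dec_and_test() + free. */
    static void buf_put(struct buf *b)
    {
            if (atomic_fetch_sub(&b->refcount, 1) == 1)
                    free(b);
    }

    int main(void)
    {
            struct buf *b = calloc(1, sizeof(*b));

            atomic_store(&b->refcount, 1);
            b->size = 4096;

            struct buf *pinned = buf_get(b);        /* second reference        */
            printf("pinned: %s\n", pinned ? "yes" : "no");
            buf_put(pinned);                        /* drop our pin            */
            buf_put(b);                             /* last reference, frees   */
            return 0;
    }
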
rb                 60 kernel/events/internal.h extern void rb_free(struct ring_buffer *rb);
rb                 64 kernel/events/internal.h 	struct ring_buffer *rb;
rb                 66 kernel/events/internal.h 	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
rb                 67 kernel/events/internal.h 	rb_free(rb);
rb                 70 kernel/events/internal.h static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
rb                 72 kernel/events/internal.h 	if (!pause && rb->nr_pages)
rb                 73 kernel/events/internal.h 		rb->paused = 0;
rb                 75 kernel/events/internal.h 		rb->paused = 1;
rb                 81 kernel/events/internal.h extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
rb                 83 kernel/events/internal.h extern void rb_free_aux(struct ring_buffer *rb);
rb                 85 kernel/events/internal.h extern void ring_buffer_put(struct ring_buffer *rb);
rb                 87 kernel/events/internal.h static inline bool rb_has_aux(struct ring_buffer *rb)
rb                 89 kernel/events/internal.h 	return !!rb->aux_nr_pages;
rb                 96 kernel/events/internal.h perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
rb                105 kernel/events/internal.h static inline int page_order(struct ring_buffer *rb)
rb                107 kernel/events/internal.h 	return rb->page_order;
rb                112 kernel/events/internal.h static inline int page_order(struct ring_buffer *rb)
rb                118 kernel/events/internal.h static inline unsigned long perf_data_size(struct ring_buffer *rb)
rb                120 kernel/events/internal.h 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
rb                123 kernel/events/internal.h static inline unsigned long perf_aux_size(struct ring_buffer *rb)
rb                125 kernel/events/internal.h 	return rb->aux_nr_pages << PAGE_SHIFT;
rb                143 kernel/events/internal.h 			struct ring_buffer *rb = handle->rb;		\
rb                146 kernel/events/internal.h 			handle->page &= rb->nr_pages - 1;		\
rb                147 kernel/events/internal.h 			handle->addr = rb->data_pages[handle->page];	\
rb                148 kernel/events/internal.h 			handle->size = PAGE_SIZE << page_order(rb);	\
rb                 22 kernel/events/ring_buffer.c 	atomic_set(&handle->rb->poll, EPOLLIN);
rb                 38 kernel/events/ring_buffer.c 	struct ring_buffer *rb = handle->rb;
rb                 46 kernel/events/ring_buffer.c 	(*(volatile unsigned int *)&rb->nest)++;
rb                 47 kernel/events/ring_buffer.c 	handle->wakeup = local_read(&rb->wakeup);
rb                 52 kernel/events/ring_buffer.c 	struct ring_buffer *rb = handle->rb;
rb                 60 kernel/events/ring_buffer.c 	nest = READ_ONCE(rb->nest);
rb                 62 kernel/events/ring_buffer.c 		WRITE_ONCE(rb->nest, nest - 1);
rb                 76 kernel/events/ring_buffer.c 	head = local_read(&rb->head);
rb                110 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->user_page->data_head, head);
rb                118 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->nest, 0);
rb                125 kernel/events/ring_buffer.c 	if (unlikely(head != local_read(&rb->head))) {
rb                126 kernel/events/ring_buffer.c 		WRITE_ONCE(rb->nest, 1);
rb                130 kernel/events/ring_buffer.c 	if (handle->wakeup != local_read(&rb->wakeup))
rb                153 kernel/events/ring_buffer.c 	struct ring_buffer *rb;
rb                169 kernel/events/ring_buffer.c 	rb = rcu_dereference(event->rb);
rb                170 kernel/events/ring_buffer.c 	if (unlikely(!rb))
rb                173 kernel/events/ring_buffer.c 	if (unlikely(rb->paused)) {
rb                174 kernel/events/ring_buffer.c 		if (rb->nr_pages)
rb                175 kernel/events/ring_buffer.c 			local_inc(&rb->lost);
rb                179 kernel/events/ring_buffer.c 	handle->rb    = rb;
rb                182 kernel/events/ring_buffer.c 	have_lost = local_read(&rb->lost);
rb                192 kernel/events/ring_buffer.c 		tail = READ_ONCE(rb->user_page->data_tail);
rb                193 kernel/events/ring_buffer.c 		offset = head = local_read(&rb->head);
rb                194 kernel/events/ring_buffer.c 		if (!rb->overwrite) {
rb                196 kernel/events/ring_buffer.c 							    perf_data_size(rb),
rb                217 kernel/events/ring_buffer.c 	} while (local_cmpxchg(&rb->head, offset, head) != offset);
rb                229 kernel/events/ring_buffer.c 	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
rb                230 kernel/events/ring_buffer.c 		local_add(rb->watermark, &rb->wakeup);
rb                232 kernel/events/ring_buffer.c 	page_shift = PAGE_SHIFT + page_order(rb);
rb                234 kernel/events/ring_buffer.c 	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
rb                236 kernel/events/ring_buffer.c 	handle->addr = rb->data_pages[handle->page] + offset;
rb                246 kernel/events/ring_buffer.c 		lost_event.lost        = local_xchg(&rb->lost, 0);
rb                257 kernel/events/ring_buffer.c 	local_inc(&rb->lost);
rb                304 kernel/events/ring_buffer.c ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb                306 kernel/events/ring_buffer.c 	long max_size = perf_data_size(rb);
rb                309 kernel/events/ring_buffer.c 		rb->watermark = min(max_size, watermark);
rb                311 kernel/events/ring_buffer.c 	if (!rb->watermark)
rb                312 kernel/events/ring_buffer.c 		rb->watermark = max_size / 2;
rb                315 kernel/events/ring_buffer.c 		rb->overwrite = 0;
rb                317 kernel/events/ring_buffer.c 		rb->overwrite = 1;
rb                319 kernel/events/ring_buffer.c 	refcount_set(&rb->refcount, 1);
rb                321 kernel/events/ring_buffer.c 	INIT_LIST_HEAD(&rb->event_list);
rb                322 kernel/events/ring_buffer.c 	spin_lock_init(&rb->event_lock);
rb                328 kernel/events/ring_buffer.c 	if (!rb->nr_pages)
rb                329 kernel/events/ring_buffer.c 		rb->paused = 1;
rb                364 kernel/events/ring_buffer.c 	struct ring_buffer *rb;
rb                375 kernel/events/ring_buffer.c 	rb = ring_buffer_get(output_event);
rb                376 kernel/events/ring_buffer.c 	if (!rb)
rb                379 kernel/events/ring_buffer.c 	if (!rb_has_aux(rb))
rb                390 kernel/events/ring_buffer.c 	if (!atomic_read(&rb->aux_mmap_count))
rb                393 kernel/events/ring_buffer.c 	if (!refcount_inc_not_zero(&rb->aux_refcount))
rb                396 kernel/events/ring_buffer.c 	nest = READ_ONCE(rb->aux_nest);
rb                404 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->aux_nest, nest + 1);
rb                406 kernel/events/ring_buffer.c 	aux_head = rb->aux_head;
rb                408 kernel/events/ring_buffer.c 	handle->rb = rb;
rb                419 kernel/events/ring_buffer.c 	if (!rb->aux_overwrite) {
rb                420 kernel/events/ring_buffer.c 		aux_tail = READ_ONCE(rb->user_page->aux_tail);
rb                421 kernel/events/ring_buffer.c 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
rb                422 kernel/events/ring_buffer.c 		if (aux_head - aux_tail < perf_aux_size(rb))
rb                423 kernel/events/ring_buffer.c 			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
rb                433 kernel/events/ring_buffer.c 			WRITE_ONCE(rb->aux_nest, 0);
rb                438 kernel/events/ring_buffer.c 	return handle->rb->aux_priv;
rb                442 kernel/events/ring_buffer.c 	rb_free_aux(rb);
rb                445 kernel/events/ring_buffer.c 	ring_buffer_put(rb);
rb                452 kernel/events/ring_buffer.c static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
rb                454 kernel/events/ring_buffer.c 	if (rb->aux_overwrite)
rb                457 kernel/events/ring_buffer.c 	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
rb                458 kernel/events/ring_buffer.c 		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
rb                478 kernel/events/ring_buffer.c 	struct ring_buffer *rb = handle->rb;
rb                482 kernel/events/ring_buffer.c 	if (rb->aux_overwrite) {
rb                486 kernel/events/ring_buffer.c 		rb->aux_head = aux_head;
rb                490 kernel/events/ring_buffer.c 		aux_head = rb->aux_head;
rb                491 kernel/events/ring_buffer.c 		rb->aux_head += size;
rb                510 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
rb                511 kernel/events/ring_buffer.c 	if (rb_need_aux_wakeup(rb))
rb                522 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->aux_nest, 0);
rb                524 kernel/events/ring_buffer.c 	rb_free_aux(rb);
rb                525 kernel/events/ring_buffer.c 	ring_buffer_put(rb);
rb                535 kernel/events/ring_buffer.c 	struct ring_buffer *rb = handle->rb;
rb                540 kernel/events/ring_buffer.c 	rb->aux_head += size;
rb                542 kernel/events/ring_buffer.c 	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
rb                543 kernel/events/ring_buffer.c 	if (rb_need_aux_wakeup(rb)) {
rb                545 kernel/events/ring_buffer.c 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
rb                548 kernel/events/ring_buffer.c 	handle->head = rb->aux_head;
rb                561 kernel/events/ring_buffer.c 	return handle->rb->aux_priv;
rb                593 kernel/events/ring_buffer.c static void rb_free_aux_page(struct ring_buffer *rb, int idx)
rb                595 kernel/events/ring_buffer.c 	struct page *page = virt_to_page(rb->aux_pages[idx]);
rb                602 kernel/events/ring_buffer.c static void __rb_free_aux(struct ring_buffer *rb)
rb                614 kernel/events/ring_buffer.c 	if (rb->aux_priv) {
rb                615 kernel/events/ring_buffer.c 		rb->free_aux(rb->aux_priv);
rb                616 kernel/events/ring_buffer.c 		rb->free_aux = NULL;
rb                617 kernel/events/ring_buffer.c 		rb->aux_priv = NULL;
rb                620 kernel/events/ring_buffer.c 	if (rb->aux_nr_pages) {
rb                621 kernel/events/ring_buffer.c 		for (pg = 0; pg < rb->aux_nr_pages; pg++)
rb                622 kernel/events/ring_buffer.c 			rb_free_aux_page(rb, pg);
rb                624 kernel/events/ring_buffer.c 		kfree(rb->aux_pages);
rb                625 kernel/events/ring_buffer.c 		rb->aux_nr_pages = 0;
rb                629 kernel/events/ring_buffer.c int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
rb                656 kernel/events/ring_buffer.c 	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
rb                658 kernel/events/ring_buffer.c 	if (!rb->aux_pages)
rb                661 kernel/events/ring_buffer.c 	rb->free_aux = event->pmu->free_aux;
rb                662 kernel/events/ring_buffer.c 	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
rb                666 kernel/events/ring_buffer.c 		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
rb                671 kernel/events/ring_buffer.c 		for (last = rb->aux_nr_pages + (1 << page_private(page));
rb                672 kernel/events/ring_buffer.c 		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
rb                673 kernel/events/ring_buffer.c 			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
rb                684 kernel/events/ring_buffer.c 		struct page *page = virt_to_page(rb->aux_pages[0]);
rb                690 kernel/events/ring_buffer.c 	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
rb                692 kernel/events/ring_buffer.c 	if (!rb->aux_priv)
rb                703 kernel/events/ring_buffer.c 	refcount_set(&rb->aux_refcount, 1);
rb                705 kernel/events/ring_buffer.c 	rb->aux_overwrite = overwrite;
rb                706 kernel/events/ring_buffer.c 	rb->aux_watermark = watermark;
rb                708 kernel/events/ring_buffer.c 	if (!rb->aux_watermark && !rb->aux_overwrite)
rb                709 kernel/events/ring_buffer.c 		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
rb                713 kernel/events/ring_buffer.c 		rb->aux_pgoff = pgoff;
rb                715 kernel/events/ring_buffer.c 		__rb_free_aux(rb);
rb                720 kernel/events/ring_buffer.c void rb_free_aux(struct ring_buffer *rb)
rb                722 kernel/events/ring_buffer.c 	if (refcount_dec_and_test(&rb->aux_refcount))
rb                723 kernel/events/ring_buffer.c 		__rb_free_aux(rb);
rb                733 kernel/events/ring_buffer.c __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
rb                735 kernel/events/ring_buffer.c 	if (pgoff > rb->nr_pages)
rb                739 kernel/events/ring_buffer.c 		return virt_to_page(rb->user_page);
rb                741 kernel/events/ring_buffer.c 	return virt_to_page(rb->data_pages[pgoff - 1]);
rb                759 kernel/events/ring_buffer.c 	struct ring_buffer *rb;
rb                769 kernel/events/ring_buffer.c 	rb = kzalloc(size, GFP_KERNEL);
rb                770 kernel/events/ring_buffer.c 	if (!rb)
rb                773 kernel/events/ring_buffer.c 	rb->user_page = perf_mmap_alloc_page(cpu);
rb                774 kernel/events/ring_buffer.c 	if (!rb->user_page)
rb                778 kernel/events/ring_buffer.c 		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
rb                779 kernel/events/ring_buffer.c 		if (!rb->data_pages[i])
rb                783 kernel/events/ring_buffer.c 	rb->nr_pages = nr_pages;
rb                785 kernel/events/ring_buffer.c 	ring_buffer_init(rb, watermark, flags);
rb                787 kernel/events/ring_buffer.c 	return rb;
rb                791 kernel/events/ring_buffer.c 		free_page((unsigned long)rb->data_pages[i]);
rb                793 kernel/events/ring_buffer.c 	free_page((unsigned long)rb->user_page);
rb                796 kernel/events/ring_buffer.c 	kfree(rb);
rb                810 kernel/events/ring_buffer.c void rb_free(struct ring_buffer *rb)
rb                814 kernel/events/ring_buffer.c 	perf_mmap_free_page((unsigned long)rb->user_page);
rb                815 kernel/events/ring_buffer.c 	for (i = 0; i < rb->nr_pages; i++)
rb                816 kernel/events/ring_buffer.c 		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
rb                817 kernel/events/ring_buffer.c 	kfree(rb);
rb                821 kernel/events/ring_buffer.c static int data_page_nr(struct ring_buffer *rb)
rb                823 kernel/events/ring_buffer.c 	return rb->nr_pages << page_order(rb);
rb                827 kernel/events/ring_buffer.c __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
rb                830 kernel/events/ring_buffer.c 	if (pgoff > data_page_nr(rb))
rb                833 kernel/events/ring_buffer.c 	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
rb                845 kernel/events/ring_buffer.c 	struct ring_buffer *rb;
rb                849 kernel/events/ring_buffer.c 	rb = container_of(work, struct ring_buffer, work);
rb                850 kernel/events/ring_buffer.c 	nr = data_page_nr(rb);
rb                852 kernel/events/ring_buffer.c 	base = rb->user_page;
rb                858 kernel/events/ring_buffer.c 	kfree(rb);
rb                861 kernel/events/ring_buffer.c void rb_free(struct ring_buffer *rb)
rb                863 kernel/events/ring_buffer.c 	schedule_work(&rb->work);
rb                868 kernel/events/ring_buffer.c 	struct ring_buffer *rb;
rb                875 kernel/events/ring_buffer.c 	rb = kzalloc(size, GFP_KERNEL);
rb                876 kernel/events/ring_buffer.c 	if (!rb)
rb                879 kernel/events/ring_buffer.c 	INIT_WORK(&rb->work, rb_free_work);
rb                885 kernel/events/ring_buffer.c 	rb->user_page = all_buf;
rb                886 kernel/events/ring_buffer.c 	rb->data_pages[0] = all_buf + PAGE_SIZE;
rb                888 kernel/events/ring_buffer.c 		rb->nr_pages = 1;
rb                889 kernel/events/ring_buffer.c 		rb->page_order = ilog2(nr_pages);
rb                892 kernel/events/ring_buffer.c 	ring_buffer_init(rb, watermark, flags);
rb                894 kernel/events/ring_buffer.c 	return rb;
rb                897 kernel/events/ring_buffer.c 	kfree(rb);
rb                906 kernel/events/ring_buffer.c perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
rb                908 kernel/events/ring_buffer.c 	if (rb->aux_nr_pages) {
rb                910 kernel/events/ring_buffer.c 		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
rb                914 kernel/events/ring_buffer.c 		if (pgoff >= rb->aux_pgoff) {
rb                915 kernel/events/ring_buffer.c 			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
rb                916 kernel/events/ring_buffer.c 			return virt_to_page(rb->aux_pages[aux_pgoff]);
rb                920 kernel/events/ring_buffer.c 	return __perf_mmap_to_page(rb, pgoff);
rb                 10 lib/interval_tree.c INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
rb                 59 lib/rbtree.c   static inline void rb_set_black(struct rb_node *rb)
rb                 61 lib/rbtree.c   	rb->__rb_parent_color |= RB_BLACK;
rb                 20 lib/rbtree_test.c 	struct rb_node rb;
rb                 39 lib/rbtree_test.c 		if (key < rb_entry(parent, struct test_node, rb)->key)
rb                 45 lib/rbtree_test.c 	rb_link_node(&node->rb, parent, new);
rb                 46 lib/rbtree_test.c 	rb_insert_color(&node->rb, &root->rb_root);
rb                 57 lib/rbtree_test.c 		if (key < rb_entry(parent, struct test_node, rb)->key)
rb                 65 lib/rbtree_test.c 	rb_link_node(&node->rb, parent, new);
rb                 66 lib/rbtree_test.c 	rb_insert_color_cached(&node->rb, root, leftmost);
rb                 71 lib/rbtree_test.c 	rb_erase(&node->rb, &root->rb_root);
rb                 76 lib/rbtree_test.c 	rb_erase_cached(&node->rb, root);
rb                 83 lib/rbtree_test.c 			 struct test_node, rb, u32, augmented, NODE_VAL)
rb                 95 lib/rbtree_test.c 		parent = rb_entry(rb_parent, struct test_node, rb);
rb                 99 lib/rbtree_test.c 			new = &parent->rb.rb_left;
rb                101 lib/rbtree_test.c 			new = &parent->rb.rb_right;
rb                105 lib/rbtree_test.c 	rb_link_node(&node->rb, rb_parent, new);
rb                106 lib/rbtree_test.c 	rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
rb                120 lib/rbtree_test.c 		parent = rb_entry(rb_parent, struct test_node, rb);
rb                124 lib/rbtree_test.c 			new = &parent->rb.rb_left;
rb                126 lib/rbtree_test.c 			new = &parent->rb.rb_right;
rb                132 lib/rbtree_test.c 	rb_link_node(&node->rb, rb_parent, new);
rb                133 lib/rbtree_test.c 	rb_insert_augmented_cached(&node->rb, root,
rb                140 lib/rbtree_test.c 	rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
rb                146 lib/rbtree_test.c 	rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
rb                158 lib/rbtree_test.c static bool is_red(struct rb_node *rb)
rb                160 lib/rbtree_test.c 	return !(rb->__rb_parent_color & 1);
rb                163 lib/rbtree_test.c static int black_path_count(struct rb_node *rb)
rb                166 lib/rbtree_test.c 	for (count = 0; rb; rb = rb_parent(rb))
rb                167 lib/rbtree_test.c 		count += !is_red(rb);
rb                175 lib/rbtree_test.c 	rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
rb                183 lib/rbtree_test.c 	struct rb_node *rb;
rb                185 lib/rbtree_test.c 	for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
rb                193 lib/rbtree_test.c 	struct rb_node *rb;
rb                197 lib/rbtree_test.c 	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
rb                198 lib/rbtree_test.c 		struct test_node *node = rb_entry(rb, struct test_node, rb);
rb                200 lib/rbtree_test.c 		WARN_ON_ONCE(is_red(rb) &&
rb                201 lib/rbtree_test.c 			     (!rb_parent(rb) || is_red(rb_parent(rb))));
rb                203 lib/rbtree_test.c 			blacks = black_path_count(rb);
rb                205 lib/rbtree_test.c 			WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
rb                206 lib/rbtree_test.c 				     blacks != black_path_count(rb));
rb                220 lib/rbtree_test.c 	struct rb_node *rb;
rb                223 lib/rbtree_test.c 	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
rb                224 lib/rbtree_test.c 		struct test_node *node = rb_entry(rb, struct test_node, rb);
rb                226 lib/rbtree_test.c 		if (node->rb.rb_left) {
rb                227 lib/rbtree_test.c 			subtree = rb_entry(node->rb.rb_left, struct test_node,
rb                228 lib/rbtree_test.c 					   rb)->augmented;
rb                232 lib/rbtree_test.c 		if (node->rb.rb_right) {
rb                233 lib/rbtree_test.c 			subtree = rb_entry(node->rb.rb_right, struct test_node,
rb                234 lib/rbtree_test.c 					   rb)->augmented;
rb                 23 mm/interval_tree.c INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
rb                 38 mm/interval_tree.c 	if (!prev->shared.rb.rb_right) {
rb                 40 mm/interval_tree.c 		link = &prev->shared.rb.rb_right;
rb                 42 mm/interval_tree.c 		parent = rb_entry(prev->shared.rb.rb_right,
rb                 43 mm/interval_tree.c 				  struct vm_area_struct, shared.rb);
rb                 46 mm/interval_tree.c 		while (parent->shared.rb.rb_left) {
rb                 47 mm/interval_tree.c 			parent = rb_entry(parent->shared.rb.rb_left,
rb                 48 mm/interval_tree.c 				struct vm_area_struct, shared.rb);
rb                 52 mm/interval_tree.c 		link = &parent->shared.rb.rb_left;
rb                 56 mm/interval_tree.c 	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
rb                 57 mm/interval_tree.c 	rb_insert_augmented(&node->shared.rb, &root->rb_root,
rb                 71 mm/interval_tree.c INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
rb                381 mm/kmemleak.c  	struct rb_node *rb = object_tree_root.rb_node;
rb                383 mm/kmemleak.c  	while (rb) {
rb                385 mm/kmemleak.c  			rb_entry(rb, struct kmemleak_object, rb_node);
rb                387 mm/kmemleak.c  			rb = object->rb_node.rb_left;
rb                389 mm/kmemleak.c  			rb = object->rb_node.rb_right;
rb               1110 mm/nommu.c     	struct rb_node *rb;
rb               1169 mm/nommu.c     		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
rb               1170 mm/nommu.c     			pregion = rb_entry(rb, struct vm_region, vm_rb);
rb                157 mm/swapfile.c  	struct rb_node *rb = rb_first(&sis->swap_extent_root);
rb                158 mm/swapfile.c  	return rb_entry(rb, struct swap_extent, rb_node);
rb                163 mm/swapfile.c  	struct rb_node *rb = rb_next(&se->rb_node);
rb                164 mm/swapfile.c  	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
rb                208 mm/swapfile.c  	struct rb_node *rb;
rb                210 mm/swapfile.c  	rb = sis->swap_extent_root.rb_node;
rb                211 mm/swapfile.c  	while (rb) {
rb                212 mm/swapfile.c  		se = rb_entry(rb, struct swap_extent, rb_node);
rb                214 mm/swapfile.c  			rb = rb->rb_left;
rb                216 mm/swapfile.c  			rb = rb->rb_right;
rb               2287 mm/swapfile.c  		struct rb_node *rb = sis->swap_extent_root.rb_node;
rb               2288 mm/swapfile.c  		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
rb               2290 mm/swapfile.c  		rb_erase(rb, &sis->swap_extent_root);
rb                187 net/packet/af_packet.c 		struct packet_ring_buffer *rb,
rb                464 net/packet/af_packet.c 				 const struct packet_ring_buffer *rb,
rb                471 net/packet/af_packet.c 	pg_vec_pos = position / rb->frames_per_block;
rb                472 net/packet/af_packet.c 	frame_offset = position % rb->frames_per_block;
rb                474 net/packet/af_packet.c 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
rb                475 net/packet/af_packet.c 		(frame_offset * rb->frame_size);
rb                484 net/packet/af_packet.c 		struct packet_ring_buffer *rb,
rb                487 net/packet/af_packet.c 	return packet_lookup_frame(po, rb, rb->head, status);
rb                569 net/packet/af_packet.c 			struct packet_ring_buffer *rb,
rb                573 net/packet/af_packet.c 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
rb                943 net/packet/af_packet.c static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
rb                945 net/packet/af_packet.c 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
rb               1087 net/packet/af_packet.c 			      const struct packet_ring_buffer *rb,
rb               1091 net/packet/af_packet.c 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
rb               1099 net/packet/af_packet.c static int prb_previous_blk_num(struct packet_ring_buffer *rb)
rb               1102 net/packet/af_packet.c 	if (rb->prb_bdqc.kactive_blk_num)
rb               1103 net/packet/af_packet.c 		prev = rb->prb_bdqc.kactive_blk_num-1;
rb               1105 net/packet/af_packet.c 		prev = rb->prb_bdqc.knum_blocks-1;
rb               1111 net/packet/af_packet.c 					 struct packet_ring_buffer *rb,
rb               1114 net/packet/af_packet.c 	unsigned int previous = prb_previous_blk_num(rb);
rb               1115 net/packet/af_packet.c 	return prb_lookup_block(po, rb, previous, status);
rb               1119 net/packet/af_packet.c 					     struct packet_ring_buffer *rb,
rb               1123 net/packet/af_packet.c 		return packet_previous_frame(po, rb, status);
rb               1125 net/packet/af_packet.c 	return __prb_previous_block(po, rb, status);
rb               1129 net/packet/af_packet.c 					    struct packet_ring_buffer *rb)
rb               1134 net/packet/af_packet.c 		return packet_increment_head(rb);
rb               1144 net/packet/af_packet.c 		struct packet_ring_buffer *rb,
rb               1147 net/packet/af_packet.c 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
rb               1148 net/packet/af_packet.c 	return packet_lookup_frame(po, rb, previous, status);
rb               1156 net/packet/af_packet.c static void packet_inc_pending(struct packet_ring_buffer *rb)
rb               1158 net/packet/af_packet.c 	this_cpu_inc(*rb->pending_refcnt);
rb               1161 net/packet/af_packet.c static void packet_dec_pending(struct packet_ring_buffer *rb)
rb               1163 net/packet/af_packet.c 	this_cpu_dec(*rb->pending_refcnt);
rb               1166 net/packet/af_packet.c static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
rb               1172 net/packet/af_packet.c 	if (rb->pending_refcnt == NULL)
rb               1176 net/packet/af_packet.c 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
rb               4292 net/packet/af_packet.c 	struct packet_ring_buffer *rb;
rb               4299 net/packet/af_packet.c 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb               4306 net/packet/af_packet.c 		if (packet_read_pending(rb))
rb               4315 net/packet/af_packet.c 		if (unlikely(rb->pg_vec))
rb               4345 net/packet/af_packet.c 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
rb               4346 net/packet/af_packet.c 		if (unlikely(rb->frames_per_block == 0))
rb               4348 net/packet/af_packet.c 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
rb               4350 net/packet/af_packet.c 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
rb               4363 net/packet/af_packet.c 				init_prb_bdqc(po, rb, pg_vec, req_u);
rb               4410 net/packet/af_packet.c 		swap(rb->pg_vec, pg_vec);
rb               4412 net/packet/af_packet.c 			swap(rb->rx_owner_map, rx_owner_map);
rb               4413 net/packet/af_packet.c 		rb->frame_max = (req->tp_frame_nr - 1);
rb               4414 net/packet/af_packet.c 		rb->head = 0;
rb               4415 net/packet/af_packet.c 		rb->frame_size = req->tp_frame_size;
rb               4418 net/packet/af_packet.c 		swap(rb->pg_vec_order, order);
rb               4419 net/packet/af_packet.c 		swap(rb->pg_vec_len, req->tp_block_nr);
rb               4421 net/packet/af_packet.c 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
rb               4457 net/packet/af_packet.c 	struct packet_ring_buffer *rb;
rb               4468 net/packet/af_packet.c 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
rb               4469 net/packet/af_packet.c 		if (rb->pg_vec) {
rb               4470 net/packet/af_packet.c 			expected_size += rb->pg_vec_len
rb               4471 net/packet/af_packet.c 						* rb->pg_vec_pages
rb               4484 net/packet/af_packet.c 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
rb               4485 net/packet/af_packet.c 		if (rb->pg_vec == NULL)
rb               4488 net/packet/af_packet.c 		for (i = 0; i < rb->pg_vec_len; i++) {
rb               4490 net/packet/af_packet.c 			void *kaddr = rb->pg_vec[i].buffer;
rb               4493 net/packet/af_packet.c 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
rb               1395 net/sched/sch_cake.c 			u32 rb = cake_heap_get_backlog(q, r);
rb               1397 net/sched/sch_cake.c 			if (rb > mb) {
rb               1399 net/sched/sch_cake.c 				mb = rb;
rb                355 net/sched/sch_htb.c static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
rb                357 net/sched/sch_htb.c 	if (RB_EMPTY_NODE(rb)) {
rb                360 net/sched/sch_htb.c 		rb_erase(rb, root);
rb                361 net/sched/sch_htb.c 		RB_CLEAR_NODE(rb);
rb                591 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
rb                594 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
rb                596 net/sunrpc/xprtrdma/rpc_rdma.c 	sge->addr = rdmab_addr(rb);
rb                598 net/sunrpc/xprtrdma/rpc_rdma.c 	sge->lkey = rdmab_lkey(rb);
rb                600 net/sunrpc/xprtrdma/rpc_rdma.c 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
rb                620 net/sunrpc/xprtrdma/rpc_rdma.c 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
rb                627 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
rb                629 net/sunrpc/xprtrdma/rpc_rdma.c 	sc->sc_device = rdmab_device(rb);
rb                631 net/sunrpc/xprtrdma/rpc_rdma.c 	sge[sge_no].addr = rdmab_addr(rb);
rb                633 net/sunrpc/xprtrdma/rpc_rdma.c 	sge[sge_no].lkey = rdmab_lkey(rb);
rb                634 net/sunrpc/xprtrdma/rpc_rdma.c 	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
rb                677 net/sunrpc/xprtrdma/rpc_rdma.c 				ib_dma_map_page(rdmab_device(rb), *ppages,
rb                679 net/sunrpc/xprtrdma/rpc_rdma.c 			if (ib_dma_mapping_error(rdmab_device(rb),
rb                683 net/sunrpc/xprtrdma/rpc_rdma.c 			sge[sge_no].lkey = rdmab_lkey(rb);
rb                705 net/sunrpc/xprtrdma/rpc_rdma.c 			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
rb                707 net/sunrpc/xprtrdma/rpc_rdma.c 		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
rb                710 net/sunrpc/xprtrdma/rpc_rdma.c 		sge[sge_no].lkey = rdmab_lkey(rb);
rb                600 net/sunrpc/xprtrdma/transport.c 				 struct rpcrdma_regbuf *rb, size_t size,
rb                603 net/sunrpc/xprtrdma/transport.c 	if (unlikely(rdmab_length(rb) < size)) {
rb                604 net/sunrpc/xprtrdma/transport.c 		if (!rpcrdma_regbuf_realloc(rb, size, flags))
rb                 85 net/sunrpc/xprtrdma/verbs.c static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
rb                 86 net/sunrpc/xprtrdma/verbs.c static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
rb               1002 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_regbuf *rb;
rb               1014 net/sunrpc/xprtrdma/verbs.c 	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
rb               1016 net/sunrpc/xprtrdma/verbs.c 	if (!rb)
rb               1018 net/sunrpc/xprtrdma/verbs.c 	req->rl_rdmabuf = rb;
rb               1019 net/sunrpc/xprtrdma/verbs.c 	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
rb               1368 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_regbuf *rb;
rb               1370 net/sunrpc/xprtrdma/verbs.c 	rb = kmalloc(sizeof(*rb), flags);
rb               1371 net/sunrpc/xprtrdma/verbs.c 	if (!rb)
rb               1373 net/sunrpc/xprtrdma/verbs.c 	rb->rg_data = kmalloc(size, flags);
rb               1374 net/sunrpc/xprtrdma/verbs.c 	if (!rb->rg_data) {
rb               1375 net/sunrpc/xprtrdma/verbs.c 		kfree(rb);
rb               1379 net/sunrpc/xprtrdma/verbs.c 	rb->rg_device = NULL;
rb               1380 net/sunrpc/xprtrdma/verbs.c 	rb->rg_direction = direction;
rb               1381 net/sunrpc/xprtrdma/verbs.c 	rb->rg_iov.length = size;
rb               1382 net/sunrpc/xprtrdma/verbs.c 	return rb;
rb               1394 net/sunrpc/xprtrdma/verbs.c bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
rb               1402 net/sunrpc/xprtrdma/verbs.c 	rpcrdma_regbuf_dma_unmap(rb);
rb               1403 net/sunrpc/xprtrdma/verbs.c 	kfree(rb->rg_data);
rb               1405 net/sunrpc/xprtrdma/verbs.c 	rb->rg_data = buf;
rb               1406 net/sunrpc/xprtrdma/verbs.c 	rb->rg_iov.length = size;
rb               1418 net/sunrpc/xprtrdma/verbs.c 			      struct rpcrdma_regbuf *rb)
rb               1422 net/sunrpc/xprtrdma/verbs.c 	if (rb->rg_direction == DMA_NONE)
rb               1425 net/sunrpc/xprtrdma/verbs.c 	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
rb               1426 net/sunrpc/xprtrdma/verbs.c 					    rdmab_length(rb), rb->rg_direction);
rb               1427 net/sunrpc/xprtrdma/verbs.c 	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
rb               1428 net/sunrpc/xprtrdma/verbs.c 		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
rb               1432 net/sunrpc/xprtrdma/verbs.c 	rb->rg_device = device;
rb               1433 net/sunrpc/xprtrdma/verbs.c 	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
rb               1437 net/sunrpc/xprtrdma/verbs.c static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
rb               1439 net/sunrpc/xprtrdma/verbs.c 	if (!rb)
rb               1442 net/sunrpc/xprtrdma/verbs.c 	if (!rpcrdma_regbuf_is_mapped(rb))
rb               1445 net/sunrpc/xprtrdma/verbs.c 	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
rb               1446 net/sunrpc/xprtrdma/verbs.c 			    rb->rg_direction);
rb               1447 net/sunrpc/xprtrdma/verbs.c 	rb->rg_device = NULL;
rb               1450 net/sunrpc/xprtrdma/verbs.c static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
rb               1452 net/sunrpc/xprtrdma/verbs.c 	rpcrdma_regbuf_dma_unmap(rb);
rb               1453 net/sunrpc/xprtrdma/verbs.c 	if (rb)
rb               1454 net/sunrpc/xprtrdma/verbs.c 		kfree(rb->rg_data);
rb               1455 net/sunrpc/xprtrdma/verbs.c 	kfree(rb);
rb                130 net/sunrpc/xprtrdma/xprt_rdma.h static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
rb                132 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_iov.addr;
rb                135 net/sunrpc/xprtrdma/xprt_rdma.h static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
rb                137 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_iov.length;
rb                140 net/sunrpc/xprtrdma/xprt_rdma.h static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
rb                142 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_iov.lkey;
rb                145 net/sunrpc/xprtrdma/xprt_rdma.h static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
rb                147 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_device;
rb                150 net/sunrpc/xprtrdma/xprt_rdma.h static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
rb                152 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_data;
rb                505 net/sunrpc/xprtrdma/xprt_rdma.h bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
rb                508 net/sunrpc/xprtrdma/xprt_rdma.h 			      struct rpcrdma_regbuf *rb);
rb                515 net/sunrpc/xprtrdma/xprt_rdma.h static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
rb                517 net/sunrpc/xprtrdma/xprt_rdma.h 	return rb->rg_device != NULL;
rb                528 net/sunrpc/xprtrdma/xprt_rdma.h 					  struct rpcrdma_regbuf *rb)
rb                530 net/sunrpc/xprtrdma/xprt_rdma.h 	if (likely(rpcrdma_regbuf_is_mapped(rb)))
rb                532 net/sunrpc/xprtrdma/xprt_rdma.h 	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
rb                 44 sound/hda/hdac_controller.c 	WARN_ON_ONCE(!bus->rb.area);
rb                 48 sound/hda/hdac_controller.c 	bus->corb.addr = bus->rb.addr;
rb                 49 sound/hda/hdac_controller.c 	bus->corb.buf = (__le32 *)bus->rb.area;
rb                 67 sound/hda/hdac_controller.c 	bus->rirb.addr = bus->rb.addr + 2048;
rb                 68 sound/hda/hdac_controller.c 	bus->rirb.buf = (__le32 *)(bus->rb.area + 2048);
rb                601 sound/hda/hdac_controller.c 	return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
rb                618 sound/hda/hdac_controller.c 	if (bus->rb.area)
rb                619 sound/hda/hdac_controller.c 		snd_dma_free_pages(&bus->rb);
rb                354 sound/pci/lola/lola.c 				  PAGE_SIZE, &chip->rb);
rb                358 sound/pci/lola/lola.c 	chip->corb.addr = chip->rb.addr;
rb                359 sound/pci/lola/lola.c 	chip->corb.buf = (__le32 *)chip->rb.area;
rb                360 sound/pci/lola/lola.c 	chip->rirb.addr = chip->rb.addr + 2048;
rb                361 sound/pci/lola/lola.c 	chip->rirb.buf = (__le32 *)(chip->rb.area + 2048);
rb                543 sound/pci/lola/lola.c 	if (chip->rb.area)
rb                544 sound/pci/lola/lola.c 		snd_dma_free_pages(&chip->rb);
rb                331 sound/pci/lola/lola.h 	struct snd_dma_buffer rb;
rb                952 sound/soc/codecs/tscs42xx.c #define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb,		\
rb                962 sound/soc/codecs/tscs42xx.c 			{R_PLLCTLB,   rb,   0xFF},	\
rb                439 sound/soc/meson/axg-spdifin.c 		unsigned int rb =
rb                442 sound/soc/meson/axg-spdifin.c 		if (rb == SNDRV_PCM_RATE_KNOT)
rb                445 sound/soc/meson/axg-spdifin.c 		drv->capture.rates |= rb;
rb                677 sound/soc/sof/intel/hda-stream.c 				  PAGE_SIZE, &bus->rb);
rb                815 sound/soc/sof/intel/hda-stream.c 	if (bus->rb.area)
rb                816 sound/soc/sof/intel/hda-stream.c 		snd_dma_free_pages(&bus->rb);
rb                205 sound/soc/uniphier/aio-core.c 	regmap_write(r, A2RBNMAPCTR0(sub->swm->rb.hw),
rb                206 sound/soc/uniphier/aio-core.c 		     MAPCTR0_EN | sub->swm->rb.map);
rb               1024 sound/soc/uniphier/aio-core.c 		(sub->swm->rb.map << CDA2D_CHMXAMODE_RSSEL_SHIFT);
rb               1042 sound/soc/uniphier/aio-core.c 				   BIT(sub->swm->rb.map),
rb               1043 sound/soc/uniphier/aio-core.c 				   BIT(sub->swm->rb.map));
rb               1049 sound/soc/uniphier/aio-core.c 				   BIT(sub->swm->rb.map),
rb               1061 sound/soc/uniphier/aio-core.c 		     CDA2D_RDPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
rb               1064 sound/soc/uniphier/aio-core.c 		regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
rb               1066 sound/soc/uniphier/aio-core.c 	regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
rb               1067 sound/soc/uniphier/aio-core.c 	regmap_read(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), &pos_u);
rb               1079 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), (u32)pos);
rb               1080 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), (u32)(pos >> 32));
rb               1081 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RDPTRLOAD, BIT(sub->swm->rb.map));
rb               1084 sound/soc/uniphier/aio-core.c 		regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &tmp);
rb               1094 sound/soc/uniphier/aio-core.c 		     CDA2D_WRPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
rb               1097 sound/soc/uniphier/aio-core.c 		regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
rb               1099 sound/soc/uniphier/aio-core.c 	regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
rb               1100 sound/soc/uniphier/aio-core.c 	regmap_read(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), &pos_u);
rb               1112 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXWRPTR(sub->swm->rb.map),
rb               1114 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map),
rb               1116 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_WRPTRLOAD, BIT(sub->swm->rb.map));
rb               1119 sound/soc/uniphier/aio-core.c 		regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &tmp);
rb               1129 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXBTH(sub->swm->rb.map), th);
rb               1130 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXRTH(sub->swm->rb.map), th);
rb               1145 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXCNFG(sub->swm->rb.map), 0);
rb               1146 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXBGNADRS(sub->swm->rb.map),
rb               1148 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXBGNADRSU(sub->swm->rb.map),
rb               1150 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXENDADRS(sub->swm->rb.map),
rb               1152 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBMXENDADRSU(sub->swm->rb.map),
rb               1155 sound/soc/uniphier/aio-core.c 	regmap_write(r, CDA2D_RBADRSLOAD, BIT(sub->swm->rb.map));
rb               1165 sound/soc/uniphier/aio-core.c 		regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
rb               1172 sound/soc/uniphier/aio-core.c 		regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
rb               1233 sound/soc/uniphier/aio-core.c 	regmap_read(r, CDA2D_RBMXIR(sub->swm->rb.map), &ir);
rb               1246 sound/soc/uniphier/aio-core.c 		regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
rb               1249 sound/soc/uniphier/aio-core.c 		regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
rb                 19 sound/soc/uniphier/aio-ld11.c 			.rb    = { 21, 14, },
rb                 32 sound/soc/uniphier/aio-ld11.c 			.rb    = { 22, 15, },
rb                 46 sound/soc/uniphier/aio-ld11.c 			.rb    = { 23, 16, },
rb                 60 sound/soc/uniphier/aio-ld11.c 			.rb    = { 26, 17, },
rb                 73 sound/soc/uniphier/aio-ld11.c 			.rb    = { 0, 0, },
rb                 87 sound/soc/uniphier/aio-ld11.c 			.rb    = { 0, 0, },
rb                101 sound/soc/uniphier/aio-ld11.c 			.rb    = { 2, 2, },
rb                114 sound/soc/uniphier/aio-ld11.c 			.rb    = { 3, 3, },
rb                127 sound/soc/uniphier/aio-ld11.c 			.rb    = { 7, 5, },
rb                142 sound/soc/uniphier/aio-ld11.c 			.rb    = { 8, 6, },
rb                158 sound/soc/uniphier/aio-ld11.c 			.rb    = { 1, 1, },
rb                172 sound/soc/uniphier/aio-ld11.c 			.rb    = { 1, 1, },
rb                 19 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 16, 11, },
rb                 33 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 17, 12, },
rb                 47 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 0, 0, },
rb                 61 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 1, 1, },
rb                 75 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 2, 2, },
rb                 88 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 6, 4, },
rb                101 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 6, 4, },
rb                114 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 7, 5, },
rb                127 sound/soc/uniphier/aio-pxs2.c 			.rb    = { 7, 5, },
rb                198 sound/soc/uniphier/aio.h 	struct uniphier_aio_selector rb;
rb                 79 tools/include/linux/rbtree_augmented.h RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
rb                 81 tools/include/linux/rbtree_augmented.h 	while (rb != stop) {						\
rb                 82 tools/include/linux/rbtree_augmented.h 		RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD);	\
rb                 85 tools/include/linux/rbtree_augmented.h 		rb = rb_parent(&node->RBFIELD);				\
rb                155 tools/include/linux/rbtree_augmented.h #define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
rb                156 tools/include/linux/rbtree_augmented.h #define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
rb                157 tools/include/linux/rbtree_augmented.h #define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
rb                159 tools/include/linux/rbtree_augmented.h static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
rb                161 tools/include/linux/rbtree_augmented.h 	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
rb                164 tools/include/linux/rbtree_augmented.h static inline void rb_set_parent_color(struct rb_node *rb,
rb                167 tools/include/linux/rbtree_augmented.h 	rb->__rb_parent_color = (unsigned long)p | color;
rb                 59 tools/lib/rbtree.c static inline void rb_set_black(struct rb_node *rb)
rb                 61 tools/lib/rbtree.c 	rb->__rb_parent_color |= RB_BLACK;
rb                510 tools/perf/builtin-kvm.c 	struct rb_node **rb = &result->rb_node;
rb                514 tools/perf/builtin-kvm.c 	while (*rb) {
rb                515 tools/perf/builtin-kvm.c 		p = container_of(*rb, struct kvm_event, rb);
rb                516 tools/perf/builtin-kvm.c 		parent = *rb;
rb                519 tools/perf/builtin-kvm.c 			rb = &(*rb)->rb_left;
rb                521 tools/perf/builtin-kvm.c 			rb = &(*rb)->rb_right;
rb                524 tools/perf/builtin-kvm.c 	rb_link_node(&event->rb, parent, rb);
rb                525 tools/perf/builtin-kvm.c 	rb_insert_color(&event->rb, result);
rb                568 tools/perf/builtin-kvm.c 	return container_of(node, struct kvm_event, rb);
rb                 48 tools/perf/builtin-lock.c 	struct rb_node		rb;		/* used for sorting */
rb                115 tools/perf/builtin-lock.c 	struct rb_node		rb;
rb                130 tools/perf/builtin-lock.c 		st = container_of(node, struct thread_stat, rb);
rb                144 tools/perf/builtin-lock.c 	struct rb_node **rb = &thread_stats.rb_node;
rb                148 tools/perf/builtin-lock.c 	while (*rb) {
rb                149 tools/perf/builtin-lock.c 		p = container_of(*rb, struct thread_stat, rb);
rb                150 tools/perf/builtin-lock.c 		parent = *rb;
rb                153 tools/perf/builtin-lock.c 			rb = &(*rb)->rb_left;
rb                155 tools/perf/builtin-lock.c 			rb = &(*rb)->rb_right;
rb                160 tools/perf/builtin-lock.c 	rb_link_node(&new->rb, parent, rb);
rb                161 tools/perf/builtin-lock.c 	rb_insert_color(&new->rb, &thread_stats);
rb                202 tools/perf/builtin-lock.c 	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
rb                203 tools/perf/builtin-lock.c 	rb_insert_color(&st->rb, &thread_stats);
rb                285 tools/perf/builtin-lock.c 	struct rb_node **rb = &result.rb_node;
rb                289 tools/perf/builtin-lock.c 	while (*rb) {
rb                290 tools/perf/builtin-lock.c 		p = container_of(*rb, struct lock_stat, rb);
rb                291 tools/perf/builtin-lock.c 		parent = *rb;
rb                294 tools/perf/builtin-lock.c 			rb = &(*rb)->rb_left;
rb                296 tools/perf/builtin-lock.c 			rb = &(*rb)->rb_right;
rb                299 tools/perf/builtin-lock.c 	rb_link_node(&st->rb, parent, rb);
rb                300 tools/perf/builtin-lock.c 	rb_insert_color(&st->rb, &result);
rb                315 tools/perf/builtin-lock.c 	return container_of(node, struct lock_stat, rb);
rb                773 tools/perf/builtin-lock.c 		st = container_of(node, struct thread_stat, rb);
rb                 19 tools/perf/util/block-range.c 	struct rb_node *rb;
rb                 22 tools/perf/util/block-range.c 	for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
rb                 23 tools/perf/util/block-range.c 		struct block_range *entry = rb_entry(rb, struct block_range, node);
rb                 29 tools/perf/util/kvm-stat.h 	struct rb_node rb;
rb                976 tools/testing/selftests/net/tls.c 		char rb[8001];
rb                981 tools/testing/selftests/net/tls.c 			res = recv(self->cfd, rb,
rb                982 tools/testing/selftests/net/tls.c 				   left > sizeof(rb) ? sizeof(rb) : left, 0);
rb                 91 tools/testing/selftests/powerpc/include/reg.h #define VSX_XX1(xs, ra, rb)	(((xs) & 0x1f) << 21 | ((ra) << 16) |  \
rb                 92 tools/testing/selftests/powerpc/include/reg.h 				 ((rb) << 11) | (((xs) >> 5)))
rb                 93 tools/testing/selftests/powerpc/include/reg.h #define STXVD2X(xs, ra, rb)	.long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
rb                 94 tools/testing/selftests/powerpc/include/reg.h #define LXVD2X(xs, ra, rb)	.long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
rb                 26 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
rb                 27 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
rb                 30 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
rb                 32 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
rb                 34 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	subf	rb,rb,ra;		/* subtract start value */	\
rb                 36 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	add	ra,ra,rb;		/* add on to user time */	\
rb                 39 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
rb                 41 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
rb                 43 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	subf	rb,rb,ra;		/* subtract start value */	\
rb                 45 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 	add	ra,ra,rb;		/* add on to system time */	\