rr                 48 arch/arm/mach-rpc/include/mach/acornfb.h 		u_int rr, v, p;
rr                 51 arch/arm/mach-rpc/include/mach/acornfb.h 		rr = 41667 * r;
rr                 53 arch/arm/mach-rpc/include/mach/acornfb.h 		v = (rr + pixclk / 2) / pixclk;
rr                 58 arch/arm/mach-rpc/include/mach/acornfb.h 		p = (rr + v / 2) / v;
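
The acornfb hits above compute a VIDC divider with rounded integer division: rr = 41667 * r scales the reference period, and each (x + y/2) / y step rounds to the nearest integer instead of truncating. A minimal standalone sketch of that rounding idiom, with illustrative input values (not taken from the driver):

    #include <stdio.h>

    /* Round-to-nearest unsigned division, the (x + y/2) / y idiom used
     * above when deriving the VIDC divider from the pixel clock. */
    static unsigned int div_round(unsigned int x, unsigned int y)
    {
        return (x + y / 2) / y;
    }

    int main(void)
    {
        unsigned int rr = 41667 * 2;      /* scaled reference period */
        unsigned int pixclk = 39722;      /* example pixel clock period */
        unsigned int v = div_round(rr, pixclk);
        unsigned int p = div_round(rr, v);

        printf("v=%u p=%u\n", v, p);
        return 0;
    }
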
rr                451 arch/ia64/include/asm/pal.h 			rr		: 1,	/* Region regs valid */
rr                691 arch/ia64/include/asm/pal.h #define pmci_proc_region_regs_valid		pme_processor.rr
rr                384 arch/ia64/include/asm/sal.h 		    rr              : 1,
rr                392 arch/ia64/include/asm/sal.h 	u64 rr[8];
rr                865 arch/ia64/include/asm/sal.h 	u64 rr[8];		/* Region Registers */
rr                 21 arch/ia64/include/asm/tlbflush.h 	u64 rr;
rr               1238 arch/ia64/kernel/mca.c 			if (old_rr != p->rr) {
rr               1239 arch/ia64/kernel/mca.c 				ia64_set_rr(p->ifa, p->rr);
rr               1252 arch/ia64/kernel/mca.c 			if (old_rr != p->rr) {
rr                430 arch/ia64/mm/tlb.c 	if (va_rid != RR_TO_RID(p->rr))
rr                532 arch/ia64/mm/tlb.c 		p->rr = ia64_get_rr(va);
rr                541 arch/ia64/mm/tlb.c 		p->rr = ia64_get_rr(va);
rr               1095 arch/mips/include/uapi/asm/inst.h 	struct m16e_rr rr;
rr                372 arch/mips/kernel/branch.c 		if (inst.rr.func == MIPS16e_jr_func) {
rr                374 arch/mips/kernel/branch.c 			if (inst.rr.ra)
rr                378 arch/mips/kernel/branch.c 				    regs->regs[reg16to32[inst.rr.rx]];
rr                380 arch/mips/kernel/branch.c 			if (inst.rr.l) {
rr                381 arch/mips/kernel/branch.c 				if (inst.rr.nd)
rr                140 arch/powerpc/kvm/book3s_32_mmu_host.c 	register int rr = 0;
rr                170 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (rr == 16) {
rr                173 arch/powerpc/kvm/book3s_32_mmu_host.c 		rr = 0;
rr                179 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (!evict && (pteg[rr] & PTE_V)) {
rr                180 arch/powerpc/kvm/book3s_32_mmu_host.c 		rr += 2;
rr                184 arch/powerpc/kvm/book3s_32_mmu_host.c 	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
rr                210 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (pteg[rr]) {
rr                211 arch/powerpc/kvm/book3s_32_mmu_host.c 		pteg[rr] = 0;
rr                214 arch/powerpc/kvm/book3s_32_mmu_host.c 	pteg[rr + 1] = pteg1;
rr                215 arch/powerpc/kvm/book3s_32_mmu_host.c 	pteg[rr] = pteg0;
rr                246 arch/powerpc/kvm/book3s_32_mmu_host.c 	pte->slot = (ulong)&pteg[rr];
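
The book3s_32_mmu_host.c hits show a round-robin victim pointer: rr steps through a 16-word PTE group two words at a time, wraps back to 0 at 16, and skips ahead once when the current slot is still valid and no eviction is forced. A standalone model of that selection (PTE_V and the helper are illustrative, not the kernel definitions):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define PTEG_WORDS 16           /* 8 PTEs, 2 words each, as in the listing */
    #define PTE_V      0x80000000u  /* "valid" bit; illustrative value only */

    /* Pick the next round-robin slot in a PTE group, trying the other PTE
     * of the pair when the current one is valid and we need not evict. */
    static int pick_slot(const uint32_t *pteg, int rr, bool evict)
    {
        if (rr == 16)
            rr = 0;
        if (!evict && (pteg[rr] & PTE_V))
            rr += 2;
        return rr % PTEG_WORDS;
    }

    int main(void)
    {
        uint32_t pteg[PTEG_WORDS] = { [0] = PTE_V };
        int rr = pick_slot(pteg, 0, false);

        printf("victim slot: %d\n", rr);   /* 2: slot 0 was still valid */
        return 0;
    }
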
rr                314 arch/x86/kernel/cpu/microcode/core.c 		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
rr                315 arch/x86/kernel/cpu/microcode/core.c 		if (*rr)
rr                316 arch/x86/kernel/cpu/microcode/core.c 			start = *rr;
rr                498 arch/x86/kernel/cpu/resctrl/ctrlmondata.c void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
rr                504 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	rr->rgrp = rdtgrp;
rr                505 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	rr->evtid = evtid;
rr                506 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	rr->d = d;
rr                507 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	rr->val = 0;
rr                508 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	rr->first = first;
rr                510 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
rr                521 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	struct rmid_read rr;
rr                542 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	mon_event_read(&rr, d, rdtgrp, evtid, false);
rr                544 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	if (rr.val & RMID_VAL_ERROR)
rr                546 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	else if (rr.val & RMID_VAL_UNAVAIL)
rr                549 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 		seq_printf(m, "%llu\n", rr.val * r->mon_scale);
rr                590 arch/x86/kernel/cpu/resctrl/internal.h void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
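
The resctrl hits follow one pattern: mon_event_read() fills a struct rmid_read request (group, event id, domain, first-read flag), dispatches mon_event_count() to a CPU of the domain via smp_call_function_any(), and the caller then checks rr.val for error/unavailable flags. A compilable userspace model of that request/response shape; the flag bit positions, struct layout and helper names below are placeholders, not the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Placeholder flags; the kernel marks errors in the top bits of the
     * counter value, exact positions omitted here. */
    #define VAL_ERROR   (1ull << 63)
    #define VAL_UNAVAIL (1ull << 62)

    struct rmid_read_model {
        int evtid;
        bool first;
        uint64_t val;
    };

    /* Stand-in for the per-CPU counting callback. */
    static void count_event(struct rmid_read_model *rr)
    {
        rr->val = 12345;             /* pretend hardware counter read */
    }

    static uint64_t event_read(int evtid, bool first)
    {
        struct rmid_read_model rr = { .evtid = evtid, .first = first };

        count_event(&rr);            /* kernel: smp_call_function_any() */
        return rr.val;
    }

    int main(void)
    {
        uint64_t val = event_read(1, false);

        if (val & VAL_ERROR)
            puts("Error");
        else if (val & VAL_UNAVAIL)
            puts("Unavailable");
        else
            printf("%llu\n", (unsigned long long)val);
        return 0;
    }
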
rr                225 arch/x86/kernel/cpu/resctrl/monitor.c static int __mon_event_count(u32 rmid, struct rmid_read *rr)
rr                230 arch/x86/kernel/cpu/resctrl/monitor.c 	tval = __rmid_read(rmid, rr->evtid);
rr                232 arch/x86/kernel/cpu/resctrl/monitor.c 		rr->val = tval;
rr                235 arch/x86/kernel/cpu/resctrl/monitor.c 	switch (rr->evtid) {
rr                237 arch/x86/kernel/cpu/resctrl/monitor.c 		rr->val += tval;
rr                240 arch/x86/kernel/cpu/resctrl/monitor.c 		m = &rr->d->mbm_total[rmid];
rr                243 arch/x86/kernel/cpu/resctrl/monitor.c 		m = &rr->d->mbm_local[rmid];
rr                253 arch/x86/kernel/cpu/resctrl/monitor.c 	if (rr->first) {
rr                263 arch/x86/kernel/cpu/resctrl/monitor.c 	rr->val += m->chunks;
rr                271 arch/x86/kernel/cpu/resctrl/monitor.c static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
rr                274 arch/x86/kernel/cpu/resctrl/monitor.c 	struct mbm_state *m = &rr->d->mbm_local[rmid];
rr                277 arch/x86/kernel/cpu/resctrl/monitor.c 	tval = __rmid_read(rmid, rr->evtid);
rr                300 arch/x86/kernel/cpu/resctrl/monitor.c 	struct rmid_read *rr = info;
rr                303 arch/x86/kernel/cpu/resctrl/monitor.c 	rdtgrp = rr->rgrp;
rr                305 arch/x86/kernel/cpu/resctrl/monitor.c 	if (__mon_event_count(rdtgrp->mon.rmid, rr))
rr                315 arch/x86/kernel/cpu/resctrl/monitor.c 			if (__mon_event_count(entry->mon.rmid, rr))
rr                438 arch/x86/kernel/cpu/resctrl/monitor.c 	struct rmid_read rr;
rr                440 arch/x86/kernel/cpu/resctrl/monitor.c 	rr.first = false;
rr                441 arch/x86/kernel/cpu/resctrl/monitor.c 	rr.d = d;
rr                448 arch/x86/kernel/cpu/resctrl/monitor.c 		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
rr                449 arch/x86/kernel/cpu/resctrl/monitor.c 		__mon_event_count(rmid, &rr);
rr                452 arch/x86/kernel/cpu/resctrl/monitor.c 		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
rr                460 arch/x86/kernel/cpu/resctrl/monitor.c 			__mon_event_count(rmid, &rr);
rr                462 arch/x86/kernel/cpu/resctrl/monitor.c 			mbm_bw_count(rmid, &rr);
rr               2357 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct rmid_read rr;
rr               2390 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
rr                608 arch/x86/mm/numa.c 	int rr, i;
rr                610 arch/x86/mm/numa.c 	rr = first_node(node_online_map);
rr                614 arch/x86/mm/numa.c 		numa_set_node(i, rr);
rr                615 arch/x86/mm/numa.c 		rr = next_node_in(rr, node_online_map);
rr                 76 block/partitions/acorn.c 	struct riscix_record *rr;
rr                 78 block/partitions/acorn.c 	rr = read_part_sector(state, first_sect, &sect);
rr                 79 block/partitions/acorn.c 	if (!rr)
rr                 85 block/partitions/acorn.c 	if (rr->magic == RISCIX_MAGIC) {
rr                 93 block/partitions/acorn.c 			if (rr->part[part].one &&
rr                 94 block/partitions/acorn.c 			    memcmp(rr->part[part].name, "All\0", 4)) {
rr                 96 block/partitions/acorn.c 					le32_to_cpu(rr->part[part].start),
rr                 97 block/partitions/acorn.c 					le32_to_cpu(rr->part[part].length));
rr                 99 block/partitions/acorn.c 				strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE);
rr                327 crypto/camellia_generic.c #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) ({		\
rr                331 crypto/camellia_generic.c 	rl = (rl << bits) + (rr >> (32 - bits));	\
rr                332 crypto/camellia_generic.c 	rr = (rr << bits) + (w0 >> (32 - bits));	\
rr                335 crypto/camellia_generic.c #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) ({	\
rr                339 crypto/camellia_generic.c 	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
rr                340 crypto/camellia_generic.c 	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
rr                341 crypto/camellia_generic.c 	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
rr                819 crypto/camellia_generic.c #define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) ({ \
rr                823 crypto/camellia_generic.c 	t2 |= rr;							\
rr                831 crypto/camellia_generic.c 	rr ^= rol32(t3, 1);						\
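
The camellia ROLDQ macro rotates a 128-bit quantity held in four 32-bit words left by `bits` (ROLDQo32 covers shifts of 32 or more): each word takes its own bits shifted up plus the next word's high bits shifted down, with the original first word wrapping into the last. A standalone sketch of the same rotation for 0 < bits < 32:

    #include <stdint.h>
    #include <stdio.h>

    /* Rotate the 128-bit value (w[0]:w[1]:w[2]:w[3]) left by bits,
     * 0 < bits < 32, using the same word-by-word carry scheme as ROLDQ. */
    static void rol128(uint32_t w[4], unsigned int bits)
    {
        uint32_t ll = w[0], lr = w[1], rl = w[2], rr = w[3];

        w[0] = (ll << bits) | (lr >> (32 - bits));
        w[1] = (lr << bits) | (rl >> (32 - bits));
        w[2] = (rl << bits) | (rr >> (32 - bits));
        w[3] = (rr << bits) | (ll >> (32 - bits));
    }

    int main(void)
    {
        uint32_t w[4] = { 0x80000000u, 0, 0, 1 };

        rol128(w, 1);
        printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
        /* prints 00000000 00000000 00000000 00000003 */
        return 0;
    }
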
rr                341 crypto/cast5_generic.c 	if (!(c->rr)) {
rr                375 crypto/cast5_generic.c 	if (!(c->rr)) {
rr                488 crypto/cast5_generic.c 	c->rr = key_len <= 10 ? 1 : 0;
rr                  8 drivers/acpi/reboot.c static void acpi_pci_reboot(struct acpi_generic_address *rr, u8 reset_value)
rr                 18 drivers/acpi/reboot.c 	devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
rr                 19 drivers/acpi/reboot.c 			  (rr->address >> 16) & 0xffff);
rr                 23 drivers/acpi/reboot.c 			(rr->address & 0xffff), reset_value);
rr                 26 drivers/acpi/reboot.c static inline void acpi_pci_reboot(struct acpi_generic_address *rr,
rr                 35 drivers/acpi/reboot.c 	struct acpi_generic_address *rr;
rr                 41 drivers/acpi/reboot.c 	rr = &acpi_gbl_FADT.reset_register;
rr                 58 drivers/acpi/reboot.c 	switch (rr->space_id) {
rr                 60 drivers/acpi/reboot.c 		acpi_pci_reboot(rr, reset_value);
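
acpi_pci_reboot() above unpacks the FADT reset register's 64-bit Address into PCI device, function and config-space offset (bits 32-47, 16-31 and 0-15, as the shifts in the listing show) before writing the reset value. A small standalone sketch of that field extraction; the address value is an example and PCI_DEVFN is reproduced here only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_DEVFN(dev, fn)  ((((dev) & 0x1f) << 3) | ((fn) & 0x07))

    int main(void)
    {
        uint64_t address = 0x0000000200030040ull;   /* dev 2, fn 3, reg 0x40 */
        unsigned int dev = (address >> 32) & 0xffff;
        unsigned int fn  = (address >> 16) & 0xffff;
        unsigned int reg = address & 0xffff;

        printf("devfn=%#x reg=%#x\n", PCI_DEVFN(dev, fn), reg);
        return 0;
    }
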
rr               2199 drivers/block/drbd/drbd_main.c 	int rr;
rr               2201 drivers/block/drbd/drbd_main.c 	rr = drbd_free_peer_reqs(device, &device->active_ee);
rr               2202 drivers/block/drbd/drbd_main.c 	if (rr)
rr               2203 drivers/block/drbd/drbd_main.c 		drbd_err(device, "%d EEs in active list found!\n", rr);
rr               2205 drivers/block/drbd/drbd_main.c 	rr = drbd_free_peer_reqs(device, &device->sync_ee);
rr               2206 drivers/block/drbd/drbd_main.c 	if (rr)
rr               2207 drivers/block/drbd/drbd_main.c 		drbd_err(device, "%d EEs in sync list found!\n", rr);
rr               2209 drivers/block/drbd/drbd_main.c 	rr = drbd_free_peer_reqs(device, &device->read_ee);
rr               2210 drivers/block/drbd/drbd_main.c 	if (rr)
rr               2211 drivers/block/drbd/drbd_main.c 		drbd_err(device, "%d EEs in read list found!\n", rr);
rr               2213 drivers/block/drbd/drbd_main.c 	rr = drbd_free_peer_reqs(device, &device->done_ee);
rr               2214 drivers/block/drbd/drbd_main.c 	if (rr)
rr               2215 drivers/block/drbd/drbd_main.c 		drbd_err(device, "%d EEs in done list found!\n", rr);
rr               2217 drivers/block/drbd/drbd_main.c 	rr = drbd_free_peer_reqs(device, &device->net_ee);
rr               2218 drivers/block/drbd/drbd_main.c 	if (rr)
rr               2219 drivers/block/drbd/drbd_main.c 		drbd_err(device, "%d EEs in net list found!\n", rr);
rr                845 drivers/block/drbd/drbd_receiver.c 	int rr;
rr                851 drivers/block/drbd/drbd_receiver.c 	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
rr                853 drivers/block/drbd/drbd_receiver.c 	if (rr > 0 || rr == -EAGAIN) {
rr               1286 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
rr               1288 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
rr               1290 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
rr               1292 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
rr               1294 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
rr               1296 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
rr               1412 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 		uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
rr               1426 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 			if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
rr               1428 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
rr               1430 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
rr               1432 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
rr               1434 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
rr               1320 drivers/gpu/drm/omapdrm/dss/dispc.c 	coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
rr                224 drivers/gpu/drm/omapdrm/dss/omapdss.h 	s16 rr, rg, rb;
rr                199 drivers/gpu/drm/tilcdc/tilcdc_crtc.c 	int r = rate / 100, rr = real_rate / 100;
rr                201 drivers/gpu/drm/tilcdc/tilcdc_crtc.c 	return (unsigned int)(abs(((rr - r) * 100) / r));
rr                498 drivers/hwmon/dme1737.c 	int rr = (ix == 1) ? reg >> 4 : reg;
rr                500 drivers/hwmon/dme1737.c 	return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
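
The dme1737 helper gates a table lookup on an enable bit: one of the two ramp-rate fields lives in the upper nibble (hence the pre-shift for ix == 1), bit 3 enables the feature and the low three bits index a rate table. A standalone sketch with an illustrative table (the real PWM_RR[] values live in the driver):

    #include <stdio.h>

    /* Illustrative ramp-rate table, not the driver's actual values. */
    static const int PWM_RR[8] = { 206, 104, 52, 26, 13, 7, 3, 2 };

    /* Decode a ramp-rate nibble: bit 3 enables, bits 2..0 index the table. */
    static int pwm_rr_from_reg(int reg, int ix)
    {
        int rr = (ix == 1) ? reg >> 4 : reg;

        return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
    }

    int main(void)
    {
        printf("%d\n", pwm_rr_from_reg(0xa5, 1)); /* nibble 0xa: enabled, index 2 */
        return 0;
    }
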
rr                895 drivers/i3c/master/i3c-master-cdns.c 	u32 rr;
rr                897 drivers/i3c/master/i3c-master-cdns.c 	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
rr                900 drivers/i3c/master/i3c-master-cdns.c 	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
rr                907 drivers/i3c/master/i3c-master-cdns.c 	u32 rr;
rr                921 drivers/i3c/master/i3c-master-cdns.c 		rr = readl(master->regs + DEV_ID_RR0(i));
rr                922 drivers/i3c/master/i3c-master-cdns.c 		if (!(rr & DEV_ID_RR0_IS_I3C) ||
rr                923 drivers/i3c/master/i3c-master-cdns.c 		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
rr               1042 drivers/i3c/master/i3c-master-cdns.c 	u32 rr;
rr               1045 drivers/i3c/master/i3c-master-cdns.c 	rr = readl(master->regs + DEV_ID_RR0(slot));
rr               1046 drivers/i3c/master/i3c-master-cdns.c 	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
rr               1047 drivers/i3c/master/i3c-master-cdns.c 	rr = readl(master->regs + DEV_ID_RR2(slot));
rr               1048 drivers/i3c/master/i3c-master-cdns.c 	info->dcr = rr;
rr               1049 drivers/i3c/master/i3c-master-cdns.c 	info->bcr = rr >> 8;
rr               1050 drivers/i3c/master/i3c-master-cdns.c 	info->pid = rr >> 16;
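
The Cadence I3C master reads per-slot "retaining registers": RR0 carries the IS_I3C flag and the dynamic address, while RR2 packs the DCR in the low byte, the BCR in the next byte and PID bits above that, which the driver unpacks with shifts. A standalone sketch of that unpacking; the struct and field names are placeholders and only the field widths implied by the shifts above are assumed:

    #include <stdint.h>
    #include <stdio.h>

    struct dev_info {
        uint8_t  dcr;
        uint8_t  bcr;
        uint32_t pid_hi;    /* PID bits held in this register, in this sketch */
    };

    static void unpack_rr2(uint32_t rr, struct dev_info *info)
    {
        info->dcr    = rr & 0xff;    /* the driver relies on u8 truncation */
        info->bcr    = (rr >> 8) & 0xff;
        info->pid_hi = rr >> 16;
    }

    int main(void)
    {
        struct dev_info info;

        unpack_rr2(0x12345678u, &info);
        printf("dcr=%#x bcr=%#x pid_hi=%#x\n", info.dcr, info.bcr, info.pid_hi);
        return 0;
    }
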
rr                 67 drivers/infiniband/hw/hns/hns_roce_alloc.c 			  int rr)
rr                 69 drivers/infiniband/hw/hns/hns_roce_alloc.c 	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
rr                113 drivers/infiniband/hw/hns/hns_roce_alloc.c 				int rr)
rr                123 drivers/infiniband/hw/hns/hns_roce_alloc.c 	if (!rr)
rr               1169 drivers/infiniband/hw/hns/hns_roce_device.h 			 int rr);
rr               1178 drivers/infiniband/hw/hns/hns_roce_device.h 				int rr);
rr                707 drivers/infiniband/hw/mlx5/mr.c 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
rr               1094 drivers/infiniband/hw/mlx5/mr.c 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
rr               1174 drivers/infiniband/hw/mlx5/mr.c 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
rr                456 drivers/infiniband/sw/siw/siw_cm.c 	struct mpa_rr *rr = &cep->mpa.hdr;
rr                465 drivers/infiniband/sw/siw/siw_cm.c 	iov[iovec_num].iov_base = rr;
rr                466 drivers/infiniband/sw/siw/siw_cm.c 	iov[iovec_num].iov_len = sizeof(*rr);
rr                467 drivers/infiniband/sw/siw/siw_cm.c 	mpa_len = sizeof(*rr);
rr                484 drivers/infiniband/sw/siw/siw_cm.c 	rr->params.pd_len = cpu_to_be16(pd_len);
rr               1316 drivers/isdn/mISDN/dsp_cmx.c 	int r, rr, t, tt, o_r, o_rr;
rr               1374 drivers/isdn/mISDN/dsp_cmx.c 	rr = (r + len) & CMX_BUFF_MASK;
rr               1396 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
rr               1406 drivers/isdn/mISDN/dsp_cmx.c 		if (r == rr) {
rr               1423 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
rr               1428 drivers/isdn/mISDN/dsp_cmx.c 			if (r != rr) {
rr               1432 drivers/isdn/mISDN/dsp_cmx.c 				memset(d, dsp_silence, (rr - r) & CMX_BUFF_MASK);
rr               1440 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
rr               1445 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr) {
rr               1468 drivers/isdn/mISDN/dsp_cmx.c 		o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
rr               1491 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr && t != tt) {
rr               1505 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr) {
rr               1521 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
rr               1533 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr) {
rr               1549 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr && t != tt) {
rr               1560 drivers/isdn/mISDN/dsp_cmx.c 		while (r != rr) {
rr               1637 drivers/isdn/mISDN/dsp_cmx.c 	int r, rr;
rr               1717 drivers/isdn/mISDN/dsp_cmx.c 				rr = (r + length) & CMX_BUFF_MASK;
rr               1719 drivers/isdn/mISDN/dsp_cmx.c 				while (r != rr) {
rr               1743 drivers/isdn/mISDN/dsp_cmx.c 			rr = (r + length) & CMX_BUFF_MASK;
rr               1745 drivers/isdn/mISDN/dsp_cmx.c 			while (r != rr) {
rr               1789 drivers/isdn/mISDN/dsp_cmx.c 				rr = (r + delay - (dsp_poll >> 1))
rr               1792 drivers/isdn/mISDN/dsp_cmx.c 				while (r != rr) {
rr               1820 drivers/isdn/mISDN/dsp_cmx.c 				rr = (r + delay - (dsp_poll >> 1))
rr               1823 drivers/isdn/mISDN/dsp_cmx.c 				while (r != rr) {
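
dsp_cmx keeps read/write positions in a power-of-two ring: the end index is (r + len) & CMX_BUFF_MASK, and the copy loops advance with the same mask until the two indices meet, so wrap-around needs no special casing. A minimal standalone version of that index scheme (buffer size and names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define BUFF_SIZE 16                 /* must be a power of two */
    #define BUFF_MASK (BUFF_SIZE - 1)

    /* Copy len bytes (len < BUFF_SIZE) into a wrapping ring starting at
     * *pos, one byte at a time, exactly like the r/rr loops shown above. */
    static void ring_write(unsigned char *buf, int *pos,
                           const unsigned char *src, int len)
    {
        int r = *pos;
        int rr = (r + len) & BUFF_MASK;  /* end position after the copy */

        while (r != rr) {
            buf[r] = *src++;
            r = (r + 1) & BUFF_MASK;
        }
        *pos = r;
    }

    int main(void)
    {
        unsigned char ring[BUFF_SIZE];
        int pos = 14;                    /* forces a wrap after two bytes */

        memset(ring, 0, sizeof(ring));
        ring_write(ring, &pos, (const unsigned char *)"abcd", 4);
        printf("pos=%d ring[0]=%c ring[15]=%c\n", pos, ring[0], ring[15]);
        return 0;
    }
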
rr                276 drivers/media/i2c/tda7432.c 	u8 lf, lr, rf, rr;
rr                282 drivers/media/i2c/tda7432.c 			rr = rf = -t->balance->val;
rr                286 drivers/media/i2c/tda7432.c 			rr = rf = TDA7432_ATTEN_0DB;
rr                290 drivers/media/i2c/tda7432.c 			rr = rf = TDA7432_ATTEN_0DB;
rr                297 drivers/media/i2c/tda7432.c 			rr |= TDA7432_MUTE;
rr                303 drivers/media/i2c/tda7432.c 		tda7432_write(sd, TDA7432_RR, rr);
rr                291 drivers/mtd/nand/raw/nand_hynix.c 	struct hynix_read_retry *rr = NULL;
rr                315 drivers/mtd/nand/raw/nand_hynix.c 	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
rr                316 drivers/mtd/nand/raw/nand_hynix.c 	if (!rr) {
rr                323 drivers/mtd/nand/raw/nand_hynix.c 			u8 *val = rr->values + (i * nregs);
rr                337 drivers/mtd/nand/raw/nand_hynix.c 	rr->nregs = nregs;
rr                338 drivers/mtd/nand/raw/nand_hynix.c 	rr->regs = hynix_1xnm_mlc_read_retry_regs;
rr                339 drivers/mtd/nand/raw/nand_hynix.c 	hynix->read_retry = rr;
rr                347 drivers/mtd/nand/raw/nand_hynix.c 		kfree(rr);
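
The Hynix read-retry setup allocates one buffer holding the struct header plus an nregs x nmodes table of register values (kzalloc(sizeof(*rr) + nregs * nmodes)) and fills it row by row at rr->values + i * nregs. A standalone sketch of that single-allocation layout using a C99 flexible array member; the struct name and fields here are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct read_retry {
        int nregs;
        int nmodes;
        uint8_t values[];            /* nmodes rows of nregs bytes */
    };

    static struct read_retry *rr_alloc(int nregs, int nmodes)
    {
        struct read_retry *rr = calloc(1, sizeof(*rr) + (size_t)nregs * nmodes);

        if (!rr)
            return NULL;
        rr->nregs = nregs;
        rr->nmodes = nmodes;
        return rr;
    }

    int main(void)
    {
        struct read_retry *rr = rr_alloc(4, 8);

        if (!rr)
            return 1;
        rr->values[2 * rr->nregs + 1] = 0x5a;   /* mode 2, register 1 */
        printf("%#x\n", rr->values[9]);
        free(rr);
        return 0;
    }
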
rr               5175 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
rr               5331 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h 	__le16	rr;
rr                 86 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		MLX5_SET(mkc, mkc, rr, 1);
rr                807 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	MLX5_SET(mkc, mkc, rr, 1);
rr                222 drivers/net/hippi/rrunner.c 	struct rr_private *rr = netdev_priv(dev);
rr                224 drivers/net/hippi/rrunner.c 	if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
rr                227 drivers/net/hippi/rrunner.c 		writel(HALT_NIC, &rr->regs->HostCtrl);
rr                231 drivers/net/hippi/rrunner.c 	pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
rr                232 drivers/net/hippi/rrunner.c 			    rr->evt_ring_dma);
rr                233 drivers/net/hippi/rrunner.c 	pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
rr                234 drivers/net/hippi/rrunner.c 			    rr->rx_ring_dma);
rr                235 drivers/net/hippi/rrunner.c 	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
rr                236 drivers/net/hippi/rrunner.c 			    rr->tx_ring_dma);
rr                237 drivers/net/hippi/rrunner.c 	pci_iounmap(pdev, rr->regs);
rr               3952 drivers/net/wireless/ath/ath10k/wmi.c 				    const struct phyerr_radar_report *rr,
rr               3963 drivers/net/wireless/ath/ath10k/wmi.c 	reg0 = __le32_to_cpu(rr->reg0);
rr               3964 drivers/net/wireless/ath/ath10k/wmi.c 	reg1 = __le32_to_cpu(rr->reg1);
rr               4112 drivers/net/wireless/ath/ath10k/wmi.c 	const struct phyerr_radar_report *rr;
rr               4144 drivers/net/wireless/ath/ath10k/wmi.c 			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
rr               4150 drivers/net/wireless/ath/ath10k/wmi.c 			rr = (struct phyerr_radar_report *)tlv_buf;
rr               4151 drivers/net/wireless/ath/ath10k/wmi.c 			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
rr                940 drivers/net/wireless/ath/carl9170/tx.c 			txc->s.rr[i - 1] = phy_set;
rr                260 drivers/net/wireless/ath/carl9170/wlan.h 	struct ar9170_tx_hw_phy_control rr[CARL9170_TX_MAX_RETRY_RATES];
rr                324 drivers/net/wireless/ath/carl9170/wlan.h 	__le32 rr[CARL9170_TX_MAX_RETRY_RATES];
rr                533 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	bool rr;
rr                575 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		rr = true;
rr                578 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		rr = false;
rr                586 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		if (rr) {
rr                 21 drivers/net/wireless/mediatek/mt76/debugfs.c 	*val = dev->bus->rr(dev, dev->debugfs_reg);
rr                 84 drivers/net/wireless/mediatek/mt76/mmio.c 		.rr = mt76_mmio_rr,
rr                 38 drivers/net/wireless/mediatek/mt76/mt76.h 	u32 (*rr)(struct mt76_dev *dev, u32 offset);
rr                535 drivers/net/wireless/mediatek/mt76/mt76.h #define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
rr                544 drivers/net/wireless/mediatek/mt76/mt76.h #define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
rr                421 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	return dev->bus_ops->rr(mdev, addr);
rr                522 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	bus_ops->rr = mt7603_rr;
rr                152 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h 		switch (dev->bus->rr(dev, MAC_CSR0)) {
rr                950 drivers/net/wireless/mediatek/mt76/usb.c 		.rr = mt76u_rr,
rr                 16 drivers/net/wireless/mediatek/mt76/util.c 		cur = dev->bus->rr(dev, offset) & mask;
rr                 34 drivers/net/wireless/mediatek/mt76/util.c 		cur = dev->bus->rr(dev, offset) & mask;
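
The mt76 hits show a small bus abstraction: struct mt76_bus_ops carries an rr (register read) callback that the mmio and usb back ends fill in differently, the mt76_rr()/__mt76_rr() macros dispatch through it, util.c polls bus->rr(dev, offset) & mask until a condition holds, and mt7603 wraps the original ops to intercept reads. A standalone sketch of the same callback-table pattern with a bounded poll loop; the types, register file and iteration budget are simplified placeholders:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dev;

    struct bus_ops {
        uint32_t (*rr)(struct dev *dev, uint32_t offset);
        void     (*wr)(struct dev *dev, uint32_t offset, uint32_t val);
    };

    struct dev {
        const struct bus_ops *bus;
        uint32_t regs[16];           /* fake register file for the sketch */
    };

    static uint32_t fake_rr(struct dev *dev, uint32_t offset)
    {
        return dev->regs[offset];
    }

    static void fake_wr(struct dev *dev, uint32_t offset, uint32_t val)
    {
        dev->regs[offset] = val;
    }

    static const struct bus_ops fake_ops = { .rr = fake_rr, .wr = fake_wr };

    /* Poll a register until (value & mask) == val or the budget runs out. */
    static bool poll_reg(struct dev *dev, uint32_t offset, uint32_t mask,
                         uint32_t val, int tries)
    {
        while (tries--) {
            if ((dev->bus->rr(dev, offset) & mask) == val)
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct dev dev = { .bus = &fake_ops };

        dev.bus->wr(&dev, 3, 0x80);
        printf("ready: %d\n", poll_reg(&dev, 3, 0x80, 0x80, 10));
        return 0;
    }
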
rr               1628 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct read_regs_int *rr = &intr->read_regs;
rr               1629 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
rr               1634 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	if (rr->length < struct_size(regs, regs, count)) {
rr               1637 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 			 rr->length, struct_size(regs, regs, count));
rr               1641 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	if (rr->length > sizeof(rr->buffer)) {
rr               1644 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 			 rr->length, sizeof(rr->buffer));
rr               1669 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct read_regs_int *rr = &intr->read_regs;
rr               1670 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
rr               1222 drivers/s390/cio/chsc.c 	} *rr;
rr               1226 drivers/s390/cio/chsc.c 	rr = page;
rr               1227 drivers/s390/cio/chsc.c 	rr->request.length = 0x0020;
rr               1228 drivers/s390/cio/chsc.c 	rr->request.code = 0x0033;
rr               1229 drivers/s390/cio/chsc.c 	rr->op = op;
rr               1230 drivers/s390/cio/chsc.c 	rr->ctrl = ctrl;
rr               1231 drivers/s390/cio/chsc.c 	rc = chsc(rr);
rr               1234 drivers/s390/cio/chsc.c 	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
rr               1236 drivers/s390/cio/chsc.c 		*clock_delta = rr->clock_delta;
rr               1247 drivers/s390/cio/chsc.c 	} *rr;
rr               1251 drivers/s390/cio/chsc.c 	rr = page;
rr               1252 drivers/s390/cio/chsc.c 	rr->request.length = 0x0010;
rr               1253 drivers/s390/cio/chsc.c 	rr->request.code = 0x0038;
rr               1254 drivers/s390/cio/chsc.c 	rc = chsc(rr);
rr               1257 drivers/s390/cio/chsc.c 	memcpy(result, &rr->data, size);
rr               1258 drivers/s390/cio/chsc.c 	return (rr->response.code == 0x0001) ? 0 : -EIO;
rr               1787 drivers/s390/cio/qdio_main.c 	struct chsc_pnso_area *rr;
rr               1793 drivers/s390/cio/qdio_main.c 	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
rr               1794 drivers/s390/cio/qdio_main.c 	if (rr == NULL)
rr               1798 drivers/s390/cio/qdio_main.c 		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
rr               1801 drivers/s390/cio/qdio_main.c 		if (rr->response.code != 1) {
rr               1810 drivers/s390/cio/qdio_main.c 		size = rr->naihdr.naids;
rr               1811 drivers/s390/cio/qdio_main.c 		elems = (rr->response.length -
rr               1816 drivers/s390/cio/qdio_main.c 		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
rr               1823 drivers/s390/cio/qdio_main.c 		prev_instance = rr->naihdr.instance;
rr               1828 drivers/s390/cio/qdio_main.c 						&rr->entries.l3_ipv6[i]);
rr               1832 drivers/s390/cio/qdio_main.c 						&rr->entries.l3_ipv4[i]);
rr               1836 drivers/s390/cio/qdio_main.c 						&rr->entries.l2[i]);
rr               1843 drivers/s390/cio/qdio_main.c 	} while (rr->response.code == 0x0107 ||  /* channel busy */
rr               1844 drivers/s390/cio/qdio_main.c 		  (rr->response.code == 1 && /* list stored */
rr               1846 drivers/s390/cio/qdio_main.c 		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
rr               1847 drivers/s390/cio/qdio_main.c 	(*response) = rr->response.code;
rr               1850 drivers/s390/cio/qdio_main.c 	free_page((unsigned long)rr);
rr               2646 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t rr:1;
rr               2658 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t rr:1;
rr                195 drivers/scsi/lpfc/lpfc_init.c 	if (mb->un.varRdRev.rr == 0) {
rr                164 drivers/soc/fsl/qbman/bman.c 	union bm_mc_result *rr;
rr                433 drivers/soc/fsl/qbman/bman.c 	mc->rr = portal->addr.ce + BM_CL_RR0;
rr                469 drivers/soc/fsl/qbman/bman.c 	union bm_mc_result *rr = mc->rr + mc->rridx;
rr                475 drivers/soc/fsl/qbman/bman.c 	dpaa_invalidate_touch_ro(rr);
rr                484 drivers/soc/fsl/qbman/bman.c 	union bm_mc_result *rr = mc->rr + mc->rridx;
rr                492 drivers/soc/fsl/qbman/bman.c 	if (!rr->verb) {
rr                493 drivers/soc/fsl/qbman/bman.c 		dpaa_invalidate_touch_ro(rr);
rr                501 drivers/soc/fsl/qbman/bman.c 	return rr;
rr                328 drivers/soc/fsl/qbman/qman.c 	union qm_mc_result *rr;
rr                865 drivers/soc/fsl/qbman/qman.c 	mc->rr = portal->addr.ce + QM_CL_RR0;
rr                873 drivers/soc/fsl/qbman/qman.c 	rr0 = mc->rr->verb;
rr                874 drivers/soc/fsl/qbman/qman.c 	rr1 = (mc->rr+1)->verb;
rr                912 drivers/soc/fsl/qbman/qman.c 	union qm_mc_result *rr = mc->rr + mc->rridx;
rr                918 drivers/soc/fsl/qbman/qman.c 	dpaa_invalidate_touch_ro(rr);
rr                927 drivers/soc/fsl/qbman/qman.c 	union qm_mc_result *rr = mc->rr + mc->rridx;
rr                935 drivers/soc/fsl/qbman/qman.c 	if (!rr->verb) {
rr                936 drivers/soc/fsl/qbman/qman.c 		dpaa_invalidate_touch_ro(rr);
rr                944 drivers/soc/fsl/qbman/qman.c 	return rr;
rr               1460 drivers/soc/fsl/qbman/qman.c 	struct qman_cgrs rr, c;
rr               1474 drivers/soc/fsl/qbman/qman.c 	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
rr               1477 drivers/soc/fsl/qbman/qman.c 	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
rr               1479 drivers/soc/fsl/qbman/qman.c 	qman_cgrs_cp(&p->cgrs[1], &rr);
rr               1483 drivers/soc/fsl/qbman/qman.c 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
rr                699 drivers/video/fbdev/cyber2000fb.c 		u_int rr, t_mult, t_pll_ps;
rr                705 drivers/video/fbdev/cyber2000fb.c 		rr = ref_ps * t_div1;
rr                706 drivers/video/fbdev/cyber2000fb.c 		t_mult = (rr + pll_ps / 2) / pll_ps;
rr                718 drivers/video/fbdev/cyber2000fb.c 		t_pll_ps = (rr + t_mult / 2) / t_mult;
rr                126 drivers/video/fbdev/hpfb.c static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
rr                128 drivers/video/fbdev/hpfb.c 	if (rr >= 0) {
rr                133 drivers/video/fbdev/hpfb.c 	if (rr >= 0) {
rr                135 drivers/video/fbdev/hpfb.c 		out_8(fb_regs + WMRR, rr);
rr               1097 drivers/video/fbdev/omap2/omapfb/dss/dispc.c 	coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
rr                363 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 			info.cpr_coefs.rr,
rr                386 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 				&coefs.rr, &coefs.rg, &coefs.rb,
rr                391 drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c 	arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
rr                 58 fs/gfs2/recovery.c 	struct gfs2_revoke_replay *rr;
rr                 61 fs/gfs2/recovery.c 	list_for_each_entry(rr, head, rr_list) {
rr                 62 fs/gfs2/recovery.c 		if (rr->rr_blkno == blkno) {
rr                 69 fs/gfs2/recovery.c 		rr->rr_where = where;
rr                 73 fs/gfs2/recovery.c 	rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_NOFS);
rr                 74 fs/gfs2/recovery.c 	if (!rr)
rr                 77 fs/gfs2/recovery.c 	rr->rr_blkno = blkno;
rr                 78 fs/gfs2/recovery.c 	rr->rr_where = where;
rr                 79 fs/gfs2/recovery.c 	list_add(&rr->rr_list, head);
rr                 86 fs/gfs2/recovery.c 	struct gfs2_revoke_replay *rr;
rr                 90 fs/gfs2/recovery.c 	list_for_each_entry(rr, &jd->jd_revoke_list, rr_list) {
rr                 91 fs/gfs2/recovery.c 		if (rr->rr_blkno == blkno) {
rr                100 fs/gfs2/recovery.c 	wrap = (rr->rr_where < jd->jd_replay_tail);
rr                102 fs/gfs2/recovery.c 	b = (where < rr->rr_where);
rr                111 fs/gfs2/recovery.c 	struct gfs2_revoke_replay *rr;
rr                114 fs/gfs2/recovery.c 		rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list);
rr                115 fs/gfs2/recovery.c 		list_del(&rr->rr_list);
rr                116 fs/gfs2/recovery.c 		kfree(rr);
rr                 43 fs/isofs/rock.c static int check_sp(struct rock_ridge *rr, struct inode *inode)
rr                 45 fs/isofs/rock.c 	if (rr->u.SP.magic[0] != 0xbe)
rr                 47 fs/isofs/rock.c 	if (rr->u.SP.magic[1] != 0xef)
rr                 49 fs/isofs/rock.c 	ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip;
rr                202 fs/isofs/rock.c 	struct rock_ridge *rr;
rr                219 fs/isofs/rock.c 		rr = (struct rock_ridge *)rs.chr;
rr                225 fs/isofs/rock.c 		if (rr->len < 3)
rr                230 fs/isofs/rock.c 		rs.chr += rr->len;
rr                231 fs/isofs/rock.c 		rs.len -= rr->len;
rr                241 fs/isofs/rock.c 			if ((rr->u.RR.flags[0] & RR_NM) == 0)
rr                245 fs/isofs/rock.c 			if (check_sp(rr, inode))
rr                249 fs/isofs/rock.c 			rs.cont_extent = isonum_733(rr->u.CE.extent);
rr                250 fs/isofs/rock.c 			rs.cont_offset = isonum_733(rr->u.CE.offset);
rr                251 fs/isofs/rock.c 			rs.cont_size = isonum_733(rr->u.CE.size);
rr                256 fs/isofs/rock.c 			if (rr->len < 5)
rr                265 fs/isofs/rock.c 			if (rr->u.NM.flags & 6)
rr                268 fs/isofs/rock.c 			if (rr->u.NM.flags & ~1) {
rr                270 fs/isofs/rock.c 					rr->u.NM.flags);
rr                273 fs/isofs/rock.c 			len = rr->len - 5;
rr                278 fs/isofs/rock.c 			p = memchr(rr->u.NM.name, '\0', len);
rr                280 fs/isofs/rock.c 				len = p - rr->u.NM.name;
rr                281 fs/isofs/rock.c 			memcpy(retname + retnamlen, rr->u.NM.name, len);
rr                316 fs/isofs/rock.c 	struct rock_ridge *rr;
rr                335 fs/isofs/rock.c 		rr = (struct rock_ridge *)rs.chr;
rr                341 fs/isofs/rock.c 		if (rr->len < 3)
rr                346 fs/isofs/rock.c 		rs.chr += rr->len;
rr                347 fs/isofs/rock.c 		rs.len -= rr->len;
rr                358 fs/isofs/rock.c 			if ((rr->u.RR.flags[0] &
rr                364 fs/isofs/rock.c 			if (check_sp(rr, inode))
rr                368 fs/isofs/rock.c 			rs.cont_extent = isonum_733(rr->u.CE.extent);
rr                369 fs/isofs/rock.c 			rs.cont_offset = isonum_733(rr->u.CE.offset);
rr                370 fs/isofs/rock.c 			rs.cont_size = isonum_733(rr->u.CE.size);
rr                374 fs/isofs/rock.c 			if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
rr                380 fs/isofs/rock.c 				for (p = 0; p < rr->u.ER.len_id; p++)
rr                381 fs/isofs/rock.c 					printk(KERN_CONT "%c", rr->u.ER.data[p]);
rr                386 fs/isofs/rock.c 			inode->i_mode = isonum_733(rr->u.PX.mode);
rr                387 fs/isofs/rock.c 			set_nlink(inode, isonum_733(rr->u.PX.n_links));
rr                388 fs/isofs/rock.c 			i_uid_write(inode, isonum_733(rr->u.PX.uid));
rr                389 fs/isofs/rock.c 			i_gid_write(inode, isonum_733(rr->u.PX.gid));
rr                394 fs/isofs/rock.c 				high = isonum_733(rr->u.PN.dev_high);
rr                395 fs/isofs/rock.c 				low = isonum_733(rr->u.PN.dev_low);
rr                423 fs/isofs/rock.c 			if (rr->u.TF.flags & TF_CREATE) {
rr                425 fs/isofs/rock.c 				    iso_date(rr->u.TF.times[cnt++].time,
rr                429 fs/isofs/rock.c 			if (rr->u.TF.flags & TF_MODIFY) {
rr                431 fs/isofs/rock.c 				    iso_date(rr->u.TF.times[cnt++].time,
rr                435 fs/isofs/rock.c 			if (rr->u.TF.flags & TF_ACCESS) {
rr                437 fs/isofs/rock.c 				    iso_date(rr->u.TF.times[cnt++].time,
rr                441 fs/isofs/rock.c 			if (rr->u.TF.flags & TF_ATTRIBUTES) {
rr                443 fs/isofs/rock.c 				    iso_date(rr->u.TF.times[cnt++].time,
rr                453 fs/isofs/rock.c 				slen = rr->len - 5;
rr                454 fs/isofs/rock.c 				slp = &rr->u.SL.link;
rr                483 fs/isofs/rock.c 						if (((rr->u.SL.
rr                515 fs/isofs/rock.c 			reloc_block = isonum_733(rr->u.CL.location);
rr                547 fs/isofs/rock.c 			algo = isonum_721(rr->u.ZF.algorithm);
rr                550 fs/isofs/rock.c 					isonum_711(&rr->u.ZF.parms[1]);
rr                569 fs/isofs/rock.c 						isonum_711(&rr->u.ZF.parms[0]);
rr                571 fs/isofs/rock.c 						isonum_711(&rr->u.ZF.parms[1]);
rr                573 fs/isofs/rock.c 					    isonum_733(rr->u.ZF.
rr                580 fs/isofs/rock.c 				       rr->u.ZF.algorithm[0],
rr                581 fs/isofs/rock.c 				       rr->u.ZF.algorithm[1]);
rr                603 fs/isofs/rock.c static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
rr                609 fs/isofs/rock.c 	slen = rr->len - 5;
rr                610 fs/isofs/rock.c 	slp = &rr->u.SL.link;
rr                650 fs/isofs/rock.c 			if ((!rootflag) && (rr->u.SL.flags & 1) &&
rr                706 fs/isofs/rock.c 	struct rock_ridge *rr;
rr                739 fs/isofs/rock.c 		rr = (struct rock_ridge *)rs.chr;
rr                740 fs/isofs/rock.c 		if (rr->len < 3)
rr                745 fs/isofs/rock.c 		rs.chr += rr->len;
rr                746 fs/isofs/rock.c 		rs.len -= rr->len;
rr                752 fs/isofs/rock.c 			if ((rr->u.RR.flags[0] & RR_SL) == 0)
rr                756 fs/isofs/rock.c 			if (check_sp(rr, inode))
rr                760 fs/isofs/rock.c 			rpnt = get_symlink_chunk(rpnt, rr,
rr                767 fs/isofs/rock.c 			rs.cont_extent = isonum_733(rr->u.CE.extent);
rr                768 fs/isofs/rock.c 			rs.cont_offset = isonum_733(rr->u.CE.offset);
rr                769 fs/isofs/rock.c 			rs.cont_size = isonum_733(rr->u.CE.size);
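
The isofs Rock Ridge walker treats the system-use area as a sequence of variable-length entries: cast the cursor to struct rock_ridge, reject entries with len < 3 (which could never advance), dispatch on the signature, then step forward by len, with CE entries redirecting the walk to a continuation extent. A standalone sketch of that length-prefixed iteration, using a simplified entry layout rather than the on-disk SUSP structures:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified system-use entry: two-byte signature, total length, payload. */
    struct su_entry {
        char    sig[2];
        uint8_t len;                 /* length of the whole entry, >= 3 */
    };

    static void walk_su_area(const uint8_t *buf, size_t size)
    {
        size_t pos = 0;

        while (pos + sizeof(struct su_entry) <= size) {
            const struct su_entry *e = (const struct su_entry *)(buf + pos);

            if (e->len < 3 || pos + e->len > size)
                break;               /* malformed entry: stop, don't loop forever */
            printf("entry %c%c len %u\n", e->sig[0], e->sig[1], e->len);
            pos += e->len;           /* advance by the entry's own length */
        }
    }

    int main(void)
    {
        uint8_t area[] = { 'N', 'M', 5, 0, 'x',   /* name entry, 5 bytes  */
                           'P', 'X', 3,           /* minimal entry        */
                           'C', 'E', 2 };         /* bad length: rejected */

        walk_su_area(area, sizeof(area));
        return 0;
    }
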
rr                384 fs/jffs2/scan.c 				struct jffs2_raw_xref *rr, uint32_t ofs,
rr                391 fs/jffs2/scan.c 	crc = crc32(0, rr, sizeof(*rr) - 4);
rr                392 fs/jffs2/scan.c 	if (crc != je32_to_cpu(rr->node_crc)) {
rr                394 fs/jffs2/scan.c 			      ofs, je32_to_cpu(rr->node_crc), crc);
rr                395 fs/jffs2/scan.c 		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
rr                400 fs/jffs2/scan.c 	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
rr                402 fs/jffs2/scan.c 			      ofs, je32_to_cpu(rr->totlen),
rr                404 fs/jffs2/scan.c 		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
rr                422 fs/jffs2/scan.c 	ref->ino = je32_to_cpu(rr->ino);
rr                423 fs/jffs2/scan.c 	ref->xid = je32_to_cpu(rr->xid);
rr                424 fs/jffs2/scan.c 	ref->xseqno = je32_to_cpu(rr->xseqno);
rr                430 fs/jffs2/scan.c 	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
rr                433 fs/jffs2/scan.c 		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
rr                176 fs/jffs2/summary.c int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
rr                184 fs/jffs2/summary.c 	temp->nodetype = rr->nodetype;
rr                188 fs/jffs2/summary.h int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs);
rr                448 fs/jffs2/xattr.c 	struct jffs2_raw_xref rr;
rr                459 fs/jffs2/xattr.c 	rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr);
rr                460 fs/jffs2/xattr.c 	if (rc || sizeof(rr) != readlen) {
rr                462 fs/jffs2/xattr.c 			      rc, sizeof(rr), readlen, offset);
rr                466 fs/jffs2/xattr.c 	crc = crc32(0, &rr, sizeof(rr) - 4);
rr                467 fs/jffs2/xattr.c 	if (crc != je32_to_cpu(rr.node_crc)) {
rr                469 fs/jffs2/xattr.c 			    offset, je32_to_cpu(rr.node_crc), crc);
rr                472 fs/jffs2/xattr.c 	if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
rr                473 fs/jffs2/xattr.c 	    || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
rr                474 fs/jffs2/xattr.c 	    || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
rr                477 fs/jffs2/xattr.c 			    offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
rr                478 fs/jffs2/xattr.c 			    je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
rr                479 fs/jffs2/xattr.c 			    je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
rr                482 fs/jffs2/xattr.c 	ref->ino = je32_to_cpu(rr.ino);
rr                483 fs/jffs2/xattr.c 	ref->xid = je32_to_cpu(rr.xid);
rr                484 fs/jffs2/xattr.c 	ref->xseqno = je32_to_cpu(rr.xseqno);
rr                509 fs/jffs2/xattr.c 	struct jffs2_raw_xref rr;
rr                514 fs/jffs2/xattr.c 	rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rr                515 fs/jffs2/xattr.c 	rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF);
rr                516 fs/jffs2/xattr.c 	rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
rr                517 fs/jffs2/xattr.c 	rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
rr                522 fs/jffs2/xattr.c 		rr.ino = cpu_to_je32(ref->ino);
rr                523 fs/jffs2/xattr.c 		rr.xid = cpu_to_je32(ref->xid);
rr                525 fs/jffs2/xattr.c 		rr.ino = cpu_to_je32(ref->ic->ino);
rr                526 fs/jffs2/xattr.c 		rr.xid = cpu_to_je32(ref->xd->xid);
rr                528 fs/jffs2/xattr.c 	rr.xseqno = cpu_to_je32(xseqno);
rr                529 fs/jffs2/xattr.c 	rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
rr                531 fs/jffs2/xattr.c 	ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
rr                532 fs/jffs2/xattr.c 	if (ret || sizeof(rr) != length) {
rr                534 fs/jffs2/xattr.c 			      ret, sizeof(rr), length, phys_ofs);
rr                537 fs/jffs2/xattr.c 			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL);
rr                543 fs/jffs2/xattr.c 	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref);
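
The jffs2 xref node stores its CRC in the last four bytes of the struct, so both the writer and the reader run crc32 over sizeof(rr) - 4 bytes and compare against that trailing field. A standalone sketch of the "checksum everything but the checksum" layout; it uses a plain bitwise CRC-32 rather than the kernel's crc32(), so seed and conditioning conventions may differ from jffs2's:

    #include <stdint.h>
    #include <stdio.h>

    /* Plain reflected CRC-32, bit by bit; illustrative, not the kernel helper. */
    static uint32_t crc32_simple(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
        }
        return crc;
    }

    struct xref_node {
        uint32_t ino;
        uint32_t xid;
        uint32_t xseqno;
        uint32_t node_crc;           /* must stay the last 4 bytes */
    };

    int main(void)
    {
        struct xref_node rr = { .ino = 42, .xid = 7, .xseqno = 1 };

        rr.node_crc = crc32_simple(0, &rr, sizeof(rr) - 4);

        /* Reader side: recompute over the same span and compare. */
        uint32_t crc = crc32_simple(0, &rr, sizeof(rr) - 4);
        printf("crc %s\n", crc == rr.node_crc ? "ok" : "mismatch");
        return 0;
    }
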
rr               1907 fs/ocfs2/dlm/dlmmaster.c 		int rr, err = 0;
rr               1955 fs/ocfs2/dlm/dlmmaster.c 		rr = kref_read(&mle->mle_refs);
rr               1957 fs/ocfs2/dlm/dlmmaster.c 			if (extra_ref && rr < 3)
rr               1959 fs/ocfs2/dlm/dlmmaster.c 			else if (!extra_ref && rr < 2)
rr               1962 fs/ocfs2/dlm/dlmmaster.c 			if (extra_ref && rr < 2)
rr               1964 fs/ocfs2/dlm/dlmmaster.c 			else if (!extra_ref && rr < 1)
rr               1971 fs/ocfs2/dlm/dlmmaster.c 			     assert->node_idx, rr, extra_ref, mle->inuse);
rr               1592 fs/xfs/libxfs/xfs_refcount.c 	struct xfs_refcount_recovery	*rr;
rr               1597 fs/xfs/libxfs/xfs_refcount.c 	rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
rr               1598 fs/xfs/libxfs/xfs_refcount.c 	xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
rr               1599 fs/xfs/libxfs/xfs_refcount.c 	list_add_tail(&rr->rr_list, debris);
rr               1613 fs/xfs/libxfs/xfs_refcount.c 	struct xfs_refcount_recovery	*rr, *n;
rr               1663 fs/xfs/libxfs/xfs_refcount.c 	list_for_each_entry_safe(rr, n, &debris, rr_list) {
rr               1669 fs/xfs/libxfs/xfs_refcount.c 		trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
rr               1672 fs/xfs/libxfs/xfs_refcount.c 		agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
rr               1675 fs/xfs/libxfs/xfs_refcount.c 				rr->rr_rrec.rc_blockcount);
rr               1678 fs/xfs/libxfs/xfs_refcount.c 		xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
rr               1684 fs/xfs/libxfs/xfs_refcount.c 		list_del(&rr->rr_list);
rr               1685 fs/xfs/libxfs/xfs_refcount.c 		kmem_free(rr);
rr               1693 fs/xfs/libxfs/xfs_refcount.c 	list_for_each_entry_safe(rr, n, &debris, rr_list) {
rr               1694 fs/xfs/libxfs/xfs_refcount.c 		list_del(&rr->rr_list);
rr               1695 fs/xfs/libxfs/xfs_refcount.c 		kmem_free(rr);
rr                 16 include/crypto/cast5.h 	int rr;	/* rr ? rounds = 12 : rounds = 16; (rfc 2144) */
rr               3248 include/linux/mlx5/mlx5_ifc.h 	u8         rr[0x1];
rr                 44 include/net/inet_sock.h 	unsigned char	rr;
rr                381 include/uapi/linux/batadv_packet.h 	__u8   rr[BATADV_RR_LEN][ETH_ALEN];
rr                289 include/video/omapfb_dss.h 	s16 rr, rg, rb;
rr                251 net/batman-adv/icmp_socket.c 			ether_addr_copy(icmp_packet_rr->rr[0], addr);
rr                402 net/batman-adv/routing.c 		ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
rr                107 net/bluetooth/smp.c 	u8		rr[16]; /* Remote OOB ra/rb value */
rr                701 net/bluetooth/smp.c 			memcpy(smp->rr, oob_data->rand256, 16);
rr                704 net/bluetooth/smp.c 			SMP_DBG("OOB Remote Random: %16phN", smp->rr);
rr               1462 net/bluetooth/smp.c 		memcpy(r, smp->rr, 16);
rr               2692 net/bluetooth/smp.c 			     smp->rr, 0, cfm.confirm_val);
rr               1993 net/ipv4/cipso_ipv4.c 	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
rr               2006 net/ipv4/cipso_ipv4.c 		if (opt->opt.rr > opt->opt.cipso)
rr               2007 net/ipv4/cipso_ipv4.c 			opt->opt.rr -= cipso_len;
rr                 58 net/ipv4/ip_options.c 			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
rr                 69 net/ipv4/ip_options.c 	if (opt->rr) {
rr                 70 net/ipv4/ip_options.c 		memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]);
rr                 71 net/ipv4/ip_options.c 		opt->rr = 0;
rr                105 net/ipv4/ip_options.c 	if (sopt->rr) {
rr                106 net/ipv4/ip_options.c 		optlen  = sptr[sopt->rr+1];
rr                107 net/ipv4/ip_options.c 		soffset = sptr[sopt->rr+2];
rr                108 net/ipv4/ip_options.c 		dopt->rr = dopt->optlen + sizeof(struct iphdr);
rr                109 net/ipv4/ip_options.c 		memcpy(dptr, sptr+sopt->rr, optlen);
rr                233 net/ipv4/ip_options.c 	opt->rr = 0;
rr                325 net/ipv4/ip_options.c 			if (opt->rr) {
rr                350 net/ipv4/ip_options.c 			opt->rr = optptr - iph;
rr                503 net/ipv4/ip_options.c 		unsigned  char *optptr = opt->__data+opt->rr-sizeof(struct  iphdr);
rr                577 net/ipv4/ip_options.c 		optptr = (unsigned char *)raw + opt->rr;
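
ip_options.c keeps opt->rr as the byte offset of the record-route option inside the IP header; when the option is not wanted it is erased in place by overwriting its full length (the option's second byte) with no-op option bytes, and when options are echoed into a reply the stored offsets are adjusted. A standalone sketch of that in-place NOP fill; the header bytes are mocked up and the option constants are defined locally for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define IPOPT_NOP 1
    #define IPOPT_RR  7

    /* Blank one IP option in place: iph[off] is the option type and
     * iph[off + 1] its total length, as in the lines above. */
    static void blank_option(uint8_t *iph, int off)
    {
        memset(iph + off, IPOPT_NOP, iph[off + 1]);
    }

    int main(void)
    {
        /* 20-byte base header (zeroed here) followed by a 7-byte RR option. */
        uint8_t iph[28] = { 0 };
        int rr = 20;

        iph[rr]     = IPOPT_RR;
        iph[rr + 1] = 7;             /* type, len, ptr, one 4-byte address slot */
        iph[rr + 2] = 4;             /* pointer to the first free slot */

        blank_option(iph, rr);
        printf("first option byte now %d\n", iph[rr]);   /* 1 == IPOPT_NOP */
        return 0;
    }
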
rr                112 net/netfilter/nft_exthdr.c 		if (!opt->rr)
rr                114 net/netfilter/nft_exthdr.c 		*offset = opt->rr + start;
rr               1587 net/wireless/reg.c 		const struct ieee80211_reg_rule *rr;
rr               1590 net/wireless/reg.c 		rr = &regd->reg_rules[i];
rr               1591 net/wireless/reg.c 		fr = &rr->freq_range;
rr               1604 net/wireless/reg.c 			return rr;