re                557 arch/ia64/mm/discontig.c 	unsigned long rs, re, end = start + len;
re                577 arch/ia64/mm/discontig.c 		re = min(end, node_memblk[i].start_paddr +
re                580 arch/ia64/mm/discontig.c 		if (rs < re)
re                581 arch/ia64/mm/discontig.c 			(*func)(rs, re - rs, node_memblk[i].nid);
re                583 arch/ia64/mm/discontig.c 		if (re == end)
re                965 arch/m68k/ifpsp060/src/fpsp.S # if our emulation, after re-doing the operation, decided that
re                998 arch/m68k/ifpsp060/src/fpsp.S # if our emulation, after re-doing the operation, decided that
re               11815 arch/m68k/ifpsp060/src/fpsp.S # if the rnd mode is anything but RZ, then we have to re-do the above
re                964 arch/m68k/ifpsp060/src/pfpsp.S # if our emulation, after re-doing the operation, decided that
re                997 arch/m68k/ifpsp060/src/pfpsp.S # if our emulation, after re-doing the operation, decided that
re               8246 arch/m68k/ifpsp060/src/pfpsp.S # if the rnd mode is anything but RZ, then we have to re-do the above
re                662 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int re : 5,
re                682 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int re : 5,
re                702 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int re : 5,
re                713 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int re : 5,
re               2516 arch/mips/kvm/emulate.c 	    (inst.r_format.re >> 3) == 0) {
re               2520 arch/mips/kvm/emulate.c 		int sel = inst.r_format.re & 0x7;
re               1170 arch/mips/kvm/vz.c 			if (inst.r_format.rs || (inst.r_format.re >> 3))
re               1175 arch/mips/kvm/vz.c 			sel = inst.r_format.re & 0x7;
re                173 arch/mips/math-emu/cp1emu.c 				mips32_insn.r_format.re = insn.mm_fp5_format.fd;
re                258 arch/mips/math-emu/cp1emu.c 				mips32_insn.r_format.re = 0;
re                 15 arch/mips/math-emu/dp_div.c 	int re;
re                117 arch/mips/math-emu/dp_div.c 	re = xe - ye;
re                140 arch/mips/math-emu/dp_div.c 		re--;
re                143 arch/mips/math-emu/dp_div.c 	return ieee754dp_format(xs == ys ? 0 : 1, re, rm);
re                 43 arch/mips/math-emu/dp_maddf.c 	int re;
re                189 arch/mips/math-emu/dp_maddf.c 	re = xe + ye;
re                230 arch/mips/math-emu/dp_maddf.c 		re++;
re                242 arch/mips/math-emu/dp_maddf.c 		return ieee754dp_format(rs, re, lrm);
re                251 arch/mips/math-emu/dp_maddf.c 	if (ze > re) {
re                255 arch/mips/math-emu/dp_maddf.c 		s = ze - re;
re                257 arch/mips/math-emu/dp_maddf.c 		re += s;
re                258 arch/mips/math-emu/dp_maddf.c 	} else if (re > ze) {
re                262 arch/mips/math-emu/dp_maddf.c 		s = re - ze;
re                266 arch/mips/math-emu/dp_maddf.c 	assert(ze == re);
re                 14 arch/mips/math-emu/dp_mul.c 	int re;
re                109 arch/mips/math-emu/dp_mul.c 	re = xe + ye;
re                152 arch/mips/math-emu/dp_mul.c 		re++;
re                159 arch/mips/math-emu/dp_mul.c 	return ieee754dp_format(rs, re, rm);
re                 15 arch/mips/math-emu/sp_div.c 	int re;
re                117 arch/mips/math-emu/sp_div.c 	re = xe - ye;
re                139 arch/mips/math-emu/sp_div.c 		re--;
re                142 arch/mips/math-emu/sp_div.c 	return ieee754sp_format(xs == ys ? 0 : 1, re, rm);
re                 18 arch/mips/math-emu/sp_maddf.c 	int re;
re                160 arch/mips/math-emu/sp_maddf.c 	re = xe + ye;
re                174 arch/mips/math-emu/sp_maddf.c 		re++;
re                186 arch/mips/math-emu/sp_maddf.c 		return ieee754sp_format(rs, re, rm);
re                194 arch/mips/math-emu/sp_maddf.c 	if (ze > re) {
re                198 arch/mips/math-emu/sp_maddf.c 		s = ze - re;
re                200 arch/mips/math-emu/sp_maddf.c 		re += s;
re                201 arch/mips/math-emu/sp_maddf.c 	} else if (re > ze) {
re                205 arch/mips/math-emu/sp_maddf.c 		s = re - ze;
re                209 arch/mips/math-emu/sp_maddf.c 	assert(ze == re);
re                 14 arch/mips/math-emu/sp_mul.c 	int re;
re                109 arch/mips/math-emu/sp_mul.c 	re = xe + ye;
re                147 arch/mips/math-emu/sp_mul.c 		re++;
re                154 arch/mips/math-emu/sp_mul.c 	return ieee754sp_format(rs, re, rm);
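
The arch/mips/math-emu hits above use re as the intermediate result exponent: multiplication adds the operand exponents (re = xe + ye), division subtracts them (re = xe - ye), and a single re++/re-- renormalizes when the mantissa result leaves the [1, 2) range. A minimal host-side sketch of the multiply case, assuming frexp()/ldexp() in place of the kernel's fixed-point mantissa handling:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double x = 6.5, y = 0.375;
	int xe, ye, re;

	/* Normalize so x == xm * 2^xe with xm in [1, 2), like an IEEE 754 mantissa. */
	double xm = 2.0 * frexp(x, &xe); xe--;
	double ym = 2.0 * frexp(y, &ye); ye--;

	re = xe + ye;           /* tentative result exponent, as in dp_mul.c */
	double rm = xm * ym;    /* product mantissa lies in [1, 4)           */
	if (rm >= 2.0) {        /* left the normal range: shift down         */
		rm /= 2.0;
		re++;           /* the re++ after normalization above        */
	}

	printf("%g * %g = %g * 2^%d = %g\n", x, y, rm, re, ldexp(rm, re));
	return 0;
}
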
re                164 arch/powerpc/include/asm/vas.h int vas_paste_crb(struct vas_window *win, int offset, bool re);
re               1072 arch/powerpc/platforms/powernv/vas-window.c int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
re               1085 arch/powerpc/platforms/powernv/vas-window.c 	WARN_ON_ONCE(txwin->nx_win && !re);
re               1088 arch/powerpc/platforms/powernv/vas-window.c 	if (re) {
re                321 arch/s390/net/bpf_jit_comp.c static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
re                325 arch/s390/net/bpf_jit_comp.c 	if (rs == re)
re                330 arch/s390/net/bpf_jit_comp.c 		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
re                336 arch/s390/net/bpf_jit_comp.c static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
re                343 arch/s390/net/bpf_jit_comp.c 	if (rs == re)
re                348 arch/s390/net/bpf_jit_comp.c 		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
re                388 arch/s390/net/bpf_jit_comp.c 	int re = 6, rs;
re                391 arch/s390/net/bpf_jit_comp.c 		rs = get_start(jit, re);
re                394 arch/s390/net/bpf_jit_comp.c 		re = get_end(jit, rs + 1);
re                396 arch/s390/net/bpf_jit_comp.c 			save_regs(jit, rs, re);
re                398 arch/s390/net/bpf_jit_comp.c 			restore_regs(jit, rs, re, stack_depth);
re                399 arch/s390/net/bpf_jit_comp.c 		re++;
re                400 arch/s390/net/bpf_jit_comp.c 	} while (re <= 15);
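
In the s390 BPF JIT hits above, rs and re bound a run of callee-saved registers that save_regs()/restore_regs() cover with a single instruction: one store/load when rs == re, one store/load-multiple otherwise. A self-contained sketch of the run-coalescing loop, with a used[] array standing in for the JIT's get_start()/get_end() bookkeeping (their exact semantics are assumed here):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool used[16] = { false };             /* which of r6..r15 the program touches */
	used[6] = used[7] = used[12] = used[13] = used[14] = true;

	int re = 6, rs;
	do {
		rs = re;
		while (rs <= 15 && !used[rs])   /* role of get_start(): first used reg */
			rs++;
		if (rs > 15)
			break;
		re = rs;
		while (re < 15 && used[re + 1]) /* role of get_end(): end of the run   */
			re++;
		printf("cover r%d..r%d with one %s\n",
		       rs, re, rs == re ? "STG/LG" : "STMG/LMG");
		re++;
	} while (re <= 15);
	return 0;
}
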
re                 73 arch/sh/include/uapi/asm/ptrace_32.h 	unsigned long	re;
re                142 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 	const struct nvkm_enum *en, *re, *cl, *sc;
re                175 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 	re = nvkm_enum_find(vm_fault , st1);
re                191 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 		   st1, re ? re->name : "");
re                402 drivers/hid/hid-google-hammer.c 	struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
re                405 drivers/hid/hid-google-hammer.c 	list_for_each_entry(report, &re->report_list, list)
re                414 drivers/hid/hid-google-hammer.c 	struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
re                418 drivers/hid/hid-google-hammer.c 	list_for_each_entry(report, &re->report_list, list) {
re               3473 drivers/hid/hid-logitech-hidpp.c 	struct hid_report_enum *re;
re               3476 drivers/hid/hid-logitech-hidpp.c 	re = &(hdev->report_enum[HID_OUTPUT_REPORT]);
re               3477 drivers/hid/hid-logitech-hidpp.c 	report = re->report_id_hash[id];
re                554 drivers/hid/wacom_sys.c 	struct hid_report_enum *re;
re                559 drivers/hid/wacom_sys.c 	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
re                560 drivers/hid/wacom_sys.c 	r = re->report_id_hash[hid_data->inputmode];
re                573 drivers/hid/wacom_sys.c 	struct hid_report_enum *re;
re                580 drivers/hid/wacom_sys.c 	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
re                581 drivers/hid/wacom_sys.c 	r = re->report_id_hash[wacom_wac->mode_report];
re                463 drivers/hid/wacom_wac.c 	struct hid_report_enum *re;
re                465 drivers/hid/wacom_wac.c 	re = &(wacom->hdev->report_enum[HID_FEATURE_REPORT]);
re                467 drivers/hid/wacom_wac.c 		r = re->report_id_hash[WACOM_REPORT_INTUOSHT2_ID];
re                469 drivers/hid/wacom_wac.c 		r = re->report_id_hash[WACOM_REPORT_INTUOS_ID1];
re                191 drivers/iommu/intel-iommu.c static phys_addr_t root_entry_lctp(struct root_entry *re)
re                193 drivers/iommu/intel-iommu.c 	if (!(re->lo & 1))
re                196 drivers/iommu/intel-iommu.c 	return re->lo & VTD_PAGE_MASK;
re                203 drivers/iommu/intel-iommu.c static phys_addr_t root_entry_uctp(struct root_entry *re)
re                205 drivers/iommu/intel-iommu.c 	if (!(re->hi & 1))
re                208 drivers/iommu/intel-iommu.c 	return re->hi & VTD_PAGE_MASK;
re               2998 drivers/iommu/intel-iommu.c 	struct root_entry re;
re               3002 drivers/iommu/intel-iommu.c 	memcpy(&re, old_re, sizeof(re));
re               3022 drivers/iommu/intel-iommu.c 				old_ce_phys = root_entry_lctp(&re);
re               3024 drivers/iommu/intel-iommu.c 				old_ce_phys = root_entry_uctp(&re);
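
root_entry_lctp()/root_entry_uctp() above extract the lower/upper context-table pointer from a VT-d root entry: bit 0 is the present flag and the page-aligned physical address sits in the upper bits. A generic sketch of that decoding, with a hard-coded 4 KiB mask assumed in place of VTD_PAGE_MASK:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Decode one 64-bit table entry: present bit in bit 0, physical
 * pointer in bits 12 and up (4 KiB alignment assumed here). */
static phys_addr_t entry_table_ptr(uint64_t entry)
{
	if (!(entry & 1))                 /* not present */
		return 0;
	return entry & ~0xfffULL;         /* strip the low flag bits */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)entry_table_ptr(0x12345001ULL));
	return 0;
}
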
re               1038 drivers/media/dvb-frontends/drx39xyj/drx_driver.h 	s16 re;
re                684 drivers/mtd/ubi/cdev.c 	struct ubi_rename_entry *re, *re1;
re                730 drivers/mtd/ubi/cdev.c 		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
re                731 drivers/mtd/ubi/cdev.c 		if (!re) {
re                736 drivers/mtd/ubi/cdev.c 		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
re                737 drivers/mtd/ubi/cdev.c 		if (IS_ERR(re->desc)) {
re                738 drivers/mtd/ubi/cdev.c 			err = PTR_ERR(re->desc);
re                741 drivers/mtd/ubi/cdev.c 			kfree(re);
re                746 drivers/mtd/ubi/cdev.c 		if (re->desc->vol->name_len == name_len &&
re                747 drivers/mtd/ubi/cdev.c 		    !memcmp(re->desc->vol->name, name, name_len)) {
re                748 drivers/mtd/ubi/cdev.c 			ubi_close_volume(re->desc);
re                749 drivers/mtd/ubi/cdev.c 			kfree(re);
re                753 drivers/mtd/ubi/cdev.c 		re->new_name_len = name_len;
re                754 drivers/mtd/ubi/cdev.c 		memcpy(re->new_name, name, name_len);
re                755 drivers/mtd/ubi/cdev.c 		list_add_tail(&re->list, &rename_list);
re                757 drivers/mtd/ubi/cdev.c 			vol_id, re->desc->vol->name, name);
re                764 drivers/mtd/ubi/cdev.c 	list_for_each_entry(re, &rename_list, list) {
re                775 drivers/mtd/ubi/cdev.c 			if (re->new_name_len == re1->desc->vol->name_len &&
re                776 drivers/mtd/ubi/cdev.c 			    !memcmp(re->new_name, re1->desc->vol->name,
re                790 drivers/mtd/ubi/cdev.c 		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
re                800 drivers/mtd/ubi/cdev.c 				re->new_name, err);
re                823 drivers/mtd/ubi/cdev.c 	list_for_each_entry_safe(re, re1, &rename_list, list) {
re                824 drivers/mtd/ubi/cdev.c 		ubi_close_volume(re->desc);
re                825 drivers/mtd/ubi/cdev.c 		list_del(&re->list);
re                826 drivers/mtd/ubi/cdev.c 		kfree(re);
re                539 drivers/mtd/ubi/vmt.c 	struct ubi_rename_entry *re;
re                545 drivers/mtd/ubi/vmt.c 	list_for_each_entry(re, rename_list, list) {
re                546 drivers/mtd/ubi/vmt.c 		if (re->remove) {
re                547 drivers/mtd/ubi/vmt.c 			err = ubi_remove_volume(re->desc, 1);
re                551 drivers/mtd/ubi/vmt.c 			struct ubi_volume *vol = re->desc->vol;
re                554 drivers/mtd/ubi/vmt.c 			vol->name_len = re->new_name_len;
re                555 drivers/mtd/ubi/vmt.c 			memcpy(vol->name, re->new_name, re->new_name_len + 1);
re                124 drivers/mtd/ubi/vtbl.c 	struct ubi_rename_entry *re;
re                126 drivers/mtd/ubi/vtbl.c 	list_for_each_entry(re, rename_list, list) {
re                128 drivers/mtd/ubi/vtbl.c 		struct ubi_volume *vol = re->desc->vol;
re                131 drivers/mtd/ubi/vtbl.c 		if (re->remove) {
re                137 drivers/mtd/ubi/vtbl.c 		vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
re                138 drivers/mtd/ubi/vtbl.c 		memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
re                139 drivers/mtd/ubi/vtbl.c 		memset(vtbl_rec->name + re->new_name_len, 0,
re                140 drivers/mtd/ubi/vtbl.c 		       UBI_VOL_NAME_MAX + 1 - re->new_name_len);
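
The drivers/mtd/ubi hits above build a temporary list of rename requests (allocate, fill, add to rename_list), act on it, then tear it down with the deletion-safe iterator. A userspace sketch of that build-then-free pattern; a plain singly linked list and made-up field sizes stand in for the kernel's list_head and struct ubi_rename_entry:

#include <stdlib.h>
#include <string.h>

struct rename_entry {
	char new_name[128];
	struct rename_entry *next;
};

static struct rename_entry *rename_list;

static int queue_rename(const char *name)
{
	struct rename_entry *re = calloc(1, sizeof(*re));

	if (!re)
		return -1;
	strncpy(re->new_name, name, sizeof(re->new_name) - 1);
	re->next = rename_list;            /* prepend for brevity; the kernel appends */
	rename_list = re;
	return 0;
}

static void free_rename_list(void)
{
	struct rename_entry *re, *re1;

	for (re = rename_list; re; re = re1) {
		re1 = re->next;            /* re1 keeps the walk safe across free() */
		free(re);
	}
	rename_list = NULL;
}

int main(void)
{
	queue_rename("volume_a");
	queue_rename("volume_b");
	free_rename_list();
	return 0;
}
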
re               1729 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_registry_elem **re)
re               1763 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	*re = reg_elem;
re                218 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		union mgmt_port_ring_entry re;
re                230 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.d64 = 0;
re                231 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.s.len = size;
re                232 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.s.addr = dma_map_single(p->dev, skb->data,
re                237 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->rx_ring[p->rx_next_fill] = re.d64;
re                252 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	union mgmt_port_ring_entry re;
re                272 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.d64 = p->tx_ring[p->tx_next_clean];
re                286 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
re                290 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		if (unlikely(re.s.tstamp)) {
re                368 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	union mgmt_port_ring_entry re;
re                374 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = p->rx_ring[p->rx_next];
re                379 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, re.s.addr,
re                383 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	return re.d64;
re                391 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	union mgmt_port_ring_entry re;
re                399 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
re                400 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
re                402 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb_put(skb, re.s.len);
re                418 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
re                426 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb_put(skb, re.s.len);
re               1278 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	union mgmt_port_ring_entry re;
re               1282 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = 0;
re               1283 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
re               1284 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.len = skb->len;
re               1285 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.addr = dma_map_single(p->dev, skb->data,
re               1300 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
re               1308 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring[p->tx_next] = re.d64;
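
The octeon_mgmt hits above access each DMA descriptor through a union: re.d64 is the raw 64-bit word stored in the ring, and re.s.* are bitfield views of the same word (length, address, timestamp, completion code). A stripped-down sketch of that layout; the field widths below are illustrative, not the hardware's:

#include <stdint.h>
#include <stdio.h>

union ring_entry {
	uint64_t d64;                  /* raw word written to the ring        */
	struct {
		uint64_t len    : 16;  /* field widths here are illustrative  */
		uint64_t code   : 8;
		uint64_t tstamp : 1;
		uint64_t addr   : 39;
	} s;
};

int main(void)
{
	union ring_entry re;

	re.d64 = 0;
	re.s.len = 1500;
	re.s.addr = 0x1234000;
	printf("descriptor word: %#llx\n", (unsigned long long)re.d64);
	return 0;
}
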
re               1154 drivers/net/ethernet/marvell/sky2.c 	struct rx_ring_info *re;
re               1161 drivers/net/ethernet/marvell/sky2.c 	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
re               1195 drivers/net/ethernet/marvell/sky2.c 			   const struct rx_ring_info *re)
re               1199 drivers/net/ethernet/marvell/sky2.c 	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
re               1201 drivers/net/ethernet/marvell/sky2.c 	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
re               1202 drivers/net/ethernet/marvell/sky2.c 		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
re               1206 drivers/net/ethernet/marvell/sky2.c static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
re               1209 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb = re->skb;
re               1212 drivers/net/ethernet/marvell/sky2.c 	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
re               1213 drivers/net/ethernet/marvell/sky2.c 	if (pci_dma_mapping_error(pdev, re->data_addr))
re               1216 drivers/net/ethernet/marvell/sky2.c 	dma_unmap_len_set(re, data_size, size);
re               1221 drivers/net/ethernet/marvell/sky2.c 		re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
re               1225 drivers/net/ethernet/marvell/sky2.c 		if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
re               1232 drivers/net/ethernet/marvell/sky2.c 		pci_unmap_page(pdev, re->frag_addr[i],
re               1237 drivers/net/ethernet/marvell/sky2.c 	pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
re               1247 drivers/net/ethernet/marvell/sky2.c static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
re               1249 drivers/net/ethernet/marvell/sky2.c 	struct sk_buff *skb = re->skb;
re               1252 drivers/net/ethernet/marvell/sky2.c 	pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
re               1256 drivers/net/ethernet/marvell/sky2.c 		pci_unmap_page(pdev, re->frag_addr[i],
re               1353 drivers/net/ethernet/marvell/sky2.c 		struct rx_ring_info *re = sky2->rx_ring + i;
re               1355 drivers/net/ethernet/marvell/sky2.c 		if (re->skb) {
re               1356 drivers/net/ethernet/marvell/sky2.c 			sky2_rx_unmap_skb(sky2->hw->pdev, re);
re               1357 drivers/net/ethernet/marvell/sky2.c 			kfree_skb(re->skb);
re               1358 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
re               1492 drivers/net/ethernet/marvell/sky2.c 		struct rx_ring_info *re = sky2->rx_ring + i;
re               1494 drivers/net/ethernet/marvell/sky2.c 		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
re               1495 drivers/net/ethernet/marvell/sky2.c 		if (!re->skb)
re               1498 drivers/net/ethernet/marvell/sky2.c 		if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
re               1499 drivers/net/ethernet/marvell/sky2.c 			dev_kfree_skb(re->skb);
re               1500 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
re               1519 drivers/net/ethernet/marvell/sky2.c 	struct rx_ring_info *re;
re               1546 drivers/net/ethernet/marvell/sky2.c 		re = sky2->rx_ring + i;
re               1547 drivers/net/ethernet/marvell/sky2.c 		sky2_rx_submit(sky2, re);
re               1806 drivers/net/ethernet/marvell/sky2.c static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
re               1808 drivers/net/ethernet/marvell/sky2.c 	if (re->flags & TX_MAP_SINGLE)
re               1809 drivers/net/ethernet/marvell/sky2.c 		pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
re               1810 drivers/net/ethernet/marvell/sky2.c 				 dma_unmap_len(re, maplen),
re               1812 drivers/net/ethernet/marvell/sky2.c 	else if (re->flags & TX_MAP_PAGE)
re               1813 drivers/net/ethernet/marvell/sky2.c 		pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
re               1814 drivers/net/ethernet/marvell/sky2.c 			       dma_unmap_len(re, maplen),
re               1816 drivers/net/ethernet/marvell/sky2.c 	re->flags = 0;
re               1831 drivers/net/ethernet/marvell/sky2.c 	struct tx_ring_info *re;
re               1922 drivers/net/ethernet/marvell/sky2.c 	re = sky2->tx_ring + slot;
re               1923 drivers/net/ethernet/marvell/sky2.c 	re->flags = TX_MAP_SINGLE;
re               1924 drivers/net/ethernet/marvell/sky2.c 	dma_unmap_addr_set(re, mapaddr, mapping);
re               1925 drivers/net/ethernet/marvell/sky2.c 	dma_unmap_len_set(re, maplen, len);
re               1951 drivers/net/ethernet/marvell/sky2.c 		re = sky2->tx_ring + slot;
re               1952 drivers/net/ethernet/marvell/sky2.c 		re->flags = TX_MAP_PAGE;
re               1953 drivers/net/ethernet/marvell/sky2.c 		dma_unmap_addr_set(re, mapaddr, mapping);
re               1954 drivers/net/ethernet/marvell/sky2.c 		dma_unmap_len_set(re, maplen, skb_frag_size(frag));
re               1963 drivers/net/ethernet/marvell/sky2.c 	re->skb = skb;
re               1978 drivers/net/ethernet/marvell/sky2.c 		re = sky2->tx_ring + i;
re               1980 drivers/net/ethernet/marvell/sky2.c 		sky2_tx_unmap(hw->pdev, re);
re               2010 drivers/net/ethernet/marvell/sky2.c 		struct tx_ring_info *re = sky2->tx_ring + idx;
re               2011 drivers/net/ethernet/marvell/sky2.c 		struct sk_buff *skb = re->skb;
re               2013 drivers/net/ethernet/marvell/sky2.c 		sky2_tx_unmap(sky2->hw->pdev, re);
re               2022 drivers/net/ethernet/marvell/sky2.c 			re->skb = NULL;
re               2447 drivers/net/ethernet/marvell/sky2.c static inline bool needs_copy(const struct rx_ring_info *re,
re               2452 drivers/net/ethernet/marvell/sky2.c 	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
re               2460 drivers/net/ethernet/marvell/sky2.c 				    const struct rx_ring_info *re,
re               2467 drivers/net/ethernet/marvell/sky2.c 		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
re               2469 drivers/net/ethernet/marvell/sky2.c 		skb_copy_from_linear_data(re->skb, skb->data, length);
re               2470 drivers/net/ethernet/marvell/sky2.c 		skb->ip_summed = re->skb->ip_summed;
re               2471 drivers/net/ethernet/marvell/sky2.c 		skb->csum = re->skb->csum;
re               2472 drivers/net/ethernet/marvell/sky2.c 		skb_copy_hash(skb, re->skb);
re               2473 drivers/net/ethernet/marvell/sky2.c 		__vlan_hwaccel_copy_tag(skb, re->skb);
re               2475 drivers/net/ethernet/marvell/sky2.c 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
re               2477 drivers/net/ethernet/marvell/sky2.c 		__vlan_hwaccel_clear_tag(re->skb);
re               2478 drivers/net/ethernet/marvell/sky2.c 		skb_clear_hash(re->skb);
re               2479 drivers/net/ethernet/marvell/sky2.c 		re->skb->ip_summed = CHECKSUM_NONE;
re               2520 drivers/net/ethernet/marvell/sky2.c 				   struct rx_ring_info *re,
re               2534 drivers/net/ethernet/marvell/sky2.c 	skb = re->skb;
re               2535 drivers/net/ethernet/marvell/sky2.c 	sky2_rx_unmap_skb(sky2->hw->pdev, re);
re               2537 drivers/net/ethernet/marvell/sky2.c 	*re = nre;
re               2559 drivers/net/ethernet/marvell/sky2.c 	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
re               2570 drivers/net/ethernet/marvell/sky2.c 	if (skb_vlan_tag_present(re->skb))
re               2593 drivers/net/ethernet/marvell/sky2.c 	if (needs_copy(re, length))
re               2594 drivers/net/ethernet/marvell/sky2.c 		skb = receive_copy(sky2, re, length);
re               2596 drivers/net/ethernet/marvell/sky2.c 		skb = receive_new(sky2, re, length);
re               2601 drivers/net/ethernet/marvell/sky2.c 	sky2_rx_submit(sky2, re);
re                163 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	s16 re;
re                168 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	u16 re;
re               3537 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	cc.re = 0;
re               3542 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.re = a;
re               3549 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.re = (u16) di0;
re               3554 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.re = (u16) ei;
re               3559 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.re = (u16) fi;
re               3716 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	phy_c15 = (s16) phy_c3.re;
re               3719 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		if (phy_c3.re > 127)
re               3720 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			phy_c15 = phy_c3.re - 256;
re               3801 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 					phy_c2.re = phy_c1[phy_c6].re;
re               3804 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 					phy_c18 = phy_c18 + phy_c17 * phy_c2.re;
re                234 drivers/pci/hotplug/ibmphp_ebda.c 	u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
re                295 drivers/pci/hotplug/ibmphp_ebda.c 			re = readw(io_mem + sub_addr);	/* next sub blk */
re                320 drivers/pci/hotplug/ibmphp_ebda.c 			sub_addr = base + re;	/* re sub blk */
re                419 drivers/thunderbolt/property.c 		struct tb_property_rootdir_entry *re;
re                421 drivers/thunderbolt/property.c 		re = (struct tb_property_rootdir_entry *)&block[start_offset];
re                422 drivers/thunderbolt/property.c 		re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
re                423 drivers/thunderbolt/property.c 		re->length = dir_len - sizeof(*re) / 4;
re                424 drivers/thunderbolt/property.c 		entry = re->entries;
re                157 drivers/virtio/virtio_input.c 	u32 mi, ma, re, fu, fl;
re                162 drivers/virtio/virtio_input.c 	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
re                166 drivers/virtio/virtio_input.c 	input_abs_set_res(vi->idev, abs, re);
re                 95 fs/btrfs/reada.c 			     struct reada_extent *re, struct extent_buffer *eb,
re                104 fs/btrfs/reada.c 	spin_lock(&re->lock);
re                109 fs/btrfs/reada.c 	list_replace_init(&re->extctl, &list);
re                110 fs/btrfs/reada.c 	re->scheduled = 0;
re                111 fs/btrfs/reada.c 	spin_unlock(&re->lock);
re                143 fs/btrfs/reada.c 			next_key = re->top;
re                192 fs/btrfs/reada.c 		reada_extent_put(fs_info, re);	/* one ref for each entry */
re                202 fs/btrfs/reada.c 	struct reada_extent *re;
re                206 fs/btrfs/reada.c 	re = radix_tree_lookup(&fs_info->reada_tree,
re                208 fs/btrfs/reada.c 	if (re)
re                209 fs/btrfs/reada.c 		re->refcnt++;
re                211 fs/btrfs/reada.c 	if (!re) {
re                216 fs/btrfs/reada.c 	__readahead_hook(fs_info, re, eb, err);
re                217 fs/btrfs/reada.c 	reada_extent_put(fs_info, re);	/* our ref */
re                304 fs/btrfs/reada.c 	struct reada_extent *re = NULL;
re                317 fs/btrfs/reada.c 	re = radix_tree_lookup(&fs_info->reada_tree, index);
re                318 fs/btrfs/reada.c 	if (re)
re                319 fs/btrfs/reada.c 		re->refcnt++;
re                322 fs/btrfs/reada.c 	if (re)
re                323 fs/btrfs/reada.c 		return re;
re                325 fs/btrfs/reada.c 	re = kzalloc(sizeof(*re), GFP_KERNEL);
re                326 fs/btrfs/reada.c 	if (!re)
re                329 fs/btrfs/reada.c 	re->logical = logical;
re                330 fs/btrfs/reada.c 	re->top = *top;
re                331 fs/btrfs/reada.c 	INIT_LIST_HEAD(&re->extctl);
re                332 fs/btrfs/reada.c 	spin_lock_init(&re->lock);
re                333 fs/btrfs/reada.c 	re->refcnt = 1;
re                365 fs/btrfs/reada.c 		re->zones[re->nzones++] = zone;
re                375 fs/btrfs/reada.c 	if (re->nzones == 0) {
re                389 fs/btrfs/reada.c 	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
re                408 fs/btrfs/reada.c 	for (nzones = 0; nzones < re->nzones; ++nzones) {
re                409 fs/btrfs/reada.c 		dev = re->zones[nzones]->device;
re                433 fs/btrfs/reada.c 		ret = radix_tree_insert(&dev->reada_extents, index, re);
re                436 fs/btrfs/reada.c 				dev = re->zones[nzones]->device;
re                455 fs/btrfs/reada.c 	return re;
re                458 fs/btrfs/reada.c 	for (nzones = 0; nzones < re->nzones; ++nzones) {
re                461 fs/btrfs/reada.c 		zone = re->zones[nzones];
re                479 fs/btrfs/reada.c 	kfree(re);
re                484 fs/btrfs/reada.c 			     struct reada_extent *re)
re                487 fs/btrfs/reada.c 	unsigned long index = re->logical >> PAGE_SHIFT;
re                490 fs/btrfs/reada.c 	if (--re->refcnt) {
re                496 fs/btrfs/reada.c 	for (i = 0; i < re->nzones; ++i) {
re                497 fs/btrfs/reada.c 		struct reada_zone *zone = re->zones[i];
re                504 fs/btrfs/reada.c 	for (i = 0; i < re->nzones; ++i) {
re                505 fs/btrfs/reada.c 		struct reada_zone *zone = re->zones[i];
re                522 fs/btrfs/reada.c 	kfree(re);
re                547 fs/btrfs/reada.c 	struct reada_extent *re;
re                551 fs/btrfs/reada.c 	re = reada_find_extent(fs_info, logical, top);
re                552 fs/btrfs/reada.c 	if (!re)
re                557 fs/btrfs/reada.c 		reada_extent_put(fs_info, re);
re                565 fs/btrfs/reada.c 	spin_lock(&re->lock);
re                566 fs/btrfs/reada.c 	list_add_tail(&rec->list, &re->extctl);
re                567 fs/btrfs/reada.c 	spin_unlock(&re->lock);
re                674 fs/btrfs/reada.c 	struct reada_extent *re = NULL;
re                694 fs/btrfs/reada.c 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
re                696 fs/btrfs/reada.c 	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
re                702 fs/btrfs/reada.c 		re = NULL;
re                703 fs/btrfs/reada.c 		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
re                710 fs/btrfs/reada.c 	dev->reada_next = re->logical + fs_info->nodesize;
re                711 fs/btrfs/reada.c 	re->refcnt++;
re                715 fs/btrfs/reada.c 	spin_lock(&re->lock);
re                716 fs/btrfs/reada.c 	if (re->scheduled || list_empty(&re->extctl)) {
re                717 fs/btrfs/reada.c 		spin_unlock(&re->lock);
re                718 fs/btrfs/reada.c 		reada_extent_put(fs_info, re);
re                721 fs/btrfs/reada.c 	re->scheduled = 1;
re                722 fs/btrfs/reada.c 	spin_unlock(&re->lock);
re                727 fs/btrfs/reada.c 	for (i = 0; i < re->nzones; ++i) {
re                728 fs/btrfs/reada.c 		if (re->zones[i]->device == dev) {
re                733 fs/btrfs/reada.c 	logical = re->logical;
re                738 fs/btrfs/reada.c 		__readahead_hook(fs_info, re, NULL, ret);
re                740 fs/btrfs/reada.c 		__readahead_hook(fs_info, re, eb, ret);
re                746 fs/btrfs/reada.c 	reada_extent_put(fs_info, re);
re                867 fs/btrfs/reada.c 			struct reada_extent *re = NULL;
re                870 fs/btrfs/reada.c 						     (void **)&re, index, 1);
re                874 fs/btrfs/reada.c 				re->logical, fs_info->nodesize,
re                875 fs/btrfs/reada.c 				list_empty(&re->extctl), re->scheduled);
re                877 fs/btrfs/reada.c 			for (i = 0; i < re->nzones; ++i) {
re                879 fs/btrfs/reada.c 					re->zones[i]->start,
re                880 fs/btrfs/reada.c 					re->zones[i]->end);
re                881 fs/btrfs/reada.c 				for (j = 0; j < re->zones[i]->ndevs; ++j) {
re                883 fs/btrfs/reada.c 						re->zones[i]->devs[j]->devid);
re                887 fs/btrfs/reada.c 			index = (re->logical >> PAGE_SHIFT) + 1;
re                896 fs/btrfs/reada.c 		struct reada_extent *re = NULL;
re                898 fs/btrfs/reada.c 		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
re                902 fs/btrfs/reada.c 		if (!re->scheduled) {
re                903 fs/btrfs/reada.c 			index = (re->logical >> PAGE_SHIFT) + 1;
re                907 fs/btrfs/reada.c 			re->logical, fs_info->nodesize,
re                908 fs/btrfs/reada.c 			list_empty(&re->extctl), re->scheduled);
re                909 fs/btrfs/reada.c 		for (i = 0; i < re->nzones; ++i) {
re                911 fs/btrfs/reada.c 				re->zones[i]->start,
re                912 fs/btrfs/reada.c 				re->zones[i]->end);
re                913 fs/btrfs/reada.c 			for (j = 0; j < re->zones[i]->ndevs; ++j) {
re                915 fs/btrfs/reada.c 				       re->zones[i]->devs[j]->devid);
re                919 fs/btrfs/reada.c 		index = (re->logical >> PAGE_SHIFT) + 1;
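
The fs/btrfs/reada.c hits above follow a lookup-takes-a-reference rule: a radix-tree hit bumps re->refcnt under the lock, and reada_extent_put() frees the extent only when the final reference drops. A minimal userspace sketch of that get/put discipline, with a pthread mutex standing in for the kernel locking and the radix-tree unlink elided:

#include <pthread.h>
#include <stdlib.h>

struct extent {
	int refcnt;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct extent *extent_get(struct extent *e)
{
	pthread_mutex_lock(&lock);
	if (e)
		e->refcnt++;            /* as after the radix_tree_lookup() hits above */
	pthread_mutex_unlock(&lock);
	return e;
}

static void extent_put(struct extent *e)
{
	pthread_mutex_lock(&lock);
	if (--e->refcnt) {              /* someone else still holds a reference */
		pthread_mutex_unlock(&lock);
		return;
	}
	pthread_mutex_unlock(&lock);    /* last reference: unlink (elided) and free */
	free(e);
}

int main(void)
{
	struct extent *e = calloc(1, sizeof(*e));

	e->refcnt = 1;                  /* creator's reference */
	extent_get(e);                  /* a lookup takes another one */
	extent_put(e);                  /* drop the lookup reference  */
	extent_put(e);                  /* drop the last one and free */
	return 0;
}
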
re                117 fs/btrfs/ref-verify.c 					    struct root_entry *re)
re                126 fs/btrfs/ref-verify.c 		if (entry->root_objectid > re->root_objectid)
re                128 fs/btrfs/ref-verify.c 		else if (entry->root_objectid < re->root_objectid)
re                134 fs/btrfs/ref-verify.c 	rb_link_node(&re->node, parent_node, p);
re                135 fs/btrfs/ref-verify.c 	rb_insert_color(&re->node, root);
re                234 fs/btrfs/ref-verify.c 	struct root_entry *re;
re                240 fs/btrfs/ref-verify.c 		re = rb_entry(n, struct root_entry, node);
re                241 fs/btrfs/ref-verify.c 		rb_erase(&re->node, &be->roots);
re                242 fs/btrfs/ref-verify.c 		kfree(re);
re                265 fs/btrfs/ref-verify.c 	struct root_entry *re = NULL;
re                267 fs/btrfs/ref-verify.c 	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
re                269 fs/btrfs/ref-verify.c 	if (!be || !re) {
re                270 fs/btrfs/ref-verify.c 		kfree(re);
re                277 fs/btrfs/ref-verify.c 	re->root_objectid = root_objectid;
re                278 fs/btrfs/ref-verify.c 	re->num_refs = 0;
re                286 fs/btrfs/ref-verify.c 			exist_re = insert_root_entry(&exist->roots, re);
re                288 fs/btrfs/ref-verify.c 				kfree(re);
re                301 fs/btrfs/ref-verify.c 		insert_root_entry(&be->roots, re);
re                303 fs/btrfs/ref-verify.c 		kfree(re);
re                311 fs/btrfs/ref-verify.c 	struct root_entry *re;
re                338 fs/btrfs/ref-verify.c 		re = lookup_root_entry(&be->roots, ref_root);
re                339 fs/btrfs/ref-verify.c 		ASSERT(re);
re                340 fs/btrfs/ref-verify.c 		re->num_refs++;
re                388 fs/btrfs/ref-verify.c 	struct root_entry *re;
re                416 fs/btrfs/ref-verify.c 	re = lookup_root_entry(&be->roots, ref_root);
re                417 fs/btrfs/ref-verify.c 	if (!re) {
re                422 fs/btrfs/ref-verify.c 	re->num_refs += num_refs;
re                634 fs/btrfs/ref-verify.c 	struct root_entry *re;
re                652 fs/btrfs/ref-verify.c 		re = rb_entry(n, struct root_entry, node);
re                654 fs/btrfs/ref-verify.c 			  re->root_objectid, re->num_refs);
re                675 fs/btrfs/ref-verify.c 	struct root_entry *re = NULL;
re                778 fs/btrfs/ref-verify.c 			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
re                779 fs/btrfs/ref-verify.c 			if (!re) {
re                791 fs/btrfs/ref-verify.c 			re->root_objectid = generic_ref->real_root;
re                792 fs/btrfs/ref-verify.c 			re->num_refs = 0;
re                809 fs/btrfs/ref-verify.c 			tmp = insert_root_entry(&be->roots, re);
re                811 fs/btrfs/ref-verify.c 				kfree(re);
re                812 fs/btrfs/ref-verify.c 				re = tmp;
re                857 fs/btrfs/ref-verify.c 	if (!parent && !re) {
re                858 fs/btrfs/ref-verify.c 		re = lookup_root_entry(&be->roots, ref_root);
re                859 fs/btrfs/ref-verify.c 		if (!re) {
re                875 fs/btrfs/ref-verify.c 		if (re)
re                876 fs/btrfs/ref-verify.c 			re->num_refs--;
re                880 fs/btrfs/ref-verify.c 		if (re)
re                881 fs/btrfs/ref-verify.c 			re->num_refs++;
re                 34 fs/f2fs/extent_cache.c 	struct rb_entry *re;
re                 37 fs/f2fs/extent_cache.c 		re = rb_entry(node, struct rb_entry, rb_node);
re                 39 fs/f2fs/extent_cache.c 		if (ofs < re->ofs)
re                 41 fs/f2fs/extent_cache.c 		else if (ofs >= re->ofs + re->len)
re                 44 fs/f2fs/extent_cache.c 			return re;
re                 52 fs/f2fs/extent_cache.c 	struct rb_entry *re;
re                 54 fs/f2fs/extent_cache.c 	re = __lookup_rb_tree_fast(cached_re, ofs);
re                 55 fs/f2fs/extent_cache.c 	if (!re)
re                 58 fs/f2fs/extent_cache.c 	return re;
re                 67 fs/f2fs/extent_cache.c 	struct rb_entry *re;
re                 71 fs/f2fs/extent_cache.c 		re = rb_entry(*parent, struct rb_entry, rb_node);
re                 73 fs/f2fs/extent_cache.c 		if (ofs < re->ofs) {
re                 75 fs/f2fs/extent_cache.c 		} else if (ofs >= re->ofs + re->len) {
re                106 fs/f2fs/extent_cache.c 	struct rb_entry *re = cached_re;
re                116 fs/f2fs/extent_cache.c 	if (re) {
re                117 fs/f2fs/extent_cache.c 		if (re->ofs <= ofs && re->ofs + re->len > ofs)
re                126 fs/f2fs/extent_cache.c 		re = rb_entry(*pnode, struct rb_entry, rb_node);
re                128 fs/f2fs/extent_cache.c 		if (ofs < re->ofs) {
re                130 fs/f2fs/extent_cache.c 		} else if (ofs >= re->ofs + re->len) {
re                142 fs/f2fs/extent_cache.c 	re = rb_entry(parent, struct rb_entry, rb_node);
re                144 fs/f2fs/extent_cache.c 	if (parent && ofs > re->ofs)
re                149 fs/f2fs/extent_cache.c 	if (parent && ofs < re->ofs)
re                155 fs/f2fs/extent_cache.c 	if (ofs == re->ofs || force) {
re                157 fs/f2fs/extent_cache.c 		tmp_node = rb_prev(&re->rb_node);
re                160 fs/f2fs/extent_cache.c 	if (ofs == re->ofs + re->len - 1 || force) {
re                162 fs/f2fs/extent_cache.c 		tmp_node = rb_next(&re->rb_node);
re                165 fs/f2fs/extent_cache.c 	return re;
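
The f2fs extent-cache hits above search an rbtree of extents keyed by file offset: go left when ofs is below re->ofs, right when it is at or past re->ofs + re->len, and stop when it lands inside the extent. The same lookup over a plain binary search tree, with a hypothetical node type in place of the kernel's rb_entry/rb_node:

#include <stddef.h>

struct ext_node {
	unsigned int ofs;             /* start offset of the extent   */
	unsigned int len;             /* covers [ofs, ofs + len)      */
	struct ext_node *left, *right;
};

static struct ext_node *lookup_extent(struct ext_node *node, unsigned int ofs)
{
	while (node) {
		if (ofs < node->ofs)
			node = node->left;
		else if (ofs >= node->ofs + node->len)
			node = node->right;
		else
			return node;  /* ofs falls inside this extent */
	}
	return NULL;                  /* no cached extent covers ofs  */
}

int main(void)
{
	struct ext_node a = { 0, 4, NULL, NULL };
	struct ext_node b = { 10, 6, NULL, NULL };
	struct ext_node root = { 4, 6, &a, &b };

	return lookup_extent(&root, 12) == &b ? 0 : 1;
}
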
re               1911 kernel/trace/trace_events_filter.c 	char *str, **re;
re               1923 kernel/trace/trace_events_filter.c 	re = argv_split(GFP_KERNEL, str, count);
re               1925 kernel/trace/trace_events_filter.c 	return re;
re               1929 kernel/trace/trace_events_filter.c 				      int reset, char *re, int len)
re               1934 kernel/trace/trace_events_filter.c 		ret = ftrace_set_filter(ops, re, len, reset);
re               1936 kernel/trace/trace_events_filter.c 		ret = ftrace_set_notrace(ops, re, len, reset);
re               1946 kernel/trace/trace_events_filter.c 	char **re;
re               1955 kernel/trace/trace_events_filter.c 	re = ftrace_function_filter_re(buf, len, &re_cnt);
re               1956 kernel/trace/trace_events_filter.c 	if (!re)
re               1961 kernel/trace/trace_events_filter.c 						 re[i], strlen(re[i]));
re               1969 kernel/trace/trace_events_filter.c 	argv_free(re);
re                273 mm/percpu.c    static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
re                276 mm/percpu.c    	*re = find_next_bit(bitmap, end, *rs + 1);
re                279 mm/percpu.c    static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
re                282 mm/percpu.c    	*re = find_next_zero_bit(bitmap, end, *rs + 1);
re                290 mm/percpu.c    #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
re                291 mm/percpu.c    	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
re                292 mm/percpu.c    	     (rs) < (re);						     \
re                293 mm/percpu.c    	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
re                295 mm/percpu.c    #define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
re                296 mm/percpu.c    	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
re                297 mm/percpu.c    	     (rs) < (re);						     \
re                298 mm/percpu.c    	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
re                752 mm/percpu.c    	int rs, re, start;	/* region start, region end */
re                768 mm/percpu.c    	pcpu_for_each_unpop_region(alloc_map, rs, re, start,
re                770 mm/percpu.c    		pcpu_block_update(block, rs, re);
re               1044 mm/percpu.c    	int page_start, page_end, rs, re;
re               1050 mm/percpu.c    	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
re               1054 mm/percpu.c    	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
re               1705 mm/percpu.c    		int page_start, page_end, rs, re;
re               1710 mm/percpu.c    		pcpu_for_each_unpop_region(chunk->populated, rs, re,
re               1714 mm/percpu.c    			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
re               1722 mm/percpu.c    			pcpu_chunk_populated(chunk, rs, re);
re               1861 mm/percpu.c    		int rs, re;
re               1863 mm/percpu.c    		pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
re               1865 mm/percpu.c    			pcpu_depopulate_chunk(chunk, rs, re);
re               1867 mm/percpu.c    			pcpu_chunk_depopulated(chunk, rs, re);
re               1896 mm/percpu.c    		int nr_unpop = 0, rs, re;
re               1913 mm/percpu.c    		pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
re               1915 mm/percpu.c    			int nr = min(re - rs, nr_to_pop);
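
The mm/percpu.c hits above walk a chunk's populated bitmap in maximal runs: pcpu_next_unpop() moves rs to the next clear bit and re to the set bit that ends the run, and pcpu_for_each_unpop_region() loops while rs < re, restarting from re + 1. A standalone rendering of that iteration, with byte arrays and small helpers standing in for the kernel bitmap and find_next_bit()/find_next_zero_bit():

#include <stdio.h>

static int next_set(const unsigned char *map, int from, int end)
{
	while (from < end && !map[from])
		from++;
	return from < end ? from : end;
}

static int next_clear(const unsigned char *map, int from, int end)
{
	while (from < end && map[from])
		from++;
	return from < end ? from : end;
}

/* Mirrors pcpu_next_unpop(): rs -> start of the next clear run, re -> its end. */
static void next_unpop(const unsigned char *map, int *rs, int *re, int end)
{
	*rs = next_clear(map, *rs, end);
	*re = next_set(map, *rs + 1, end);
}

int main(void)
{
	unsigned char populated[10] = { 1, 1, 0, 0, 1, 0, 0, 0, 1, 1 };
	int rs, re, end = 10;

	for (rs = 0, next_unpop(populated, &rs, &re, end);
	     rs < re;
	     rs = re + 1, next_unpop(populated, &rs, &re, end))
		printf("unpopulated region [%d, %d)\n", rs, re);
	return 0;
}
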
re                228 net/can/gw.c   static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
re                245 net/can/gw.c   	    re >= -dlen && re < dlen)
re                520 net/xfrm/xfrm_user.c 	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
re                525 net/xfrm/xfrm_user.c 	if (re) {
re                527 net/xfrm/xfrm_user.c 		replay_esn = nla_data(re);
re               2091 net/xfrm/xfrm_user.c 	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
re               2096 net/xfrm/xfrm_user.c 	if (!lt && !rp && !re && !et && !rt)
re               2112 net/xfrm/xfrm_user.c 	err = xfrm_replay_verify_len(x->replay_esn, re);
re                189 scripts/dtc/data.c 	struct fdt_reserve_entry re;
re                191 scripts/dtc/data.c 	re.address = cpu_to_fdt64(address);
re                192 scripts/dtc/data.c 	re.size = cpu_to_fdt64(size);
re                194 scripts/dtc/data.c 	return data_append_data(d, &re, sizeof(re));
re                 39 scripts/dtc/dtc-parser.y 	struct reserve_info *re;
re                 66 scripts/dtc/dtc-parser.y %type <re> memreserve
re                 67 scripts/dtc/dtc-parser.y %type <re> memreserves
re                296 scripts/dtc/flattree.c 	struct reserve_info *re;
re                300 scripts/dtc/flattree.c 	for (re = reservelist; re; re = re->next) {
re                301 scripts/dtc/flattree.c 		d = data_append_re(d, re->address, re->size);
re                451 scripts/dtc/flattree.c 	struct reserve_info *re;
re                516 scripts/dtc/flattree.c 	for (re = dti->reservelist; re; re = re->next) {
re                519 scripts/dtc/flattree.c 		for_each_label(re->labels, l) {
re                523 scripts/dtc/flattree.c 		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->address >> 32));
re                525 scripts/dtc/flattree.c 				(unsigned int)(re->address & 0xffffffff));
re                526 scripts/dtc/flattree.c 		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->size >> 32));
re                527 scripts/dtc/flattree.c 		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->size & 0xffffffff));
re                688 scripts/dtc/flattree.c 	struct fdt_reserve_entry re;
re                699 scripts/dtc/flattree.c 		flat_read_chunk(inb, &re, sizeof(re));
re                700 scripts/dtc/flattree.c 		address  = fdt64_to_cpu(re.address);
re                701 scripts/dtc/flattree.c 		size = fdt64_to_cpu(re.size);
re                162 scripts/dtc/libfdt/fdt_ro.c 	const struct fdt_reserve_entry *re;
re                165 scripts/dtc/libfdt/fdt_ro.c 	re = fdt_mem_rsv(fdt, n);
re                166 scripts/dtc/libfdt/fdt_ro.c 	if (!re)
re                169 scripts/dtc/libfdt/fdt_ro.c 	*address = fdt64_ld(&re->address);
re                170 scripts/dtc/libfdt/fdt_ro.c 	*size = fdt64_ld(&re->size);
re                177 scripts/dtc/libfdt/fdt_ro.c 	const struct fdt_reserve_entry *re;
re                179 scripts/dtc/libfdt/fdt_ro.c 	for (i = 0; (re = fdt_mem_rsv(fdt, i)) != NULL; i++) {
re                180 scripts/dtc/libfdt/fdt_ro.c 		if (fdt64_ld(&re->size) == 0)
re                143 scripts/dtc/libfdt/fdt_rw.c 	struct fdt_reserve_entry *re;
re                148 scripts/dtc/libfdt/fdt_rw.c 	re = fdt_mem_rsv_w_(fdt, fdt_num_mem_rsv(fdt));
re                149 scripts/dtc/libfdt/fdt_rw.c 	err = fdt_splice_mem_rsv_(fdt, re, 0, 1);
re                153 scripts/dtc/libfdt/fdt_rw.c 	re->address = cpu_to_fdt64(address);
re                154 scripts/dtc/libfdt/fdt_rw.c 	re->size = cpu_to_fdt64(size);
re                160 scripts/dtc/libfdt/fdt_rw.c 	struct fdt_reserve_entry *re = fdt_mem_rsv_w_(fdt, n);
re                167 scripts/dtc/libfdt/fdt_rw.c 	return fdt_splice_mem_rsv_(fdt, re, 1, 0);
re                182 scripts/dtc/libfdt/fdt_sw.c 	struct fdt_reserve_entry *re;
re                188 scripts/dtc/libfdt/fdt_sw.c 	if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
re                191 scripts/dtc/libfdt/fdt_sw.c 	re = (struct fdt_reserve_entry *)((char *)fdt + offset);
re                192 scripts/dtc/libfdt/fdt_sw.c 	re->address = cpu_to_fdt64(addr);
re                193 scripts/dtc/libfdt/fdt_sw.c 	re->size = cpu_to_fdt64(size);
re                195 scripts/dtc/libfdt/fdt_sw.c 	fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
re                330 scripts/dtc/treesource.c 	struct reserve_info *re;
re                334 scripts/dtc/treesource.c 	for (re = dti->reservelist; re; re = re->next) {
re                337 scripts/dtc/treesource.c 		for_each_label(re->labels, l)
re                340 scripts/dtc/treesource.c 			(unsigned long long)re->address,
re                341 scripts/dtc/treesource.c 			(unsigned long long)re->size);
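
Across the dtc and libfdt hits above, a /memreserve/ record is the 16-byte struct fdt_reserve_entry: a 64-bit address and a 64-bit size, both stored big-endian via cpu_to_fdt64(). A host-independent sketch of emitting one such record; put_be64() is a stand-in for the libfdt byte-order helpers:

#include <stdint.h>
#include <stdio.h>

/* Store v at p in big-endian byte order, whatever the host endianness. */
static void put_be64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (56 - 8 * i));
}

/* Append one memreserve record: { address, size }, both big-endian. */
static void append_reserve(uint8_t *buf, uint64_t address, uint64_t size)
{
	put_be64(buf, address);       /* like cpu_to_fdt64(address) */
	put_be64(buf + 8, size);      /* like cpu_to_fdt64(size)    */
}

int main(void)
{
	uint8_t rec[16];

	append_reserve(rec, 0x80000000ULL, 0x10000ULL);
	for (int i = 0; i < 16; i++)
		printf("%02x%s", rec[i], i == 15 ? "\n" : " ");
	return 0;
}
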
re               1182 scripts/kconfig/qconf.cc 	QRegExp re("[<>&\"\\n]");
re               1184 scripts/kconfig/qconf.cc 	for (int i = 0; (i = res.indexOf(re, i)) >= 0;) {
re                953 scripts/kconfig/symbol.c 	regex_t re;
re                960 scripts/kconfig/symbol.c 	if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE))
re                966 scripts/kconfig/symbol.c 		if (regexec(&re, sym->name, 1, match, 0))
re                996 scripts/kconfig/symbol.c 	regfree(&re);
re                953 sound/soc/codecs/tscs42xx.c 		rc, r12, r1b_h, re, rf, r10, r11)	\
re                966 sound/soc/codecs/tscs42xx.c 			{R_PLLCTLE,   re,   0xFF},	\
re                102 tools/perf/arch/x86/util/header.c 	regex_t re;
re                118 tools/perf/arch/x86/util/header.c 	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
re                124 tools/perf/arch/x86/util/header.c 	match = !regexec(&re, id, 1, pmatch, 0);
re                125 tools/perf/arch/x86/util/header.c 	regfree(&re);
re                825 tools/perf/util/header.c 	regex_t re;
re                829 tools/perf/util/header.c 	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
re                835 tools/perf/util/header.c 	match = !regexec(&re, cpuid, 1, pmatch, 0);
re                836 tools/perf/util/header.c 	regfree(&re);
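
The scripts/kconfig and tools/perf hits above share the POSIX regcomp()/regexec()/regfree() idiom: compile an extended regular expression, test a string against it, free the compiled pattern. A standalone version of the cpuid-matching variant; the pattern and cpuid strings below are made-up examples:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t re;
	regmatch_t pmatch[1];
	const char *mapcpuid = "GenuineIntel-6-[45][ABCDEF]";    /* example pattern */
	const char *cpuid = "GenuineIntel-6-4E";                 /* example id      */

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		fprintf(stderr, "regexp '%s' failed to compile\n", mapcpuid);
		return 1;
	}

	int match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);                    /* always release the compiled pattern */

	printf("'%s' %s '%s'\n", cpuid, match ? "matches" : "does not match", mapcpuid);
	return 0;
}
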