rp                426 arch/arc/kernel/kprobes.c 		if (ri->rp && ri->rp->handler)
rp                427 arch/arc/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                464 arch/arm/probes/kprobes/core.c 		if (ri->rp && ri->rp->handler) {
rp                465 arch/arm/probes/kprobes/core.c 			__this_cpu_write(current_kprobe, &ri->rp->kp);
rp                468 arch/arm/probes/kprobes/core.c 			ri->rp->handler(ri, regs);
rp                526 arch/arm64/kernel/probes/kprobes.c 		if (ri->rp && ri->rp->handler) {
rp                527 arch/arm64/kernel/probes/kprobes.c 			__this_cpu_write(current_kprobe, &ri->rp->kp);
rp                530 arch/arm64/kernel/probes/kprobes.c 			ri->rp->handler(ri, regs);
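Every kretprobe trampoline handler above follows the same shape: if the instance's rp back-pointer and its handler are set, the return handler registered by the probe owner is invoked (the arm/arm64 variants, like the parisc/s390/sh/x86 ones further down, also publish ri->rp->kp as current_kprobe first). A minimal sketch of the registering side, assuming CONFIG_KRETPROBES and using "_do_fork" purely as an illustrative target symbol:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* runs on function return; this is what ri->rp->handler(ri, regs) calls */
	pr_info("probed function returned %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.kp.symbol_name	= "_do_fork",	/* illustrative target */
	.maxactive	= 16,
};

static int __init my_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");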
rp                  7 arch/c6x/include/asm/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 10 arch/c6x/include/asm/flat.h 	*addr = get_unaligned((__force u32 *)rp);
rp                 13 arch/c6x/include/asm/flat.h static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
rp                 15 arch/c6x/include/asm/flat.h 	put_unaligned(addr, (__force u32 *)rp);
rp                 19 arch/h8300/include/asm/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 22 arch/h8300/include/asm/flat.h 	u32 val = get_unaligned((__force u32 *)rp);
rp                 29 arch/h8300/include/asm/flat.h static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
rp                 31 arch/h8300/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
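The per-arch flat_get_addr_from_rp()/flat_put_addr_at_rp() helpers above (c6x and h8300 here; microblaze, sh and xtensa appear further down) let the binfmt_flat loader read and rewrite a relocation slot at rp using whatever access width the architecture tolerates. A hedged sketch of the caller side, illustrative only and not a verbatim excerpt of fs/binfmt_flat.c; text_base stands in for the computed load offset:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/flat.h>

static int relocate_slot(u32 __user *rp, u32 relval, u32 flags, u32 text_base)
{
	u32 addr;

	/* read the pointer currently stored at the relocation address */
	if (flat_get_addr_from_rp(rp, relval, flags, &addr))
		return -EFAULT;

	addr += text_base;		/* rebase it to the load address */

	/* write the fixed-up pointer back into the mapped binary */
	if (flat_put_addr_at_rp(rp, addr, relval))
		return -EFAULT;

	return 0;
}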
rp                528 arch/ia64/include/asm/pal.h 			rp		: 1,	/* Responder identifier
rp                565 arch/ia64/include/asm/pal.h 			rp		: 1,	/* Responder identifier
rp                601 arch/ia64/include/asm/pal.h 			rp		: 1,	/* Responder identifier
rp                661 arch/ia64/include/asm/pal.h 			rp		: 1,	/* Responder identifier
rp                737 arch/ia64/include/asm/pal.h #define pmci_bus_resp_address_valid		pme_bus.rp
rp                 31 arch/ia64/kernel/entry.h 	.spillsp rp, PT(CR_IIP)+16+(off);	\
rp                454 arch/ia64/kernel/kprobes.c 		if (ri->rp && ri->rp->handler)
rp                455 arch/ia64/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                 13 arch/ia64/kernel/minstate.h (pUStk) br.call.spnt rp=account_sys_enter		\
rp                 30 arch/m68k/sun3/prom/init.c void __init prom_init(struct linux_romvec *rp)
rp                 32 arch/m68k/sun3/prom/init.c 	romvec = rp;
rp                 30 arch/microblaze/include/asm/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 33 arch/microblaze/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
rp                 59 arch/microblaze/include/asm/flat.h flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 relval)
rp                 61 arch/microblaze/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
rp                189 arch/microblaze/pci/pci-common.c 			struct resource *rp = &pdev->resource[i];
rp                190 arch/microblaze/pci/pci-common.c 			int flags = rp->flags;
rp                196 arch/microblaze/pci/pci-common.c 			if (offset < (rp->start & PAGE_MASK) ||
rp                197 arch/microblaze/pci/pci-common.c 			    offset > rp->end)
rp                199 arch/microblaze/pci/pci-common.c 			found = rp;
rp                222 arch/microblaze/pci/pci-common.c 	struct resource *rp = &hose->io_resource;
rp                233 arch/microblaze/pci/pci-common.c 	if (!(rp->flags & IORESOURCE_IO))
rp                235 arch/microblaze/pci/pci-common.c 	if (offset < rp->start || (offset + size) > rp->end)
rp                262 arch/microblaze/pci/pci-common.c 	struct resource *rp = &hose->io_resource;
rp                273 arch/microblaze/pci/pci-common.c 	if (!(rp->flags & IORESOURCE_IO))
rp                275 arch/microblaze/pci/pci-common.c 	if (offset < rp->start || (offset + size) > rp->end)
rp                311 arch/microblaze/pci/pci-common.c 	struct resource *rp;
rp                343 arch/microblaze/pci/pci-common.c 		rp = &hose->io_resource;
rp                344 arch/microblaze/pci/pci-common.c 		if (!(rp->flags & IORESOURCE_IO))
rp                346 arch/microblaze/pci/pci-common.c 		if (roffset < rp->start || (roffset + size) > rp->end)
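The microblaze pci-common.c helpers above (and their powerpc twins below) keep re-checking the same containment condition against the host bridge's io_resource. The test they open-code, isolated as a predicate; this is an illustrative factoring, not an existing kernel helper:

#include <linux/ioport.h>
#include <linux/types.h>

/* true iff [offset, offset + size) fits inside the bridge's PIO window */
static bool legacy_io_range_ok(const struct resource *rp,
			       unsigned long offset, size_t size)
{
	if (!(rp->flags & IORESOURCE_IO))
		return false;
	return offset >= rp->start && offset + size <= rp->end;
}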
rp                518 arch/mips/kernel/kprobes.c 		if (ri->rp && ri->rp->handler)
rp                519 arch/mips/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                 11 arch/parisc/include/asm/asmregs.h rp:	.reg	%r2
rp                 64 arch/parisc/include/asm/unwind.h 	unsigned long sp, ip, rp, r31;
rp                242 arch/parisc/kernel/kprobes.c 		if (ri->rp && ri->rp->handler) {
rp                243 arch/parisc/kernel/kprobes.c 			__this_cpu_write(current_kprobe, &ri->rp->kp);
rp                246 arch/parisc/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                232 arch/parisc/kernel/signal.c 	unsigned long rp, usp;
rp                310 arch/parisc/kernel/signal.c 	rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
rp                386 arch/parisc/kernel/signal.c 	regs->gr[2]  = rp;                /* userland return pointer */
rp                409 arch/parisc/kernel/signal.c 	       regs->iaoq[0], regs->iaoq[1], rp);
rp                245 arch/parisc/kernel/unwind.c 		info->rp = regs->gr[2];
rp                311 arch/parisc/kernel/unwind.c 		info->rp = 0;
rp                366 arch/parisc/kernel/unwind.c 				info->rp = info->r31;
rp                368 arch/parisc/kernel/unwind.c 				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
rp                369 arch/parisc/kernel/unwind.c 			info->prev_ip = info->rp;
rp                370 arch/parisc/kernel/unwind.c 			info->rp = 0;
rp                386 arch/parisc/kernel/unwind.c 	info->rp = regs->gr[2];
rp                426 arch/powerpc/kernel/kprobes.c 		if (ri->rp && ri->rp->handler)
rp                427 arch/powerpc/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                453 arch/powerpc/kernel/pci-common.c 			struct resource *rp = &pdev->resource[i];
rp                454 arch/powerpc/kernel/pci-common.c 			int flags = rp->flags;
rp                460 arch/powerpc/kernel/pci-common.c 			if (offset < (rp->start & PAGE_MASK) ||
rp                461 arch/powerpc/kernel/pci-common.c 			    offset > rp->end)
rp                463 arch/powerpc/kernel/pci-common.c 			found = rp;
rp                486 arch/powerpc/kernel/pci-common.c 	struct resource *rp = &hose->io_resource;
rp                497 arch/powerpc/kernel/pci-common.c 	if (!(rp->flags & IORESOURCE_IO))
rp                499 arch/powerpc/kernel/pci-common.c 	if (offset < rp->start || (offset + size) > rp->end)
rp                526 arch/powerpc/kernel/pci-common.c 	struct resource *rp = &hose->io_resource;
rp                537 arch/powerpc/kernel/pci-common.c 	if (!(rp->flags & IORESOURCE_IO))
rp                539 arch/powerpc/kernel/pci-common.c 	if (offset < rp->start || (offset + size) > rp->end)
rp                575 arch/powerpc/kernel/pci-common.c 	struct resource *rp;
rp                603 arch/powerpc/kernel/pci-common.c 		rp = &hose->io_resource;
rp                604 arch/powerpc/kernel/pci-common.c 		if (!(rp->flags & IORESOURCE_IO))
rp                606 arch/powerpc/kernel/pci-common.c 		if (roffset < rp->start || (roffset + size) > rp->end)
rp                420 arch/s390/kernel/kprobes.c 		if (ri->rp && ri->rp->handler) {
rp                422 arch/s390/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                 14 arch/sh/include/asm/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 17 arch/sh/include/asm/flat.h 	*addr = get_unaligned((__force u32 *)rp);
rp                 20 arch/sh/include/asm/flat.h static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
rp                 22 arch/sh/include/asm/flat.h 	put_unaligned(addr, (__force u32 *)rp);
rp                332 arch/sh/kernel/kprobes.c 		if (ri->rp && ri->rp->handler) {
rp                333 arch/sh/kernel/kprobes.c 			__this_cpu_write(current_kprobe, &ri->rp->kp);
rp                334 arch/sh/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                286 arch/sparc/kernel/ds.c 	struct ds_md_update_req *rp;
rp                292 arch/sparc/kernel/ds.c 	rp = (struct ds_md_update_req *) (dpkt + 1);
rp                302 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
rp                325 arch/sparc/kernel/ds.c 	struct ds_shutdown_req *rp;
rp                331 arch/sparc/kernel/ds.c 	rp = (struct ds_shutdown_req *) (dpkt + 1);
rp                340 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
rp                365 arch/sparc/kernel/ds.c 	struct ds_panic_req *rp;
rp                371 arch/sparc/kernel/ds.c 	rp = (struct ds_panic_req *) (dpkt + 1);
rp                380 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
rp                686 arch/sparc/kernel/ds.c 	struct ds_pri_msg *rp;
rp                688 arch/sparc/kernel/ds.c 	rp = (struct ds_pri_msg *) (dpkt + 1);
rp                691 arch/sparc/kernel/ds.c 	       dp->id, rp->req_num, rp->type, len);
rp                731 arch/sparc/kernel/ds.c 	struct ds_var_resp *rp;
rp                733 arch/sparc/kernel/ds.c 	rp = (struct ds_var_resp *) (dpkt + 1);
rp                735 arch/sparc/kernel/ds.c 	if (rp->hdr.type != DS_VAR_SET_RESP &&
rp                736 arch/sparc/kernel/ds.c 	    rp->hdr.type != DS_VAR_DELETE_RESP)
rp                739 arch/sparc/kernel/ds.c 	ds_var_response = rp->result;
rp                 75 arch/sparc/kernel/kernel.h void sparc32_start_kernel(struct linux_romvec *rp);
rp                495 arch/sparc/kernel/kprobes.c 		if (ri->rp && ri->rp->handler)
rp                496 arch/sparc/kernel/kprobes.c 			ri->rp->handler(ri, regs);
rp                832 arch/sparc/kernel/pci.c 		struct resource *rp = &pdev->resource[i];
rp                836 arch/sparc/kernel/pci.c 		if (!rp->flags)
rp                845 arch/sparc/kernel/pci.c 			     (rp->flags & IORESOURCE_IO) == 0) ||
rp                847 arch/sparc/kernel/pci.c 			     (rp->flags & IORESOURCE_MEM) == 0))
rp                856 arch/sparc/kernel/pci.c 		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
rp                858 arch/sparc/kernel/pci.c 		if ((rp->start <= user_paddr) &&
rp                991 arch/sparc/kernel/pci.c 			  const struct resource *rp, resource_size_t *start,
rp               1003 arch/sparc/kernel/pci.c 	pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
rp                338 arch/sparc/kernel/pci_common.c 		struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
rp                340 arch/sparc/kernel/pci_common.c 		if (!rp) {
rp                345 arch/sparc/kernel/pci_common.c 		rp->name = "IOMMU";
rp                346 arch/sparc/kernel/pci_common.c 		rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
rp                347 arch/sparc/kernel/pci_common.c 		rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
rp                348 arch/sparc/kernel/pci_common.c 		rp->flags = IORESOURCE_BUSY;
rp                349 arch/sparc/kernel/pci_common.c 		if (request_resource(&pbm->mem_space, rp)) {
rp                352 arch/sparc/kernel/pci_common.c 			kfree(rp);
rp                207 arch/sparc/kernel/process_64.c 	struct global_reg_snapshot *rp;
rp                211 arch/sparc/kernel/process_64.c 	rp = &global_cpu_snapshot[this_cpu].reg;
rp                213 arch/sparc/kernel/process_64.c 	rp->tstate = regs->tstate;
rp                214 arch/sparc/kernel/process_64.c 	rp->tpc = regs->tpc;
rp                215 arch/sparc/kernel/process_64.c 	rp->tnpc = regs->tnpc;
rp                216 arch/sparc/kernel/process_64.c 	rp->o7 = regs->u_regs[UREG_I7];
rp                224 arch/sparc/kernel/process_64.c 			rp->i7 = rw->ins[7];
rp                228 arch/sparc/kernel/process_64.c 				rp->rpc = rw->ins[7];
rp                231 arch/sparc/kernel/process_64.c 		rp->i7 = 0;
rp                232 arch/sparc/kernel/process_64.c 		rp->rpc = 0;
rp                234 arch/sparc/kernel/process_64.c 	rp->thread = tp;
rp                275 arch/sparc/kernel/setup_32.c void __init sparc32_start_kernel(struct linux_romvec *rp)
rp                277 arch/sparc/kernel/setup_32.c 	prom_init(rp);
rp                439 arch/sparc/kernel/signal32.c 		struct reg_window *rp;
rp                441 arch/sparc/kernel/signal32.c 		rp = &current_thread_info()->reg_window[wsaved - 1];
rp                443 arch/sparc/kernel/signal32.c 			err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
rp                445 arch/sparc/kernel/signal32.c 			err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
rp                446 arch/sparc/kernel/signal32.c 		err |= __put_user(rp->ins[6], &sf->ss.fp);
rp                447 arch/sparc/kernel/signal32.c 		err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
rp                571 arch/sparc/kernel/signal32.c 		struct reg_window *rp;
rp                573 arch/sparc/kernel/signal32.c 		rp = &current_thread_info()->reg_window[wsaved - 1];
rp                575 arch/sparc/kernel/signal32.c 			err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
rp                577 arch/sparc/kernel/signal32.c 			err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
rp                578 arch/sparc/kernel/signal32.c 		err |= __put_user(rp->ins[6], &sf->ss.fp);
rp                579 arch/sparc/kernel/signal32.c 		err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
rp                284 arch/sparc/kernel/signal_32.c 		struct reg_window32 *rp;
rp                286 arch/sparc/kernel/signal_32.c 		rp = &current_thread_info()->reg_window[wsaved - 1];
rp                287 arch/sparc/kernel/signal_32.c 		err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
rp                381 arch/sparc/kernel/signal_32.c 		struct reg_window32 *rp;
rp                383 arch/sparc/kernel/signal_32.c 		rp = &current_thread_info()->reg_window[wsaved - 1];
rp                384 arch/sparc/kernel/signal_32.c 		err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
rp                415 arch/sparc/kernel/signal_64.c 		struct reg_window *rp;
rp                417 arch/sparc/kernel/signal_64.c 		rp = &current_thread_info()->reg_window[wsaved - 1];
rp                418 arch/sparc/kernel/signal_64.c 		err |= copy_to_user(sf, rp, sizeof(struct reg_window));
rp                  8 arch/sparc/kernel/sigutil.h int restore_rwin_state(__siginfo_rwin_t __user *rp);
rp                 88 arch/sparc/kernel/sigutil_32.c 		struct reg_window32 *rp;
rp                 91 arch/sparc/kernel/sigutil_32.c 		rp = &current_thread_info()->reg_window[i];
rp                 93 arch/sparc/kernel/sigutil_32.c 		err |= copy_to_user(&rwin->reg_window[i], rp,
rp                100 arch/sparc/kernel/sigutil_32.c int restore_rwin_state(__siginfo_rwin_t __user *rp)
rp                105 arch/sparc/kernel/sigutil_32.c 	if (((unsigned long) rp) & 3)
rp                108 arch/sparc/kernel/sigutil_32.c 	get_user(wsaved, &rp->wsaved);
rp                115 arch/sparc/kernel/sigutil_32.c 				      &rp->reg_window[i],
rp                118 arch/sparc/kernel/sigutil_32.c 				  &rp->rwbuf_stkptrs[i]);
rp                 64 arch/sparc/kernel/sigutil_64.c 		struct reg_window *rp = &current_thread_info()->reg_window[i];
rp                 67 arch/sparc/kernel/sigutil_64.c 		err |= copy_to_user(&rwin->reg_window[i], rp,
rp                 74 arch/sparc/kernel/sigutil_64.c int restore_rwin_state(__siginfo_rwin_t __user *rp)
rp                 79 arch/sparc/kernel/sigutil_64.c 	if (((unsigned long) rp) & 7)
rp                 82 arch/sparc/kernel/sigutil_64.c 	get_user(wsaved, &rp->wsaved);
rp                 89 arch/sparc/kernel/sigutil_64.c 				      &rp->reg_window[i],
rp                 92 arch/sparc/kernel/sigutil_64.c 				  &rp->rwbuf_stkptrs[i]);
rp                 35 arch/sparc/prom/init_32.c void __init prom_init(struct linux_romvec *rp)
rp                 37 arch/sparc/prom/init_32.c 	romvec = rp;
rp                827 arch/x86/kernel/kprobes/core.c 				ri->rp->kp.addr);
rp                849 arch/x86/kernel/kprobes/core.c 		if (ri->rp && ri->rp->handler) {
rp                850 arch/x86/kernel/kprobes/core.c 			__this_cpu_write(current_kprobe, &ri->rp->kp);
rp                852 arch/x86/kernel/kprobes/core.c 			ri->rp->handler(ri, regs);
rp                  7 arch/xtensa/include/asm/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 10 arch/xtensa/include/asm/flat.h 	*addr = get_unaligned((__force u32 *)rp);
rp                 13 arch/xtensa/include/asm/flat.h static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
rp                 15 arch/xtensa/include/asm/flat.h 	put_unaligned(addr, (__force u32 *)rp);
rp                609 drivers/atm/firestream.c 				int rp, wp;
rp                610 drivers/atm/firestream.c 				rp =  read_fs (dev, Q_RP(q->offset));
rp                613 drivers/atm/firestream.c 					    q->offset, rp, wp, wp-rp);
rp               1101 drivers/block/drbd/drbd_int.h 			     struct p_block_req *rp);
rp               1375 drivers/block/drbd/drbd_main.c 		      struct p_block_req *rp)
rp               1377 drivers/block/drbd/drbd_main.c 	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
rp               1129 drivers/block/xen-blkback/blkback.c 	RING_IDX rc, rp;
rp               1133 drivers/block/xen-blkback/blkback.c 	rp = blk_rings->common.sring->req_prod;
rp               1136 drivers/block/xen-blkback/blkback.c 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rp               1139 drivers/block/xen-blkback/blkback.c 			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
rp               1142 drivers/block/xen-blkback/blkback.c 	while (rc != rp) {
rp               1552 drivers/block/xen-blkfront.c 	RING_IDX i, rp;
rp               1562 drivers/block/xen-blkfront.c 	rp = rinfo->ring.sring->rsp_prod;
rp               1565 drivers/block/xen-blkfront.c 	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
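In xen-blkback and xen-blkfront above, rp snapshots the shared ring's producer index before the consumer loop, with a barrier between the read and the loop so the queued entries are visible. The canonical consumer shape for the frontend side, as a sketch; handle_response() is a placeholder:

#include <asm/barrier.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>

static void handle_response(struct blkif_response *rsp)
{
	/* placeholder: complete the corresponding request */
}

static void drain_responses(struct blkif_front_ring *ring)
{
	RING_IDX i, rp;

	rp = ring->sring->rsp_prod;
	rmb();				/* see responses up to rp */

	for (i = ring->rsp_cons; i != rp; i++)
		handle_response(RING_GET_RESPONSE(ring, i));

	ring->rsp_cons = i;
}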
rp                393 drivers/bluetooth/btintel.c 	struct ibt_rp_reg_access *rp;
rp                429 drivers/bluetooth/btintel.c 	if (skb->len != sizeof(*rp) + val_size) {
rp                436 drivers/bluetooth/btintel.c 	rp = (struct ibt_rp_reg_access *)skb->data;
rp                438 drivers/bluetooth/btintel.c 	if (rp->addr != cp.addr) {
rp                440 drivers/bluetooth/btintel.c 			   le32_to_cpu(rp->addr));
rp                445 drivers/bluetooth/btintel.c 	memcpy(val, rp->data, val_size);
rp                393 drivers/bluetooth/btrtl.c 	struct hci_rp_read_local_version *rp;
rp                443 drivers/bluetooth/btrtl.c 	rp = (struct hci_rp_read_local_version *)skb->data;
rp                445 drivers/bluetooth/btrtl.c 		     __le16_to_cpu(rp->hci_rev), __le16_to_cpu(rp->lmp_subver));
rp               1622 drivers/bluetooth/btusb.c 	struct hci_rp_read_local_version *rp;
rp               1641 drivers/bluetooth/btusb.c 	rp = (struct hci_rp_read_local_version *)skb->data;
rp               1644 drivers/bluetooth/btusb.c 	if (le16_to_cpu(rp->manufacturer) != 10 ||
rp               1645 drivers/bluetooth/btusb.c 	    le16_to_cpu(rp->lmp_subver) == 0x0c5c) {
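In btintel/btrtl/btusb above, rp points at the reply payload of a synchronous HCI command, cast from skb->data after a length check. A hedged sketch of obtaining and validating such a reply with the standard __hci_cmd_sync() helper, error handling trimmed:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len != sizeof(*rp)) {		/* reply must be complete */
		kfree_skb(skb);
		return -EIO;
	}

	rp = (struct hci_rp_read_local_version *)skb->data;
	bt_dev_info(hdev, "hci_rev 0x%04x lmp_subver 0x%04x",
		    le16_to_cpu(rp->hci_rev), le16_to_cpu(rp->lmp_subver));

	kfree_skb(skb);
	return 0;
}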
rp                726 drivers/char/ipmi/ipmi_devintf.c 		struct ipmi_req	rp;
rp                734 drivers/char/ipmi/ipmi_devintf.c 		get_compat_ipmi_req(&rp, &r32);
rp                741 drivers/char/ipmi/ipmi_devintf.c 		return handle_send_req(priv->user, &rp,
rp                 90 drivers/edac/pnd2_edac.c 	void (*mk_region)(char *name, struct region *rp, void *asym);
rp                339 drivers/edac/pnd2_edac.c static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
rp                341 drivers/edac/pnd2_edac.c 	rp->enabled = 1;
rp                342 drivers/edac/pnd2_edac.c 	rp->base = base;
rp                343 drivers/edac/pnd2_edac.c 	rp->limit = limit;
rp                347 drivers/edac/pnd2_edac.c static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
rp                361 drivers/edac/pnd2_edac.c 	rp->base = base;
rp                362 drivers/edac/pnd2_edac.c 	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
rp                363 drivers/edac/pnd2_edac.c 	rp->enabled = 1;
rp                364 drivers/edac/pnd2_edac.c 	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
rp                367 drivers/edac/pnd2_edac.c static bool in_region(struct region *rp, u64 addr)
rp                369 drivers/edac/pnd2_edac.c 	if (!rp->enabled)
rp                372 drivers/edac/pnd2_edac.c 	return rp->base <= addr && addr <= rp->limit;
rp                446 drivers/edac/pnd2_edac.c static void apl_mk_region(char *name, struct region *rp, void *asym)
rp                450 drivers/edac/pnd2_edac.c 	mk_region(name, rp,
rp                456 drivers/edac/pnd2_edac.c static void dnv_mk_region(char *name, struct region *rp, void *asym)
rp                460 drivers/edac/pnd2_edac.c 	mk_region(name, rp,
rp                 39 drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h 		int rp;
rp                 93 drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c 	struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];
rp                855 drivers/gpu/drm/radeon/rv6xx_dpm.c 		    (pi->hw.rp[0] * pi->bsp) / 200,
rp                856 drivers/gpu/drm/radeon/rv6xx_dpm.c 		    (pi->hw.rp[1] * pi->bsp) / 200,
rp               1024 drivers/gpu/drm/radeon/rv6xx_dpm.c 	pi->hw.rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS - 1]
rp               1033 drivers/gpu/drm/radeon/rv6xx_dpm.c 			  &pi->hw.rp[0]);
rp               1041 drivers/gpu/drm/radeon/rv6xx_dpm.c 			  &pi->hw.rp[1]);
rp                 52 drivers/gpu/drm/radeon/rv6xx_dpm.h 	u8 rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
rp                 28 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	RING_IDX i, rp;
rp                 37 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	rp = evtchnl->u.req.ring.sring->rsp_prod;
rp                 41 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
rp                 95 drivers/infiniband/ulp/ipoib/ipoib_main.c static void ipoib_neigh_reclaim(struct rcu_head *rp);
rp               1465 drivers/infiniband/ulp/ipoib/ipoib_main.c static void ipoib_neigh_reclaim(struct rcu_head *rp)
rp               1468 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
rp               2145 drivers/macintosh/via-pmu.c 			struct rb_entry *rp = &pp->rb_buf[pp->rb_put];
rp               2146 drivers/macintosh/via-pmu.c 			rp->len = len;
rp               2147 drivers/macintosh/via-pmu.c 			memcpy(rp->data, data, len);
rp               2202 drivers/macintosh/via-pmu.c 			struct rb_entry *rp = &pp->rb_buf[i];
rp               2203 drivers/macintosh/via-pmu.c 			ret = rp->len;
rp               2207 drivers/macintosh/via-pmu.c 			if (ret > 0 && copy_to_user(buf, rp->data, ret))
rp                 42 drivers/md/raid1-10.c static inline int resync_alloc_pages(struct resync_pages *rp,
rp                 48 drivers/md/raid1-10.c 		rp->pages[i] = alloc_page(gfp_flags);
rp                 49 drivers/md/raid1-10.c 		if (!rp->pages[i])
rp                 57 drivers/md/raid1-10.c 		put_page(rp->pages[i]);
rp                 61 drivers/md/raid1-10.c static inline void resync_free_pages(struct resync_pages *rp)
rp                 66 drivers/md/raid1-10.c 		put_page(rp->pages[i]);
rp                 69 drivers/md/raid1-10.c static inline void resync_get_all_pages(struct resync_pages *rp)
rp                 74 drivers/md/raid1-10.c 		get_page(rp->pages[i]);
rp                 77 drivers/md/raid1-10.c static inline struct page *resync_fetch_page(struct resync_pages *rp,
rp                 82 drivers/md/raid1-10.c 	return rp->pages[idx];
rp                 95 drivers/md/raid1-10.c static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
rp                102 drivers/md/raid1-10.c 		struct page *page = resync_fetch_page(rp, idx);
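The raid1-10.c helpers above manage the per-bio struct resync_pages descriptor; raid1.c and raid10.c (next) tie one to each resync bio through bio->bi_private and the raid_bio back-pointer. A minimal sketch of that pairing, written as if it sat alongside the helpers in the same translation unit; attach_resync_pages() is hypothetical, not an existing kernel function:

#include <linux/bio.h>
#include <linux/slab.h>

static struct resync_pages *attach_resync_pages(struct bio *bio,
						void *raid_bio, gfp_t gfp)
{
	struct resync_pages *rp = kmalloc(sizeof(*rp), gfp);

	if (!rp)
		return NULL;

	if (resync_alloc_pages(rp, gfp)) {	/* helper shown above */
		kfree(rp);
		return NULL;
	}

	rp->raid_bio = raid_bio;	/* lets the end_io path find its r1/r10 bio */
	bio->bi_private = rp;
	return rp;
}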
rp                167 drivers/md/raid1.c 		struct resync_pages *rp = &rps[j];
rp                172 drivers/md/raid1.c 			if (resync_alloc_pages(rp, gfp_flags))
rp                175 drivers/md/raid1.c 			memcpy(rp, &rps[0], sizeof(*rp));
rp                176 drivers/md/raid1.c 			resync_get_all_pages(rp);
rp                179 drivers/md/raid1.c 		rp->raid_bio = r1_bio;
rp                180 drivers/md/raid1.c 		bio->bi_private = rp;
rp                206 drivers/md/raid1.c 	struct resync_pages *rp = NULL;
rp                209 drivers/md/raid1.c 		rp = get_resync_pages(r1bio->bios[i]);
rp                210 drivers/md/raid1.c 		resync_free_pages(rp);
rp                215 drivers/md/raid1.c 	kfree(rp);
rp               2124 drivers/md/raid1.c 		struct resync_pages *rp = get_resync_pages(b);
rp               2135 drivers/md/raid1.c 		rp->raid_bio = r1_bio;
rp               2136 drivers/md/raid1.c 		b->bi_private = rp;
rp               2139 drivers/md/raid1.c 		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
rp               2873 drivers/md/raid1.c 			struct resync_pages *rp;
rp               2876 drivers/md/raid1.c 			rp = get_resync_pages(bio);
rp               2878 drivers/md/raid1.c 				page = resync_fetch_page(rp, page_idx);
rp                165 drivers/md/raid10.c 		struct resync_pages *rp, *rp_repl;
rp                167 drivers/md/raid10.c 		rp = &rps[j];
rp                175 drivers/md/raid10.c 			if (resync_alloc_pages(rp, gfp_flags))
rp                178 drivers/md/raid10.c 			memcpy(rp, &rps[0], sizeof(*rp));
rp                179 drivers/md/raid10.c 			resync_get_all_pages(rp);
rp                182 drivers/md/raid10.c 		rp->raid_bio = r10_bio;
rp                183 drivers/md/raid10.c 		bio->bi_private = rp;
rp                185 drivers/md/raid10.c 			memcpy(rp_repl, rp, sizeof(*rp));
rp                215 drivers/md/raid10.c 	struct resync_pages *rp = NULL;
rp                221 drivers/md/raid10.c 			rp = get_resync_pages(bio);
rp                222 drivers/md/raid10.c 			resync_free_pages(rp);
rp                232 drivers/md/raid10.c 	kfree(rp);
rp               2037 drivers/md/raid10.c 		struct resync_pages *rp;
rp               2081 drivers/md/raid10.c 		rp = get_resync_pages(tbio);
rp               2084 drivers/md/raid10.c 		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
rp               2086 drivers/md/raid10.c 		rp->raid_bio = r10_bio;
rp               2087 drivers/md/raid10.c 		tbio->bi_private = rp;
rp               2806 drivers/md/raid10.c 	struct rsync_pages *rp;
rp               2819 drivers/md/raid10.c 		rp = bio->bi_private;
rp               2821 drivers/md/raid10.c 		bio->bi_private = rp;
rp               2824 drivers/md/raid10.c 			rp = bio->bi_private;
rp               2826 drivers/md/raid10.c 			bio->bi_private = rp;
rp               3429 drivers/md/raid10.c 			struct resync_pages *rp = get_resync_pages(bio);
rp               3430 drivers/md/raid10.c 			page = resync_fetch_page(rp, page_idx);
rp                 43 drivers/media/pci/bt8xx/bttv-risc.c 	__le32 *rp;
rp                 58 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
rp                 59 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1);
rp                 60 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(0);
rp                 63 drivers/media/pci/bt8xx/bttv-risc.c 		*(rp++) = cpu_to_le32(BT848_RISC_SKIP | BT848_RISC_SOL |
rp                 79 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL|
rp                 81 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
rp                 86 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL|
rp                 88 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
rp                 93 drivers/media/pci/bt8xx/bttv-risc.c 				*(rp++)=cpu_to_le32(BT848_RISC_WRITE|
rp                 95 drivers/media/pci/bt8xx/bttv-risc.c 				*(rp++)=cpu_to_le32(sg_dma_address(sg));
rp                 99 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL|
rp                101 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(sg_dma_address(sg));
rp                108 drivers/media/pci/bt8xx/bttv-risc.c 	risc->jmp = rp;
rp                123 drivers/media/pci/bt8xx/bttv-risc.c 	__le32 *rp;
rp                141 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
rp                142 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM3);
rp                143 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(0);
rp                209 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(ri | ylen);
rp                210 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(((ylen >> hshift) << 16) |
rp                212 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(sg_dma_address(ysg)+yoffset);
rp                215 drivers/media/pci/bt8xx/bttv-risc.c 				*(rp++)=cpu_to_le32(sg_dma_address(usg)+uoffset);
rp                217 drivers/media/pci/bt8xx/bttv-risc.c 				*(rp++)=cpu_to_le32(sg_dma_address(vsg)+voffset);
rp                229 drivers/media/pci/bt8xx/bttv-risc.c 	risc->jmp = rp;
rp                242 drivers/media/pci/bt8xx/bttv-risc.c 	__le32 *rp;
rp                262 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
rp                263 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(BT848_RISC_SYNC|BT848_FIFO_STATUS_FM1);
rp                264 drivers/media/pci/bt8xx/bttv-risc.c 	*(rp++) = cpu_to_le32(0);
rp                310 drivers/media/pci/bt8xx/bttv-risc.c 			*(rp++)=cpu_to_le32(ri);
rp                312 drivers/media/pci/bt8xx/bttv-risc.c 				*(rp++)=cpu_to_le32(ra);
rp                317 drivers/media/pci/bt8xx/bttv-risc.c 	risc->jmp = rp;
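Throughout bttv-risc.c above (and the cx23885/cx25821/cx88/tw68 builders that follow), rp is a cursor into a coherent DMA buffer and every *(rp++) emits one 32-bit word of the capture engine's scatter-gather "RISC" program. The emission idiom reduced to a single full-line write; the BT848_* constants are the ones already visible above and the helper itself is purely illustrative:

#include <linux/types.h>
#include <asm/byteorder.h>
#include "bt848.h"		/* BT848_RISC_* opcode bits */

/* one WRITE instruction covering a whole line: start-of-line and end-of-line
 * set in the same opcode word, followed by the line's DMA address */
static __le32 *emit_full_line(__le32 *rp, dma_addr_t addr, unsigned int bpl)
{
	*(rp++) = cpu_to_le32(BT848_RISC_WRITE | BT848_RISC_SOL |
			      BT848_RISC_EOL | bpl);
	*(rp++) = cpu_to_le32(addr);
	return rp;
}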
rp               1131 drivers/media/pci/cx23885/cx23885-core.c static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
rp               1141 drivers/media/pci/cx23885/cx23885-core.c 		*(rp++) = cpu_to_le32(RISC_JUMP);
rp               1142 drivers/media/pci/cx23885/cx23885-core.c 		*(rp++) = cpu_to_le32(0);
rp               1143 drivers/media/pci/cx23885/cx23885-core.c 		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1148 drivers/media/pci/cx23885/cx23885-core.c 		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
rp               1165 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
rp               1166 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
rp               1167 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1172 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
rp               1174 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
rp               1175 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1180 drivers/media/pci/cx23885/cx23885-core.c 				*(rp++) = cpu_to_le32(RISC_WRITE|
rp               1182 drivers/media/pci/cx23885/cx23885-core.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1183 drivers/media/pci/cx23885/cx23885-core.c 				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1187 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
rp               1188 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1189 drivers/media/pci/cx23885/cx23885-core.c 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1195 drivers/media/pci/cx23885/cx23885-core.c 	return rp;
rp               1204 drivers/media/pci/cx23885/cx23885-core.c 	__le32 *rp;
rp               1226 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
rp               1228 drivers/media/pci/cx23885/cx23885-core.c 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
rp               1231 drivers/media/pci/cx23885/cx23885-core.c 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
rp               1235 drivers/media/pci/cx23885/cx23885-core.c 	risc->jmp = rp;
rp               1247 drivers/media/pci/cx23885/cx23885-core.c 	__le32 *rp;
rp               1263 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
rp               1264 drivers/media/pci/cx23885/cx23885-core.c 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
rp               1268 drivers/media/pci/cx23885/cx23885-core.c 	risc->jmp = rp;
rp               1279 drivers/media/pci/cx23885/cx23885-core.c 	__le32 *rp;
rp               1300 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
rp               1305 drivers/media/pci/cx23885/cx23885-core.c 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
rp               1309 drivers/media/pci/cx23885/cx23885-core.c 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
rp               1315 drivers/media/pci/cx23885/cx23885-core.c 	risc->jmp = rp;
rp                993 drivers/media/pci/cx25821/cx25821-core.c static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
rp               1002 drivers/media/pci/cx25821/cx25821-core.c 		*(rp++) = cpu_to_le32(RISC_JUMP);
rp               1003 drivers/media/pci/cx25821/cx25821-core.c 		*(rp++) = cpu_to_le32(0);
rp               1004 drivers/media/pci/cx25821/cx25821-core.c 		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
rp               1009 drivers/media/pci/cx25821/cx25821-core.c 		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
rp               1020 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL |
rp               1022 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp               1023 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1028 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL |
rp               1030 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp               1031 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1036 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(RISC_WRITE |
rp               1038 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1039 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1043 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
rp               1044 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1045 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1052 drivers/media/pci/cx25821/cx25821-core.c 	return rp;
rp               1062 drivers/media/pci/cx25821/cx25821-core.c 	__le32 *rp;
rp               1085 drivers/media/pci/cx25821/cx25821-core.c 	rp = risc->cpu;
rp               1088 drivers/media/pci/cx25821/cx25821-core.c 		rp = cx25821_risc_field(rp, sglist, top_offset, 0, bpl, padding,
rp               1093 drivers/media/pci/cx25821/cx25821-core.c 		rp = cx25821_risc_field(rp, sglist, bottom_offset, 0x200, bpl,
rp               1098 drivers/media/pci/cx25821/cx25821-core.c 	risc->jmp = rp;
rp               1104 drivers/media/pci/cx25821/cx25821-core.c static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
rp               1114 drivers/media/pci/cx25821/cx25821-core.c 		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
rp               1131 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | sol | RISC_EOL |
rp               1133 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp               1134 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1139 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | sol |
rp               1141 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp               1142 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1147 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(RISC_WRITE |
rp               1149 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1150 drivers/media/pci/cx25821/cx25821-core.c 				*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1154 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
rp               1155 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp               1156 drivers/media/pci/cx25821/cx25821-core.c 			*(rp++) = cpu_to_le32(0);	/* bits 63-32 */
rp               1162 drivers/media/pci/cx25821/cx25821-core.c 	return rp;
rp               1172 drivers/media/pci/cx25821/cx25821-core.c 	__le32 *rp;
rp               1188 drivers/media/pci/cx25821/cx25821-core.c 	rp = risc->cpu;
rp               1189 drivers/media/pci/cx25821/cx25821-core.c 	rp = cx25821_risc_field_audio(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
rp               1193 drivers/media/pci/cx25821/cx25821-core.c 	risc->jmp = rp;
rp                 70 drivers/media/pci/cx88/cx88-core.c static __le32 *cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
rp                 79 drivers/media/pci/cx88/cx88-core.c 		(*rp++) = cpu_to_le32(RISC_JUMP);
rp                 80 drivers/media/pci/cx88/cx88-core.c 		(*rp++) = 0;
rp                 85 drivers/media/pci/cx88/cx88-core.c 		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
rp                100 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | sol |
rp                102 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp                107 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | sol |
rp                109 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp                114 drivers/media/pci/cx88/cx88-core.c 				*(rp++) = cpu_to_le32(RISC_WRITE |
rp                116 drivers/media/pci/cx88/cx88-core.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp                120 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
rp                121 drivers/media/pci/cx88/cx88-core.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp                127 drivers/media/pci/cx88/cx88-core.c 	return rp;
rp                136 drivers/media/pci/cx88/cx88-core.c 	__le32 *rp;
rp                160 drivers/media/pci/cx88/cx88-core.c 	rp = risc->cpu;
rp                162 drivers/media/pci/cx88/cx88-core.c 		rp = cx88_risc_field(rp, sglist, top_offset, 0,
rp                165 drivers/media/pci/cx88/cx88-core.c 		rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
rp                170 drivers/media/pci/cx88/cx88-core.c 	risc->jmp = rp;
rp                181 drivers/media/pci/cx88/cx88-core.c 	__le32 *rp;
rp                198 drivers/media/pci/cx88/cx88-core.c 	rp = risc->cpu;
rp                199 drivers/media/pci/cx88/cx88-core.c 	rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
rp                203 drivers/media/pci/cx88/cx88-core.c 	risc->jmp = rp;
rp                357 drivers/media/pci/saa7164/saa7164-core.c 	u32 wp, mcb, rp, cnt = 0;
rp                399 drivers/media/pci/saa7164/saa7164-core.c 			rp = mcb;
rp                401 drivers/media/pci/saa7164/saa7164-core.c 			rp = (port->last_svc_rp + 1) % 8;
rp                403 drivers/media/pci/saa7164/saa7164-core.c 		if (rp > (port->hwcfg.buffercount - 1)) {
rp                404 drivers/media/pci/saa7164/saa7164-core.c 			printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
rp                408 drivers/media/pci/saa7164/saa7164-core.c 		saa7164_work_enchandler_helper(port, rp);
rp                409 drivers/media/pci/saa7164/saa7164-core.c 		port->last_svc_rp = rp;
rp                412 drivers/media/pci/saa7164/saa7164-core.c 		if (rp == mcb)
rp                434 drivers/media/pci/saa7164/saa7164-core.c 	u32 wp, mcb, rp, cnt = 0;
rp                475 drivers/media/pci/saa7164/saa7164-core.c 			rp = mcb;
rp                477 drivers/media/pci/saa7164/saa7164-core.c 			rp = (port->last_svc_rp + 1) % 8;
rp                479 drivers/media/pci/saa7164/saa7164-core.c 		if (rp > (port->hwcfg.buffercount - 1)) {
rp                480 drivers/media/pci/saa7164/saa7164-core.c 			printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
rp                484 drivers/media/pci/saa7164/saa7164-core.c 		saa7164_work_enchandler_helper(port, rp);
rp                485 drivers/media/pci/saa7164/saa7164-core.c 		port->last_svc_rp = rp;
rp                488 drivers/media/pci/saa7164/saa7164-core.c 		if (rp == mcb)
rp                576 drivers/media/pci/saa7164/saa7164-core.c 	int wp, i = 0, rp;
rp                585 drivers/media/pci/saa7164/saa7164-core.c 		rp = (port->hwcfg.buffercount - 1);
rp                587 drivers/media/pci/saa7164/saa7164-core.c 		rp = wp - 1;
rp                596 drivers/media/pci/saa7164/saa7164-core.c 		if (buf->idx == rp) {
rp                599 drivers/media/pci/saa7164/saa7164-core.c 				__func__, wp, rp);
rp                 33 drivers/media/pci/tw68/tw68-risc.c static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
rp                 42 drivers/media/pci/tw68/tw68-risc.c 		*(rp++) = cpu_to_le32(RISC_JUMP);
rp                 43 drivers/media/pci/tw68/tw68-risc.c 		*(rp++) = 0;
rp                 48 drivers/media/pci/tw68/tw68-risc.c 		*(rp++) = cpu_to_le32(RISC_SYNCO);
rp                 50 drivers/media/pci/tw68/tw68-risc.c 		*(rp++) = cpu_to_le32(RISC_SYNCE);
rp                 51 drivers/media/pci/tw68/tw68-risc.c 	*(rp++) = 0;
rp                 63 drivers/media/pci/tw68/tw68-risc.c 			*(rp++) = cpu_to_le32(RISC_LINESTART |
rp                 65 drivers/media/pci/tw68/tw68-risc.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp                 77 drivers/media/pci/tw68/tw68-risc.c 			*(rp++) = cpu_to_le32(RISC_LINESTART |
rp                 80 drivers/media/pci/tw68/tw68-risc.c 			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
rp                 85 drivers/media/pci/tw68/tw68-risc.c 				*(rp++) = cpu_to_le32(RISC_INLINE |
rp                 88 drivers/media/pci/tw68/tw68-risc.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp                 95 drivers/media/pci/tw68/tw68-risc.c 				*(rp++) = cpu_to_le32(RISC_INLINE |
rp                 98 drivers/media/pci/tw68/tw68-risc.c 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
rp                105 drivers/media/pci/tw68/tw68-risc.c 	return rp;
rp                138 drivers/media/pci/tw68/tw68-risc.c 	__le32 *rp;
rp                159 drivers/media/pci/tw68/tw68-risc.c 	rp = buf->cpu;
rp                161 drivers/media/pci/tw68/tw68-risc.c 		rp = tw68_risc_field(rp, sglist, top_offset, 1,
rp                164 drivers/media/pci/tw68/tw68-risc.c 		rp = tw68_risc_field(rp, sglist, bottom_offset, 2,
rp                168 drivers/media/pci/tw68/tw68-risc.c 	buf->jmp = rp;
rp                 84 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	unsigned long wp, rp;
rp                 94 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
rp                 96 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	pos = rp - channel->back_buffer_busaddr;
rp                 99 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	if (wp < rp)
rp                102 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 	size = wp - rp;
rp                107 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 				rp,
rp                115 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c 		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
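The c8sectpfe tasklet above works out how much TS data the DMA engine has produced from the bus read pointer (rp) and write pointer (wp) of a circular back buffer. The wrap-around arithmetic, isolated as an illustrative helper:

#include <linux/types.h>

/* bytes available between read pointer rp and write pointer wp in a
 * circular buffer of bufsize bytes */
static size_t ring_bytes_available(unsigned long rp, unsigned long wp,
				   size_t bufsize)
{
	if (wp < rp)		/* writer has wrapped past the end */
		wp += bufsize;

	return wp - rp;
}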
rp                172 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	int rp;
rp                185 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	rp = hdcs->exp.rs + cp;
rp                187 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	rowexp = cycles / rp;
rp                190 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	cycles -= rowexp * rp;
rp                134 drivers/misc/sgi-xp/xpc_partition.c 	struct xpc_rsvd_page *rp;
rp                147 drivers/misc/sgi-xp/xpc_partition.c 	rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
rp                149 drivers/misc/sgi-xp/xpc_partition.c 	if (rp->SAL_version < 3) {
rp                151 drivers/misc/sgi-xp/xpc_partition.c 		rp->SAL_partid &= 0xff;
rp                153 drivers/misc/sgi-xp/xpc_partition.c 	BUG_ON(rp->SAL_partid != xp_partition_id);
rp                155 drivers/misc/sgi-xp/xpc_partition.c 	if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
rp                157 drivers/misc/sgi-xp/xpc_partition.c 			"supported range (< 0 || >= %d)\n", rp->SAL_partid,
rp                162 drivers/misc/sgi-xp/xpc_partition.c 	rp->version = XPC_RP_VERSION;
rp                163 drivers/misc/sgi-xp/xpc_partition.c 	rp->max_npartitions = xp_max_npartitions;
rp                166 drivers/misc/sgi-xp/xpc_partition.c 	if (rp->SAL_version == 1) {
rp                168 drivers/misc/sgi-xp/xpc_partition.c 		rp->SAL_nasids_size = 128;
rp                170 drivers/misc/sgi-xp/xpc_partition.c 	xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
rp                171 drivers/misc/sgi-xp/xpc_partition.c 	xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
rp                175 drivers/misc/sgi-xp/xpc_partition.c 	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
rp                176 drivers/misc/sgi-xp/xpc_partition.c 	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
rp                178 drivers/misc/sgi-xp/xpc_partition.c 	ret = xpc_arch_ops.setup_rsvd_page(rp);
rp                188 drivers/misc/sgi-xp/xpc_partition.c 	if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
rp                190 drivers/misc/sgi-xp/xpc_partition.c 	rp->ts_jiffies = new_ts_jiffies;
rp                192 drivers/misc/sgi-xp/xpc_partition.c 	xpc_rsvd_page = rp;
rp                816 drivers/misc/sgi-xp/xpc_uv.c xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
rp                820 drivers/misc/sgi-xp/xpc_uv.c 	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
rp                821 drivers/misc/sgi-xp/xpc_uv.c 	rp->sn.uv.activate_gru_mq_desc_gpa =
rp                848 drivers/mtd/mtdswap.c 	struct rb_root *rp = NULL;
rp                860 drivers/mtd/mtdswap.c 	rp = &d->trees[idx].root;
rp                861 drivers/mtd/mtdswap.c 	eb = rb_entry(rb_first(rp), struct swap_eb, rb);
rp                863 drivers/mtd/mtdswap.c 	rb_erase(&eb->rb, rp);
rp                274 drivers/net/appletalk/ipddp.c         struct ipddp_route rcp, rcp2, *rp;
rp                289 drivers/net/appletalk/ipddp.c 			rp = __ipddp_find_route(&rcp);
rp                290 drivers/net/appletalk/ipddp.c 			if (rp) {
rp                292 drivers/net/appletalk/ipddp.c 				rcp2.ip    = rp->ip;
rp                293 drivers/net/appletalk/ipddp.c 				rcp2.at    = rp->at;
rp                294 drivers/net/appletalk/ipddp.c 				rcp2.flags = rp->flags;
rp                298 drivers/net/appletalk/ipddp.c 			if (rp) {
rp                 63 drivers/net/ethernet/8390/mcf8390.c 	NE2000_BYTE *rp;
rp                 65 drivers/net/ethernet/8390/mcf8390.c 	rp = (NE2000_BYTE *) NE_PTR(addr);
rp                 66 drivers/net/ethernet/8390/mcf8390.c 	*rp = RSWAP(val);
rp                 72 drivers/net/ethernet/8390/mcf8390.c 	NE2000_BYTE *rp, val;
rp                 74 drivers/net/ethernet/8390/mcf8390.c 	rp = (NE2000_BYTE *) NE_PTR(addr);
rp                 75 drivers/net/ethernet/8390/mcf8390.c 	val = *rp;
rp                 81 drivers/net/ethernet/8390/mcf8390.c 	NE2000_BYTE *rp, val;
rp                 85 drivers/net/ethernet/8390/mcf8390.c 	rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
rp                 87 drivers/net/ethernet/8390/mcf8390.c 		val = *rp;
rp                 94 drivers/net/ethernet/8390/mcf8390.c 	volatile u16 *rp;
rp                 98 drivers/net/ethernet/8390/mcf8390.c 	rp = (volatile u16 *) NE_DATA_PTR(addr);
rp                100 drivers/net/ethernet/8390/mcf8390.c 		w = *rp;
rp                107 drivers/net/ethernet/8390/mcf8390.c 	NE2000_BYTE *rp, val;
rp                111 drivers/net/ethernet/8390/mcf8390.c 	rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
rp                114 drivers/net/ethernet/8390/mcf8390.c 		*rp = RSWAP(val);
rp                120 drivers/net/ethernet/8390/mcf8390.c 	volatile u16 *rp;
rp                124 drivers/net/ethernet/8390/mcf8390.c 	rp = (volatile u16 *) NE_DATA_PTR(addr);
rp                127 drivers/net/ethernet/8390/mcf8390.c 		*rp = BSWAP(w);
rp                627 drivers/net/ethernet/broadcom/b44.c 		struct ring_info *rp = &bp->tx_buffers[cons];
rp                628 drivers/net/ethernet/broadcom/b44.c 		struct sk_buff *skb = rp->skb;
rp                633 drivers/net/ethernet/broadcom/b44.c 				 rp->mapping,
rp                636 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
rp                790 drivers/net/ethernet/broadcom/b44.c 		struct ring_info *rp = &bp->rx_buffers[cons];
rp                791 drivers/net/ethernet/broadcom/b44.c 		struct sk_buff *skb = rp->skb;
rp                792 drivers/net/ethernet/broadcom/b44.c 		dma_addr_t map = rp->mapping;
rp               1096 drivers/net/ethernet/broadcom/b44.c 	struct ring_info *rp;
rp               1100 drivers/net/ethernet/broadcom/b44.c 		rp = &bp->rx_buffers[i];
rp               1102 drivers/net/ethernet/broadcom/b44.c 		if (rp->skb == NULL)
rp               1104 drivers/net/ethernet/broadcom/b44.c 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
rp               1106 drivers/net/ethernet/broadcom/b44.c 		dev_kfree_skb_any(rp->skb);
rp               1107 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
rp               1112 drivers/net/ethernet/broadcom/b44.c 		rp = &bp->tx_buffers[i];
rp               1114 drivers/net/ethernet/broadcom/b44.c 		if (rp->skb == NULL)
rp               1116 drivers/net/ethernet/broadcom/b44.c 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
rp               1118 drivers/net/ethernet/broadcom/b44.c 		dev_kfree_skb_any(rp->skb);
rp               1119 drivers/net/ethernet/broadcom/b44.c 		rp->skb = NULL;
rp               1597 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 				  struct ethtool_ringparam *rp)
rp               1602 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_max_pending = MAX_RX_BUFFERS;
rp               1603 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
rp               1604 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_jumbo_max_pending = 0;
rp               1605 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
rp               1607 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
rp               1608 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
rp               1609 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->rx_jumbo_pending = 0;
rp               1610 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
rp               1620 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 				 struct ethtool_ringparam *rp)
rp               1627 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	if (rp->rx_pending > MAX_RX_BUFFERS ||
rp               1628 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->rx_jumbo_pending ||
rp               1629 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
rp               1630 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
rp               1631 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
rp               1632 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->rx_pending < MIN_FL_ENTRIES ||
rp               1633 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	    rp->tx_pending < MIN_TXQ_ENTRIES)
rp               1640 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
rp               1641 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
rp               1642 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		s->ethtxq[qs].q.size = rp->tx_pending;
rp               2122 drivers/net/ethernet/cortina/gemini.c 			       struct ethtool_ringparam *rp)
rp               2129 drivers/net/ethernet/cortina/gemini.c 	rp->rx_max_pending = 1 << 15;
rp               2130 drivers/net/ethernet/cortina/gemini.c 	rp->rx_mini_max_pending = 0;
rp               2131 drivers/net/ethernet/cortina/gemini.c 	rp->rx_jumbo_max_pending = 0;
rp               2132 drivers/net/ethernet/cortina/gemini.c 	rp->tx_max_pending = 1 << 15;
rp               2134 drivers/net/ethernet/cortina/gemini.c 	rp->rx_pending = 1 << port->rxq_order;
rp               2135 drivers/net/ethernet/cortina/gemini.c 	rp->rx_mini_pending = 0;
rp               2136 drivers/net/ethernet/cortina/gemini.c 	rp->rx_jumbo_pending = 0;
rp               2137 drivers/net/ethernet/cortina/gemini.c 	rp->tx_pending = 1 << port->txq_order;
rp               2141 drivers/net/ethernet/cortina/gemini.c 			      struct ethtool_ringparam *rp)
rp               2149 drivers/net/ethernet/cortina/gemini.c 	if (rp->rx_pending) {
rp               2150 drivers/net/ethernet/cortina/gemini.c 		port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
rp               2153 drivers/net/ethernet/cortina/gemini.c 	if (rp->tx_pending) {
rp               2154 drivers/net/ethernet/cortina/gemini.c 		port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
rp               2141 drivers/net/ethernet/ibm/emac/core.c 				       struct ethtool_ringparam *rp)
rp               2143 drivers/net/ethernet/ibm/emac/core.c 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
rp               2144 drivers/net/ethernet/ibm/emac/core.c 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
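The cxgb4vf, gemini and emac callbacks above implement the ethtool ring-size query/update pair; rp is the struct ethtool_ringparam the core hands in. A sketch of how such handlers plug into ethtool_ops, with hypothetical driver names and sizes (the two-argument signatures match the kernel version quoted here):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void foo_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = 4096;	/* hardware maxima (illustrative) */
	rp->tx_max_pending = 4096;
	rp->rx_pending = 1024;		/* currently configured ring sizes */
	rp->tx_pending = 1024;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_ringparam	= foo_get_ringparam,
	/* .set_ringparam would range-check rp->{rx,tx}_pending and resize */
};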
rp                937 drivers/net/ethernet/rdc/r6040.c 	struct r6040_private *rp = netdev_priv(dev);
rp                941 drivers/net/ethernet/rdc/r6040.c 	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
rp               3270 drivers/net/ethernet/sun/niu.c static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
rp               3278 drivers/net/ethernet/sun/niu.c static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
rp               3281 drivers/net/ethernet/sun/niu.c 	unsigned int h = niu_hash_rxaddr(rp, addr);
rp               3285 drivers/net/ethernet/sun/niu.c 	pp = &rp->rxhash[h];
rp               3298 drivers/net/ethernet/sun/niu.c static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
rp               3300 drivers/net/ethernet/sun/niu.c 	unsigned int h = niu_hash_rxaddr(rp, base);
rp               3303 drivers/net/ethernet/sun/niu.c 	page->mapping = (struct address_space *) rp->rxhash[h];
rp               3304 drivers/net/ethernet/sun/niu.c 	rp->rxhash[h] = page;
rp               3307 drivers/net/ethernet/sun/niu.c static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
rp               3325 drivers/net/ethernet/sun/niu.c 	niu_hash_page(rp, page, addr);
rp               3326 drivers/net/ethernet/sun/niu.c 	if (rp->rbr_blocks_per_page > 1)
rp               3327 drivers/net/ethernet/sun/niu.c 		page_ref_add(page, rp->rbr_blocks_per_page - 1);
rp               3329 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
rp               3330 drivers/net/ethernet/sun/niu.c 		__le32 *rbr = &rp->rbr[start_index + i];
rp               3333 drivers/net/ethernet/sun/niu.c 		addr += rp->rbr_block_size;
rp               3339 drivers/net/ethernet/sun/niu.c static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
rp               3341 drivers/net/ethernet/sun/niu.c 	int index = rp->rbr_index;
rp               3343 drivers/net/ethernet/sun/niu.c 	rp->rbr_pending++;
rp               3344 drivers/net/ethernet/sun/niu.c 	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
rp               3345 drivers/net/ethernet/sun/niu.c 		int err = niu_rbr_add_page(np, rp, mask, index);
rp               3348 drivers/net/ethernet/sun/niu.c 			rp->rbr_pending--;
rp               3352 drivers/net/ethernet/sun/niu.c 		rp->rbr_index += rp->rbr_blocks_per_page;
rp               3353 drivers/net/ethernet/sun/niu.c 		BUG_ON(rp->rbr_index > rp->rbr_table_size);
rp               3354 drivers/net/ethernet/sun/niu.c 		if (rp->rbr_index == rp->rbr_table_size)
rp               3355 drivers/net/ethernet/sun/niu.c 			rp->rbr_index = 0;
rp               3357 drivers/net/ethernet/sun/niu.c 		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
rp               3358 drivers/net/ethernet/sun/niu.c 			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
rp               3359 drivers/net/ethernet/sun/niu.c 			rp->rbr_pending = 0;
rp               3364 drivers/net/ethernet/sun/niu.c static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rp               3366 drivers/net/ethernet/sun/niu.c 	unsigned int index = rp->rcr_index;
rp               3369 drivers/net/ethernet/sun/niu.c 	rp->rx_dropped++;
rp               3377 drivers/net/ethernet/sun/niu.c 		val = le64_to_cpup(&rp->rcr[index]);
rp               3380 drivers/net/ethernet/sun/niu.c 		page = niu_find_rxpage(rp, addr, &link);
rp               3382 drivers/net/ethernet/sun/niu.c 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
rp               3391 drivers/net/ethernet/sun/niu.c 			rp->rbr_refill_pending++;
rp               3394 drivers/net/ethernet/sun/niu.c 		index = NEXT_RCR(rp, index);
rp               3399 drivers/net/ethernet/sun/niu.c 	rp->rcr_index = index;
rp               3405 drivers/net/ethernet/sun/niu.c 			      struct rx_ring_info *rp)
rp               3407 drivers/net/ethernet/sun/niu.c 	unsigned int index = rp->rcr_index;
rp               3414 drivers/net/ethernet/sun/niu.c 		return niu_rx_pkt_ignore(np, rp);
rp               3424 drivers/net/ethernet/sun/niu.c 		val = le64_to_cpup(&rp->rcr[index]);
rp               3432 drivers/net/ethernet/sun/niu.c 		page = niu_find_rxpage(rp, addr, &link);
rp               3434 drivers/net/ethernet/sun/niu.c 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
rp               3453 drivers/net/ethernet/sun/niu.c 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
rp               3459 drivers/net/ethernet/sun/niu.c 			rp->rbr_refill_pending++;
rp               3463 drivers/net/ethernet/sun/niu.c 		index = NEXT_RCR(rp, index);
rp               3468 drivers/net/ethernet/sun/niu.c 	rp->rcr_index = index;
rp               3484 drivers/net/ethernet/sun/niu.c 	rp->rx_packets++;
rp               3485 drivers/net/ethernet/sun/niu.c 	rp->rx_bytes += skb->len;
rp               3488 drivers/net/ethernet/sun/niu.c 	skb_record_rx_queue(skb, rp->rx_channel);
rp               3494 drivers/net/ethernet/sun/niu.c static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
rp               3496 drivers/net/ethernet/sun/niu.c 	int blocks_per_page = rp->rbr_blocks_per_page;
rp               3497 drivers/net/ethernet/sun/niu.c 	int err, index = rp->rbr_index;
rp               3500 drivers/net/ethernet/sun/niu.c 	while (index < (rp->rbr_table_size - blocks_per_page)) {
rp               3501 drivers/net/ethernet/sun/niu.c 		err = niu_rbr_add_page(np, rp, mask, index);
rp               3508 drivers/net/ethernet/sun/niu.c 	rp->rbr_index = index;
rp               3512 drivers/net/ethernet/sun/niu.c static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
rp               3519 drivers/net/ethernet/sun/niu.c 		page = rp->rxhash[i];
rp               3535 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < rp->rbr_table_size; i++)
rp               3536 drivers/net/ethernet/sun/niu.c 		rp->rbr[i] = cpu_to_le32(0);
rp               3537 drivers/net/ethernet/sun/niu.c 	rp->rbr_index = 0;
rp               3540 drivers/net/ethernet/sun/niu.c static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
rp               3542 drivers/net/ethernet/sun/niu.c 	struct tx_buff_info *tb = &rp->tx_buffs[idx];
rp               3551 drivers/net/ethernet/sun/niu.c 	rp->tx_packets++;
rp               3552 drivers/net/ethernet/sun/niu.c 	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
rp               3559 drivers/net/ethernet/sun/niu.c 	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
rp               3560 drivers/net/ethernet/sun/niu.c 		rp->mark_pending--;
rp               3564 drivers/net/ethernet/sun/niu.c 		idx = NEXT_TX(rp, idx);
rp               3569 drivers/net/ethernet/sun/niu.c 		tb = &rp->tx_buffs[idx];
rp               3574 drivers/net/ethernet/sun/niu.c 		idx = NEXT_TX(rp, idx);
rp               3582 drivers/net/ethernet/sun/niu.c #define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
rp               3584 drivers/net/ethernet/sun/niu.c static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
rp               3591 drivers/net/ethernet/sun/niu.c 	index = (rp - np->tx_rings);
rp               3594 drivers/net/ethernet/sun/niu.c 	cs = rp->tx_cs;
rp               3599 drivers/net/ethernet/sun/niu.c 	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
rp               3602 drivers/net/ethernet/sun/niu.c 	rp->last_pkt_cnt = tmp;
rp               3604 drivers/net/ethernet/sun/niu.c 	cons = rp->cons;
rp               3610 drivers/net/ethernet/sun/niu.c 		cons = release_tx_packet(np, rp, cons);
rp               3612 drivers/net/ethernet/sun/niu.c 	rp->cons = cons;
rp               3617 drivers/net/ethernet/sun/niu.c 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
rp               3620 drivers/net/ethernet/sun/niu.c 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
rp               3627 drivers/net/ethernet/sun/niu.c 					     struct rx_ring_info *rp,
rp               3641 drivers/net/ethernet/sun/niu.c 	int rx_channel = rp->rx_channel;
rp               3652 drivers/net/ethernet/sun/niu.c 		rp->rx_errors += misc & RXMISC_COUNT;
rp               3667 drivers/net/ethernet/sun/niu.c 		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
rp               3679 drivers/net/ethernet/sun/niu.c 		       struct rx_ring_info *rp, int budget)
rp               3682 drivers/net/ethernet/sun/niu.c 	struct rxdma_mailbox *mbox = rp->mbox;
rp               3686 drivers/net/ethernet/sun/niu.c 	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
rp               3687 drivers/net/ethernet/sun/niu.c 	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
rp               3697 drivers/net/ethernet/sun/niu.c 		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
rp               3702 drivers/net/ethernet/sun/niu.c 		rcr_done += niu_process_rx_pkt(napi, np, rp);
rp               3706 drivers/net/ethernet/sun/niu.c 	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
rp               3709 drivers/net/ethernet/sun/niu.c 		for (i = 0; i < rp->rbr_refill_pending; i++)
rp               3710 drivers/net/ethernet/sun/niu.c 			niu_rbr_refill(np, rp, GFP_ATOMIC);
rp               3711 drivers/net/ethernet/sun/niu.c 		rp->rbr_refill_pending = 0;
rp               3718 drivers/net/ethernet/sun/niu.c 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
rp               3722 drivers/net/ethernet/sun/niu.c 		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
rp               3738 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               3739 drivers/net/ethernet/sun/niu.c 		if (tx_vec & (1 << rp->tx_channel))
rp               3740 drivers/net/ethernet/sun/niu.c 			niu_tx_work(np, rp);
rp               3741 drivers/net/ethernet/sun/niu.c 		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
rp               3745 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               3747 drivers/net/ethernet/sun/niu.c 		if (rx_vec & (1 << rp->rx_channel)) {
rp               3750 drivers/net/ethernet/sun/niu.c 			this_work_done = niu_rx_work(&lp->napi, np, rp,
rp               3756 drivers/net/ethernet/sun/niu.c 		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
rp               3777 drivers/net/ethernet/sun/niu.c static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
rp               3780 drivers/net/ethernet/sun/niu.c 	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
rp               3814 drivers/net/ethernet/sun/niu.c static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
rp               3816 drivers/net/ethernet/sun/niu.c 	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
rp               3826 drivers/net/ethernet/sun/niu.c 			   rp->rx_channel,
rp               3829 drivers/net/ethernet/sun/niu.c 		niu_log_rxchan_errors(np, rp, stat);
rp               3832 drivers/net/ethernet/sun/niu.c 	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
rp               3838 drivers/net/ethernet/sun/niu.c static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
rp               3841 drivers/net/ethernet/sun/niu.c 	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
rp               3863 drivers/net/ethernet/sun/niu.c static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
rp               3867 drivers/net/ethernet/sun/niu.c 	cs = nr64(TX_CS(rp->tx_channel));
rp               3868 drivers/net/ethernet/sun/niu.c 	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
rp               3869 drivers/net/ethernet/sun/niu.c 	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
rp               3872 drivers/net/ethernet/sun/niu.c 		   rp->tx_channel,
rp               3877 drivers/net/ethernet/sun/niu.c 	niu_log_txchan_errors(np, rp, cs);
rp               4074 drivers/net/ethernet/sun/niu.c 			struct rx_ring_info *rp = &np->rx_rings[i];
rp               4076 drivers/net/ethernet/sun/niu.c 			if (rx_vec & (1 << rp->rx_channel)) {
rp               4077 drivers/net/ethernet/sun/niu.c 				int r = niu_rx_error(np, rp);
rp               4082 drivers/net/ethernet/sun/niu.c 						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
rp               4092 drivers/net/ethernet/sun/niu.c 			struct tx_ring_info *rp = &np->tx_rings[i];
rp               4094 drivers/net/ethernet/sun/niu.c 			if (tx_vec & (1 << rp->tx_channel)) {
rp               4095 drivers/net/ethernet/sun/niu.c 				int r = niu_tx_error(np, rp);
rp               4125 drivers/net/ethernet/sun/niu.c static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
rp               4128 drivers/net/ethernet/sun/niu.c 	struct rxdma_mailbox *mbox = rp->mbox;
rp               4133 drivers/net/ethernet/sun/niu.c 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
rp               4139 drivers/net/ethernet/sun/niu.c static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
rp               4142 drivers/net/ethernet/sun/niu.c 	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
rp               4145 drivers/net/ethernet/sun/niu.c 		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
rp               4158 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               4159 drivers/net/ethernet/sun/niu.c 		int ldn = LDN_RXDMA(rp->rx_channel);
rp               4165 drivers/net/ethernet/sun/niu.c 		if (rx_vec & (1 << rp->rx_channel))
rp               4166 drivers/net/ethernet/sun/niu.c 			niu_rxchan_intr(np, rp, ldn);
rp               4170 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               4171 drivers/net/ethernet/sun/niu.c 		int ldn = LDN_TXDMA(rp->tx_channel);
rp               4177 drivers/net/ethernet/sun/niu.c 		if (tx_vec & (1 << rp->tx_channel))
rp               4178 drivers/net/ethernet/sun/niu.c 			niu_txchan_intr(np, rp, ldn);
rp               4238 drivers/net/ethernet/sun/niu.c static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
rp               4240 drivers/net/ethernet/sun/niu.c 	if (rp->mbox) {
rp               4243 drivers/net/ethernet/sun/niu.c 				       rp->mbox, rp->mbox_dma);
rp               4244 drivers/net/ethernet/sun/niu.c 		rp->mbox = NULL;
rp               4246 drivers/net/ethernet/sun/niu.c 	if (rp->rcr) {
rp               4249 drivers/net/ethernet/sun/niu.c 				       rp->rcr, rp->rcr_dma);
rp               4250 drivers/net/ethernet/sun/niu.c 		rp->rcr = NULL;
rp               4251 drivers/net/ethernet/sun/niu.c 		rp->rcr_table_size = 0;
rp               4252 drivers/net/ethernet/sun/niu.c 		rp->rcr_index = 0;
rp               4254 drivers/net/ethernet/sun/niu.c 	if (rp->rbr) {
rp               4255 drivers/net/ethernet/sun/niu.c 		niu_rbr_free(np, rp);
rp               4259 drivers/net/ethernet/sun/niu.c 				       rp->rbr, rp->rbr_dma);
rp               4260 drivers/net/ethernet/sun/niu.c 		rp->rbr = NULL;
rp               4261 drivers/net/ethernet/sun/niu.c 		rp->rbr_table_size = 0;
rp               4262 drivers/net/ethernet/sun/niu.c 		rp->rbr_index = 0;
rp               4264 drivers/net/ethernet/sun/niu.c 	kfree(rp->rxhash);
rp               4265 drivers/net/ethernet/sun/niu.c 	rp->rxhash = NULL;
rp               4268 drivers/net/ethernet/sun/niu.c static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
rp               4270 drivers/net/ethernet/sun/niu.c 	if (rp->mbox) {
rp               4273 drivers/net/ethernet/sun/niu.c 				       rp->mbox, rp->mbox_dma);
rp               4274 drivers/net/ethernet/sun/niu.c 		rp->mbox = NULL;
rp               4276 drivers/net/ethernet/sun/niu.c 	if (rp->descr) {
rp               4280 drivers/net/ethernet/sun/niu.c 			if (rp->tx_buffs[i].skb)
rp               4281 drivers/net/ethernet/sun/niu.c 				(void) release_tx_packet(np, rp, i);
rp               4286 drivers/net/ethernet/sun/niu.c 				       rp->descr, rp->descr_dma);
rp               4287 drivers/net/ethernet/sun/niu.c 		rp->descr = NULL;
rp               4288 drivers/net/ethernet/sun/niu.c 		rp->pending = 0;
rp               4289 drivers/net/ethernet/sun/niu.c 		rp->prod = 0;
rp               4290 drivers/net/ethernet/sun/niu.c 		rp->cons = 0;
rp               4291 drivers/net/ethernet/sun/niu.c 		rp->wrap_bit = 0;
rp               4301 drivers/net/ethernet/sun/niu.c 			struct rx_ring_info *rp = &np->rx_rings[i];
rp               4303 drivers/net/ethernet/sun/niu.c 			niu_free_rx_ring_info(np, rp);
rp               4312 drivers/net/ethernet/sun/niu.c 			struct tx_ring_info *rp = &np->tx_rings[i];
rp               4314 drivers/net/ethernet/sun/niu.c 			niu_free_tx_ring_info(np, rp);
rp               4323 drivers/net/ethernet/sun/niu.c 				  struct rx_ring_info *rp)
rp               4327 drivers/net/ethernet/sun/niu.c 	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
rp               4329 drivers/net/ethernet/sun/niu.c 	if (!rp->rxhash)
rp               4332 drivers/net/ethernet/sun/niu.c 	rp->mbox = np->ops->alloc_coherent(np->device,
rp               4334 drivers/net/ethernet/sun/niu.c 					   &rp->mbox_dma, GFP_KERNEL);
rp               4335 drivers/net/ethernet/sun/niu.c 	if (!rp->mbox)
rp               4337 drivers/net/ethernet/sun/niu.c 	if ((unsigned long)rp->mbox & (64UL - 1)) {
rp               4339 drivers/net/ethernet/sun/niu.c 			   rp->mbox);
rp               4343 drivers/net/ethernet/sun/niu.c 	rp->rcr = np->ops->alloc_coherent(np->device,
rp               4345 drivers/net/ethernet/sun/niu.c 					  &rp->rcr_dma, GFP_KERNEL);
rp               4346 drivers/net/ethernet/sun/niu.c 	if (!rp->rcr)
rp               4348 drivers/net/ethernet/sun/niu.c 	if ((unsigned long)rp->rcr & (64UL - 1)) {
rp               4350 drivers/net/ethernet/sun/niu.c 			   rp->rcr);
rp               4353 drivers/net/ethernet/sun/niu.c 	rp->rcr_table_size = MAX_RCR_RING_SIZE;
rp               4354 drivers/net/ethernet/sun/niu.c 	rp->rcr_index = 0;
rp               4356 drivers/net/ethernet/sun/niu.c 	rp->rbr = np->ops->alloc_coherent(np->device,
rp               4358 drivers/net/ethernet/sun/niu.c 					  &rp->rbr_dma, GFP_KERNEL);
rp               4359 drivers/net/ethernet/sun/niu.c 	if (!rp->rbr)
rp               4361 drivers/net/ethernet/sun/niu.c 	if ((unsigned long)rp->rbr & (64UL - 1)) {
rp               4363 drivers/net/ethernet/sun/niu.c 			   rp->rbr);
rp               4366 drivers/net/ethernet/sun/niu.c 	rp->rbr_table_size = MAX_RBR_RING_SIZE;
rp               4367 drivers/net/ethernet/sun/niu.c 	rp->rbr_index = 0;
rp               4368 drivers/net/ethernet/sun/niu.c 	rp->rbr_pending = 0;
rp               4373 drivers/net/ethernet/sun/niu.c static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
rp               4380 drivers/net/ethernet/sun/niu.c 	rp->max_burst = mtu + 32;
rp               4381 drivers/net/ethernet/sun/niu.c 	if (rp->max_burst > 4096)
rp               4382 drivers/net/ethernet/sun/niu.c 		rp->max_burst = 4096;
rp               4386 drivers/net/ethernet/sun/niu.c 				  struct tx_ring_info *rp)
rp               4390 drivers/net/ethernet/sun/niu.c 	rp->mbox = np->ops->alloc_coherent(np->device,
rp               4392 drivers/net/ethernet/sun/niu.c 					   &rp->mbox_dma, GFP_KERNEL);
rp               4393 drivers/net/ethernet/sun/niu.c 	if (!rp->mbox)
rp               4395 drivers/net/ethernet/sun/niu.c 	if ((unsigned long)rp->mbox & (64UL - 1)) {
rp               4397 drivers/net/ethernet/sun/niu.c 			   rp->mbox);
rp               4401 drivers/net/ethernet/sun/niu.c 	rp->descr = np->ops->alloc_coherent(np->device,
rp               4403 drivers/net/ethernet/sun/niu.c 					    &rp->descr_dma, GFP_KERNEL);
rp               4404 drivers/net/ethernet/sun/niu.c 	if (!rp->descr)
rp               4406 drivers/net/ethernet/sun/niu.c 	if ((unsigned long)rp->descr & (64UL - 1)) {
rp               4408 drivers/net/ethernet/sun/niu.c 			   rp->descr);
rp               4412 drivers/net/ethernet/sun/niu.c 	rp->pending = MAX_TX_RING_SIZE;
rp               4413 drivers/net/ethernet/sun/niu.c 	rp->prod = 0;
rp               4414 drivers/net/ethernet/sun/niu.c 	rp->cons = 0;
rp               4415 drivers/net/ethernet/sun/niu.c 	rp->wrap_bit = 0;
rp               4418 drivers/net/ethernet/sun/niu.c 	rp->mark_freq = rp->pending / 4;
rp               4420 drivers/net/ethernet/sun/niu.c 	niu_set_max_burst(np, rp);
rp               4425 drivers/net/ethernet/sun/niu.c static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
rp               4431 drivers/net/ethernet/sun/niu.c 	rp->rbr_block_size = 1 << bss;
rp               4432 drivers/net/ethernet/sun/niu.c 	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
rp               4434 drivers/net/ethernet/sun/niu.c 	rp->rbr_sizes[0] = 256;
rp               4435 drivers/net/ethernet/sun/niu.c 	rp->rbr_sizes[1] = 1024;
rp               4439 drivers/net/ethernet/sun/niu.c 			rp->rbr_sizes[2] = 4096;
rp               4443 drivers/net/ethernet/sun/niu.c 			rp->rbr_sizes[2] = 8192;
rp               4447 drivers/net/ethernet/sun/niu.c 		rp->rbr_sizes[2] = 2048;
rp               4449 drivers/net/ethernet/sun/niu.c 	rp->rbr_sizes[3] = rp->rbr_block_size;
rp               4484 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               4486 drivers/net/ethernet/sun/niu.c 		rp->np = np;
rp               4487 drivers/net/ethernet/sun/niu.c 		rp->rx_channel = first_rx_channel + i;
rp               4489 drivers/net/ethernet/sun/niu.c 		err = niu_alloc_rx_ring_info(np, rp);
rp               4493 drivers/net/ethernet/sun/niu.c 		niu_size_rbr(np, rp);
rp               4496 drivers/net/ethernet/sun/niu.c 		rp->nonsyn_window = 64;
rp               4497 drivers/net/ethernet/sun/niu.c 		rp->nonsyn_threshold = rp->rcr_table_size - 64;
rp               4498 drivers/net/ethernet/sun/niu.c 		rp->syn_window = 64;
rp               4499 drivers/net/ethernet/sun/niu.c 		rp->syn_threshold = rp->rcr_table_size - 64;
rp               4500 drivers/net/ethernet/sun/niu.c 		rp->rcr_pkt_threshold = 16;
rp               4501 drivers/net/ethernet/sun/niu.c 		rp->rcr_timeout = 8;
rp               4502 drivers/net/ethernet/sun/niu.c 		rp->rbr_kick_thresh = RBR_REFILL_MIN;
rp               4503 drivers/net/ethernet/sun/niu.c 		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
rp               4504 drivers/net/ethernet/sun/niu.c 			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
rp               4506 drivers/net/ethernet/sun/niu.c 		err = niu_rbr_fill(np, rp, GFP_KERNEL);
rp               4524 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               4526 drivers/net/ethernet/sun/niu.c 		rp->np = np;
rp               4527 drivers/net/ethernet/sun/niu.c 		rp->tx_channel = first_tx_channel + i;
rp               4529 drivers/net/ethernet/sun/niu.c 		err = niu_alloc_tx_ring_info(np, rp);
rp               4655 drivers/net/ethernet/sun/niu.c static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
rp               4657 drivers/net/ethernet/sun/niu.c 	int err, channel = rp->tx_channel;
rp               4672 drivers/net/ethernet/sun/niu.c 	nw64(TXC_DMA_MAX(channel), rp->max_burst);
rp               4675 drivers/net/ethernet/sun/niu.c 	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
rp               4678 drivers/net/ethernet/sun/niu.c 			   channel, (unsigned long long)rp->descr_dma);
rp               4687 drivers/net/ethernet/sun/niu.c 	ring_len = (rp->pending / 8);
rp               4690 drivers/net/ethernet/sun/niu.c 	       rp->descr_dma);
rp               4693 drivers/net/ethernet/sun/niu.c 	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
rp               4694 drivers/net/ethernet/sun/niu.c 	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
rp               4696 drivers/net/ethernet/sun/niu.c 			    channel, (unsigned long long)rp->mbox_dma);
rp               4699 drivers/net/ethernet/sun/niu.c 	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
rp               4700 drivers/net/ethernet/sun/niu.c 	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
rp               4704 drivers/net/ethernet/sun/niu.c 	rp->last_pkt_cnt = 0;
rp               4795 drivers/net/ethernet/sun/niu.c static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
rp               4799 drivers/net/ethernet/sun/niu.c 	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
rp               4800 drivers/net/ethernet/sun/niu.c 	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
rp               4801 drivers/net/ethernet/sun/niu.c 	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
rp               4802 drivers/net/ethernet/sun/niu.c 	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
rp               4803 drivers/net/ethernet/sun/niu.c 	nw64(RDC_RED_PARA(rp->rx_channel), val);
rp               4806 drivers/net/ethernet/sun/niu.c static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
rp               4811 drivers/net/ethernet/sun/niu.c 	switch (rp->rbr_block_size) {
rp               4828 drivers/net/ethernet/sun/niu.c 	switch (rp->rbr_sizes[2]) {
rp               4846 drivers/net/ethernet/sun/niu.c 	switch (rp->rbr_sizes[1]) {
rp               4864 drivers/net/ethernet/sun/niu.c 	switch (rp->rbr_sizes[0]) {
rp               4908 drivers/net/ethernet/sun/niu.c static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
rp               4910 drivers/net/ethernet/sun/niu.c 	int err, channel = rp->rx_channel;
rp               4921 drivers/net/ethernet/sun/niu.c 	niu_rx_channel_wred_init(np, rp);
rp               4929 drivers/net/ethernet/sun/niu.c 	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
rp               4931 drivers/net/ethernet/sun/niu.c 	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
rp               4934 drivers/net/ethernet/sun/niu.c 	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
rp               4935 drivers/net/ethernet/sun/niu.c 	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
rp               4936 drivers/net/ethernet/sun/niu.c 	err = niu_compute_rbr_cfig_b(rp, &val);
rp               4941 drivers/net/ethernet/sun/niu.c 	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
rp               4942 drivers/net/ethernet/sun/niu.c 	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
rp               4944 drivers/net/ethernet/sun/niu.c 	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
rp               4946 drivers/net/ethernet/sun/niu.c 	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
rp               4952 drivers/net/ethernet/sun/niu.c 	nw64(RBR_KICK(channel), rp->rbr_index);
rp               4982 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               4984 drivers/net/ethernet/sun/niu.c 		err = niu_init_one_rx_channel(np, rp);
rp               5821 drivers/net/ethernet/sun/niu.c static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
rp               5823 drivers/net/ethernet/sun/niu.c 	(void) niu_tx_channel_stop(np, rp->tx_channel);
rp               5831 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               5833 drivers/net/ethernet/sun/niu.c 		niu_stop_one_tx_channel(np, rp);
rp               5837 drivers/net/ethernet/sun/niu.c static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
rp               5839 drivers/net/ethernet/sun/niu.c 	(void) niu_tx_channel_reset(np, rp->tx_channel);
rp               5847 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               5849 drivers/net/ethernet/sun/niu.c 		niu_reset_one_tx_channel(np, rp);
rp               5853 drivers/net/ethernet/sun/niu.c static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
rp               5855 drivers/net/ethernet/sun/niu.c 	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
rp               5863 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               5865 drivers/net/ethernet/sun/niu.c 		niu_stop_one_rx_channel(np, rp);
rp               5869 drivers/net/ethernet/sun/niu.c static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
rp               5871 drivers/net/ethernet/sun/niu.c 	int channel = rp->rx_channel;
rp               5884 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               5886 drivers/net/ethernet/sun/niu.c 		niu_reset_one_rx_channel(np, rp);
rp               5930 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               5932 drivers/net/ethernet/sun/niu.c 		err = niu_init_one_tx_channel(np, rp);
rp               6233 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &rx_rings[i];
rp               6235 drivers/net/ethernet/sun/niu.c 		niu_sync_rx_discard_stats(np, rp, 0);
rp               6237 drivers/net/ethernet/sun/niu.c 		pkts += rp->rx_packets;
rp               6238 drivers/net/ethernet/sun/niu.c 		bytes += rp->rx_bytes;
rp               6239 drivers/net/ethernet/sun/niu.c 		dropped += rp->rx_dropped;
rp               6240 drivers/net/ethernet/sun/niu.c 		errors += rp->rx_errors;
rp               6264 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &tx_rings[i];
rp               6266 drivers/net/ethernet/sun/niu.c 		pkts += rp->tx_packets;
rp               6267 drivers/net/ethernet/sun/niu.c 		bytes += rp->tx_bytes;
rp               6268 drivers/net/ethernet/sun/niu.c 		errors += rp->tx_errors;
rp               6438 drivers/net/ethernet/sun/niu.c 			struct rx_ring_info *rp = &np->rx_rings[i];
rp               6443 drivers/net/ethernet/sun/niu.c 				page = rp->rxhash[j];
rp               6449 drivers/net/ethernet/sun/niu.c 					rp->rbr[k++] = cpu_to_le32(base);
rp               6454 drivers/net/ethernet/sun/niu.c 				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
rp               6459 drivers/net/ethernet/sun/niu.c 			rp->rbr_index = rp->rbr_table_size - 1;
rp               6460 drivers/net/ethernet/sun/niu.c 			rp->rcr_index = 0;
rp               6461 drivers/net/ethernet/sun/niu.c 			rp->rbr_pending = 0;
rp               6462 drivers/net/ethernet/sun/niu.c 			rp->rbr_refill_pending = 0;
rp               6467 drivers/net/ethernet/sun/niu.c 			struct tx_ring_info *rp = &np->tx_rings[i];
rp               6470 drivers/net/ethernet/sun/niu.c 				if (rp->tx_buffs[j].skb)
rp               6471 drivers/net/ethernet/sun/niu.c 					(void) release_tx_packet(np, rp, j);
rp               6474 drivers/net/ethernet/sun/niu.c 			rp->pending = MAX_TX_RING_SIZE;
rp               6475 drivers/net/ethernet/sun/niu.c 			rp->prod = 0;
rp               6476 drivers/net/ethernet/sun/niu.c 			rp->cons = 0;
rp               6477 drivers/net/ethernet/sun/niu.c 			rp->wrap_bit = 0;
rp               6530 drivers/net/ethernet/sun/niu.c static void niu_set_txd(struct tx_ring_info *rp, int index,
rp               6534 drivers/net/ethernet/sun/niu.c 	__le64 *desc = &rp->descr[index];
rp               6613 drivers/net/ethernet/sun/niu.c 	struct tx_ring_info *rp;
rp               6621 drivers/net/ethernet/sun/niu.c 	rp = &np->tx_rings[i];
rp               6624 drivers/net/ethernet/sun/niu.c 	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
rp               6627 drivers/net/ethernet/sun/niu.c 		rp->tx_errors++;
rp               6660 drivers/net/ethernet/sun/niu.c 	prod = rp->prod;
rp               6662 drivers/net/ethernet/sun/niu.c 	rp->tx_buffs[prod].skb = skb;
rp               6663 drivers/net/ethernet/sun/niu.c 	rp->tx_buffs[prod].mapping = mapping;
rp               6666 drivers/net/ethernet/sun/niu.c 	if (++rp->mark_counter == rp->mark_freq) {
rp               6667 drivers/net/ethernet/sun/niu.c 		rp->mark_counter = 0;
rp               6669 drivers/net/ethernet/sun/niu.c 		rp->mark_pending++;
rp               6685 drivers/net/ethernet/sun/niu.c 		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
rp               6688 drivers/net/ethernet/sun/niu.c 		prod = NEXT_TX(rp, prod);
rp               6701 drivers/net/ethernet/sun/niu.c 		rp->tx_buffs[prod].skb = NULL;
rp               6702 drivers/net/ethernet/sun/niu.c 		rp->tx_buffs[prod].mapping = mapping;
rp               6704 drivers/net/ethernet/sun/niu.c 		niu_set_txd(rp, prod, mapping, len, 0, 0);
rp               6706 drivers/net/ethernet/sun/niu.c 		prod = NEXT_TX(rp, prod);
rp               6709 drivers/net/ethernet/sun/niu.c 	if (prod < rp->prod)
rp               6710 drivers/net/ethernet/sun/niu.c 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp               6711 drivers/net/ethernet/sun/niu.c 	rp->prod = prod;
rp               6713 drivers/net/ethernet/sun/niu.c 	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
rp               6715 drivers/net/ethernet/sun/niu.c 	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
rp               6717 drivers/net/ethernet/sun/niu.c 		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
rp               6725 drivers/net/ethernet/sun/niu.c 	rp->tx_errors++;
rp               7792 drivers/net/ethernet/sun/niu.c 		struct rx_ring_info *rp = &np->rx_rings[i];
rp               7794 drivers/net/ethernet/sun/niu.c 		niu_sync_rx_discard_stats(np, rp, 0);
rp               7796 drivers/net/ethernet/sun/niu.c 		data[0] = rp->rx_channel;
rp               7797 drivers/net/ethernet/sun/niu.c 		data[1] = rp->rx_packets;
rp               7798 drivers/net/ethernet/sun/niu.c 		data[2] = rp->rx_bytes;
rp               7799 drivers/net/ethernet/sun/niu.c 		data[3] = rp->rx_dropped;
rp               7800 drivers/net/ethernet/sun/niu.c 		data[4] = rp->rx_errors;
rp               7804 drivers/net/ethernet/sun/niu.c 		struct tx_ring_info *rp = &np->tx_rings[i];
rp               7806 drivers/net/ethernet/sun/niu.c 		data[0] = rp->tx_channel;
rp               7807 drivers/net/ethernet/sun/niu.c 		data[1] = rp->tx_packets;
rp               7808 drivers/net/ethernet/sun/niu.c 		data[2] = rp->tx_bytes;
rp               7809 drivers/net/ethernet/sun/niu.c 		data[3] = rp->tx_errors;
rp               2946 drivers/net/ethernet/sun/niu.h #define NEXT_RCR(rp, index) \
rp               2947 drivers/net/ethernet/sun/niu.h 	(((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0)
rp               2948 drivers/net/ethernet/sun/niu.h #define NEXT_RBR(rp, index) \
rp               2949 drivers/net/ethernet/sun/niu.h 	(((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0)
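Note: the NEXT_RCR()/NEXT_RBR() macros just above advance an index through a fixed-size ring and wrap it to zero. A hedged, standalone restatement of that pattern; next_ring_index() is a local illustration, not a niu API.

	#include <assert.h>

	/* Advance index through a ring of table_size slots, wrapping to 0,
	 * as NEXT_RCR()/NEXT_RBR() do for the RCR and RBR tables. */
	static unsigned int next_ring_index(unsigned int index, unsigned int table_size)
	{
		return (index + 1) < table_size ? index + 1 : 0;
	}

	int main(void)
	{
		assert(next_ring_index(0, 4) == 1);
		assert(next_ring_index(3, 4) == 0);	/* wraps at table_size */
		return 0;
	}
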
rp                527 drivers/net/ethernet/via/via-rhine.c static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
rp                529 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                540 drivers/net/ethernet/via/via-rhine.c 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
rp                545 drivers/net/ethernet/via/via-rhine.c static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
rp                547 drivers/net/ethernet/via/via-rhine.c 	rhine_wait_bit(rp, reg, mask, false);
rp                550 drivers/net/ethernet/via/via-rhine.c static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
rp                552 drivers/net/ethernet/via/via-rhine.c 	rhine_wait_bit(rp, reg, mask, true);
rp                555 drivers/net/ethernet/via/via-rhine.c static u32 rhine_get_events(struct rhine_private *rp)
rp                557 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                562 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqStatusWBRace)
rp                567 drivers/net/ethernet/via/via-rhine.c static void rhine_ack_events(struct rhine_private *rp, u32 mask)
rp                569 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                571 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqStatusWBRace)
rp                582 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp                583 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                586 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqWOL) {
rp                596 drivers/net/ethernet/via/via-rhine.c 		if (rp->quirks & rq6patterns)
rp                601 drivers/net/ethernet/via/via-rhine.c 		if (rp->quirks & rq6patterns)
rp                606 drivers/net/ethernet/via/via-rhine.c 		if (rp->quirks & rq6patterns)
rp                638 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp                639 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                649 drivers/net/ethernet/via/via-rhine.c 		if (rp->quirks & rqForceReset)
rp                653 drivers/net/ethernet/via/via-rhine.c 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
rp                657 drivers/net/ethernet/via/via-rhine.c 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
rp                708 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp                709 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                725 drivers/net/ethernet/via/via-rhine.c 	enable_mmio(pioaddr, rp->quirks);
rp                728 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqWOL)
rp                736 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp                737 drivers/net/ethernet/via/via-rhine.c 	const int irq = rp->irq;
rp                745 drivers/net/ethernet/via/via-rhine.c static void rhine_kick_tx_threshold(struct rhine_private *rp)
rp                747 drivers/net/ethernet/via/via-rhine.c 	if (rp->tx_thresh < 0xe0) {
rp                748 drivers/net/ethernet/via/via-rhine.c 		void __iomem *ioaddr = rp->base;
rp                750 drivers/net/ethernet/via/via-rhine.c 		rp->tx_thresh += 0x20;
rp                751 drivers/net/ethernet/via/via-rhine.c 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
rp                755 drivers/net/ethernet/via/via-rhine.c static void rhine_tx_err(struct rhine_private *rp, u32 status)
rp                757 drivers/net/ethernet/via/via-rhine.c 	struct net_device *dev = rp->dev;
rp                760 drivers/net/ethernet/via/via-rhine.c 		netif_info(rp, tx_err, dev,
rp                765 drivers/net/ethernet/via/via-rhine.c 		rhine_kick_tx_threshold(rp);
rp                766 drivers/net/ethernet/via/via-rhine.c 		netif_info(rp, tx_err ,dev, "Transmitter underrun, "
rp                767 drivers/net/ethernet/via/via-rhine.c 			   "Tx threshold now %02x\n", rp->tx_thresh);
rp                771 drivers/net/ethernet/via/via-rhine.c 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
rp                775 drivers/net/ethernet/via/via-rhine.c 		rhine_kick_tx_threshold(rp);
rp                776 drivers/net/ethernet/via/via-rhine.c 		netif_info(rp, tx_err, dev, "Unspecified error. "
rp                777 drivers/net/ethernet/via/via-rhine.c 			   "Tx threshold now %02x\n", rp->tx_thresh);
rp                783 drivers/net/ethernet/via/via-rhine.c static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
rp                785 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                786 drivers/net/ethernet/via/via-rhine.c 	struct net_device_stats *stats = &rp->dev->stats;
rp                824 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
rp                825 drivers/net/ethernet/via/via-rhine.c 	struct net_device *dev = rp->dev;
rp                826 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp                831 drivers/net/ethernet/via/via-rhine.c 	status = rhine_get_events(rp);
rp                832 drivers/net/ethernet/via/via-rhine.c 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
rp                840 drivers/net/ethernet/via/via-rhine.c 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
rp                842 drivers/net/ethernet/via/via-rhine.c 				netif_warn(rp, tx_err, dev, "Tx still on\n");
rp                848 drivers/net/ethernet/via/via-rhine.c 			rhine_tx_err(rp, status);
rp                852 drivers/net/ethernet/via/via-rhine.c 		spin_lock(&rp->lock);
rp                853 drivers/net/ethernet/via/via-rhine.c 		rhine_update_rx_crc_and_missed_errord(rp);
rp                854 drivers/net/ethernet/via/via-rhine.c 		spin_unlock(&rp->lock);
rp                859 drivers/net/ethernet/via/via-rhine.c 		schedule_work(&rp->slow_event_task);
rp                871 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp                877 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqRhineI)
rp                906 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp;
rp                924 drivers/net/ethernet/via/via-rhine.c 	rp = netdev_priv(dev);
rp                925 drivers/net/ethernet/via/via-rhine.c 	rp->dev = dev;
rp                926 drivers/net/ethernet/via/via-rhine.c 	rp->quirks = quirks;
rp                927 drivers/net/ethernet/via/via-rhine.c 	rp->pioaddr = pioaddr;
rp                928 drivers/net/ethernet/via/via-rhine.c 	rp->base = ioaddr;
rp                929 drivers/net/ethernet/via/via-rhine.c 	rp->irq = irq;
rp                930 drivers/net/ethernet/via/via-rhine.c 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
rp                932 drivers/net/ethernet/via/via-rhine.c 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
rp                934 drivers/net/ethernet/via/via-rhine.c 	u64_stats_init(&rp->tx_stats.syncp);
rp                935 drivers/net/ethernet/via/via-rhine.c 	u64_stats_init(&rp->rx_stats.syncp);
rp                956 drivers/net/ethernet/via/via-rhine.c 	spin_lock_init(&rp->lock);
rp                957 drivers/net/ethernet/via/via-rhine.c 	mutex_init(&rp->task_lock);
rp                958 drivers/net/ethernet/via/via-rhine.c 	INIT_WORK(&rp->reset_task, rhine_reset_task);
rp                959 drivers/net/ethernet/via/via-rhine.c 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
rp                961 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.dev = dev;
rp                962 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.mdio_read = mdio_read;
rp                963 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.mdio_write = mdio_write;
rp                964 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.phy_id_mask = 0x1f;
rp                965 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.reg_num_mask = 0x1f;
rp                972 drivers/net/ethernet/via/via-rhine.c 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
rp                974 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqRhineI)
rp                977 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqMgmt)
rp                987 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqRhineI)
rp                989 drivers/net/ethernet/via/via-rhine.c 	else if (rp->quirks & rqStatusWBRace)
rp                991 drivers/net/ethernet/via/via-rhine.c 	else if (rp->quirks & rqMgmt)
rp                997 drivers/net/ethernet/via/via-rhine.c 		    name, ioaddr, dev->dev_addr, rp->irq);
rp               1007 drivers/net/ethernet/via/via-rhine.c 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
rp               1011 drivers/net/ethernet/via/via-rhine.c 				    mii_status, rp->mii_if.advertising,
rp               1022 drivers/net/ethernet/via/via-rhine.c 	rp->mii_if.phy_id = phy_id;
rp               1024 drivers/net/ethernet/via/via-rhine.c 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
rp               1154 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1168 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqRhineI) {
rp               1169 drivers/net/ethernet/via/via-rhine.c 		rp->tx_bufs = dma_alloc_coherent(hwdev,
rp               1171 drivers/net/ethernet/via/via-rhine.c 						 &rp->tx_bufs_dma,
rp               1173 drivers/net/ethernet/via/via-rhine.c 		if (rp->tx_bufs == NULL) {
rp               1182 drivers/net/ethernet/via/via-rhine.c 	rp->rx_ring = ring;
rp               1183 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
rp               1184 drivers/net/ethernet/via/via-rhine.c 	rp->rx_ring_dma = ring_dma;
rp               1185 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
rp               1192 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1198 drivers/net/ethernet/via/via-rhine.c 			  rp->rx_ring, rp->rx_ring_dma);
rp               1199 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring = NULL;
rp               1201 drivers/net/ethernet/via/via-rhine.c 	if (rp->tx_bufs)
rp               1203 drivers/net/ethernet/via/via-rhine.c 				  rp->tx_bufs, rp->tx_bufs_dma);
rp               1205 drivers/net/ethernet/via/via-rhine.c 	rp->tx_bufs = NULL;
rp               1217 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1219 drivers/net/ethernet/via/via-rhine.c 	const int size = rp->rx_buf_sz;
rp               1227 drivers/net/ethernet/via/via-rhine.c 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
rp               1235 drivers/net/ethernet/via/via-rhine.c static void rhine_reset_rbufs(struct rhine_private *rp)
rp               1239 drivers/net/ethernet/via/via-rhine.c 	rp->cur_rx = 0;
rp               1242 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
rp               1245 drivers/net/ethernet/via/via-rhine.c static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
rp               1248 drivers/net/ethernet/via/via-rhine.c 	rp->rx_skbuff_dma[entry] = sd->dma;
rp               1249 drivers/net/ethernet/via/via-rhine.c 	rp->rx_skbuff[entry] = sd->skb;
rp               1251 drivers/net/ethernet/via/via-rhine.c 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
rp               1259 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1263 drivers/net/ethernet/via/via-rhine.c 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
rp               1264 drivers/net/ethernet/via/via-rhine.c 	next = rp->rx_ring_dma;
rp               1268 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].rx_status = 0;
rp               1269 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
rp               1271 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
rp               1272 drivers/net/ethernet/via/via-rhine.c 		rp->rx_skbuff[i] = NULL;
rp               1275 drivers/net/ethernet/via/via-rhine.c 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
rp               1287 drivers/net/ethernet/via/via-rhine.c 		rhine_skb_dma_nic_store(rp, &sd, i);
rp               1290 drivers/net/ethernet/via/via-rhine.c 	rhine_reset_rbufs(rp);
rp               1297 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1303 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].rx_status = 0;
rp               1304 drivers/net/ethernet/via/via-rhine.c 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
rp               1305 drivers/net/ethernet/via/via-rhine.c 		if (rp->rx_skbuff[i]) {
rp               1307 drivers/net/ethernet/via/via-rhine.c 					 rp->rx_skbuff_dma[i],
rp               1308 drivers/net/ethernet/via/via-rhine.c 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
rp               1309 drivers/net/ethernet/via/via-rhine.c 			dev_kfree_skb(rp->rx_skbuff[i]);
rp               1311 drivers/net/ethernet/via/via-rhine.c 		rp->rx_skbuff[i] = NULL;
rp               1317 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1321 drivers/net/ethernet/via/via-rhine.c 	rp->dirty_tx = rp->cur_tx = 0;
rp               1322 drivers/net/ethernet/via/via-rhine.c 	next = rp->tx_ring_dma;
rp               1324 drivers/net/ethernet/via/via-rhine.c 		rp->tx_skbuff[i] = NULL;
rp               1325 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].tx_status = 0;
rp               1326 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
rp               1328 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
rp               1329 drivers/net/ethernet/via/via-rhine.c 		if (rp->quirks & rqRhineI)
rp               1330 drivers/net/ethernet/via/via-rhine.c 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
rp               1332 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
rp               1339 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1344 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].tx_status = 0;
rp               1345 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
rp               1346 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
rp               1347 drivers/net/ethernet/via/via-rhine.c 		if (rp->tx_skbuff[i]) {
rp               1348 drivers/net/ethernet/via/via-rhine.c 			if (rp->tx_skbuff_dma[i]) {
rp               1350 drivers/net/ethernet/via/via-rhine.c 						 rp->tx_skbuff_dma[i],
rp               1351 drivers/net/ethernet/via/via-rhine.c 						 rp->tx_skbuff[i]->len,
rp               1354 drivers/net/ethernet/via/via-rhine.c 			dev_kfree_skb(rp->tx_skbuff[i]);
rp               1356 drivers/net/ethernet/via/via-rhine.c 		rp->tx_skbuff[i] = NULL;
rp               1357 drivers/net/ethernet/via/via-rhine.c 		rp->tx_buf[i] = NULL;
rp               1363 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1364 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1366 drivers/net/ethernet/via/via-rhine.c 	if (!rp->mii_if.force_media)
rp               1367 drivers/net/ethernet/via/via-rhine.c 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
rp               1369 drivers/net/ethernet/via/via-rhine.c 	if (rp->mii_if.full_duplex)
rp               1376 drivers/net/ethernet/via/via-rhine.c 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
rp               1377 drivers/net/ethernet/via/via-rhine.c 		   rp->mii_if.force_media, netif_carrier_ok(dev));
rp               1384 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1394 drivers/net/ethernet/via/via-rhine.c 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
rp               1504 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1505 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1524 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1525 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1530 drivers/net/ethernet/via/via-rhine.c 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
rp               1541 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1543 drivers/net/ethernet/via/via-rhine.c 	spin_lock_bh(&rp->lock);
rp               1544 drivers/net/ethernet/via/via-rhine.c 	set_bit(vid, rp->active_vlans);
rp               1546 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_bh(&rp->lock);
rp               1552 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1554 drivers/net/ethernet/via/via-rhine.c 	spin_lock_bh(&rp->lock);
rp               1555 drivers/net/ethernet/via/via-rhine.c 	clear_bit(vid, rp->active_vlans);
rp               1557 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_bh(&rp->lock);
rp               1563 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1564 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1574 drivers/net/ethernet/via/via-rhine.c 	rp->tx_thresh = 0x20;
rp               1575 drivers/net/ethernet/via/via-rhine.c 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
rp               1577 drivers/net/ethernet/via/via-rhine.c 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
rp               1578 drivers/net/ethernet/via/via-rhine.c 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
rp               1582 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqMgmt)
rp               1585 drivers/net/ethernet/via/via-rhine.c 	napi_enable(&rp->napi);
rp               1595 drivers/net/ethernet/via/via-rhine.c static void rhine_enable_linkmon(struct rhine_private *rp)
rp               1597 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1603 drivers/net/ethernet/via/via-rhine.c 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
rp               1609 drivers/net/ethernet/via/via-rhine.c static void rhine_disable_linkmon(struct rhine_private *rp)
rp               1611 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1615 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqRhineI) {
rp               1624 drivers/net/ethernet/via/via-rhine.c 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
rp               1630 drivers/net/ethernet/via/via-rhine.c 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
rp               1637 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1638 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1641 drivers/net/ethernet/via/via-rhine.c 	rhine_disable_linkmon(rp);
rp               1647 drivers/net/ethernet/via/via-rhine.c 	rhine_wait_bit_low(rp, MIICmd, 0x40);
rp               1650 drivers/net/ethernet/via/via-rhine.c 	rhine_enable_linkmon(rp);
rp               1656 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1657 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1659 drivers/net/ethernet/via/via-rhine.c 	rhine_disable_linkmon(rp);
rp               1666 drivers/net/ethernet/via/via-rhine.c 	rhine_wait_bit_low(rp, MIICmd, 0x20);
rp               1668 drivers/net/ethernet/via/via-rhine.c 	rhine_enable_linkmon(rp);
rp               1671 drivers/net/ethernet/via/via-rhine.c static void rhine_task_disable(struct rhine_private *rp)
rp               1673 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               1674 drivers/net/ethernet/via/via-rhine.c 	rp->task_enable = false;
rp               1675 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               1677 drivers/net/ethernet/via/via-rhine.c 	cancel_work_sync(&rp->slow_event_task);
rp               1678 drivers/net/ethernet/via/via-rhine.c 	cancel_work_sync(&rp->reset_task);
rp               1681 drivers/net/ethernet/via/via-rhine.c static void rhine_task_enable(struct rhine_private *rp)
rp               1683 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               1684 drivers/net/ethernet/via/via-rhine.c 	rp->task_enable = true;
rp               1685 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               1690 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1691 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1694 drivers/net/ethernet/via/via-rhine.c 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
rp               1698 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rp               1710 drivers/net/ethernet/via/via-rhine.c 	rhine_task_enable(rp);
rp               1713 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
rp               1715 drivers/net/ethernet/via/via-rhine.c 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
rp               1725 drivers/net/ethernet/via/via-rhine.c 	free_irq(rp->irq, dev);
rp               1731 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = container_of(work, struct rhine_private,
rp               1733 drivers/net/ethernet/via/via-rhine.c 	struct net_device *dev = rp->dev;
rp               1735 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               1737 drivers/net/ethernet/via/via-rhine.c 	if (!rp->task_enable)
rp               1740 drivers/net/ethernet/via/via-rhine.c 	napi_disable(&rp->napi);
rp               1742 drivers/net/ethernet/via/via-rhine.c 	spin_lock_bh(&rp->lock);
rp               1748 drivers/net/ethernet/via/via-rhine.c 	rhine_reset_rbufs(rp);
rp               1754 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_bh(&rp->lock);
rp               1761 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               1766 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1767 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1771 drivers/net/ethernet/via/via-rhine.c 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
rp               1773 drivers/net/ethernet/via/via-rhine.c 	schedule_work(&rp->reset_task);
rp               1776 drivers/net/ethernet/via/via-rhine.c static inline bool rhine_tx_queue_full(struct rhine_private *rp)
rp               1778 drivers/net/ethernet/via/via-rhine.c 	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
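Note: rhine_tx_queue_full() above compares two free-running counters; the unsigned subtraction stays correct even after cur_tx wraps around, as long as fewer than TX_QUEUE_LEN packets are ever outstanding. A small standalone sketch of that counter pattern; the TX_QUEUE_LEN value and the helper name are illustrative assumptions, not the driver's own definitions.

	#include <assert.h>

	#define TX_QUEUE_LEN 60u	/* assumed value, for illustration only */

	/* Same test as rhine_tx_queue_full(): outstanding = producer - consumer. */
	static int queue_full(unsigned int cur_tx, unsigned int dirty_tx)
	{
		return (cur_tx - dirty_tx) >= TX_QUEUE_LEN;
	}

	int main(void)
	{
		assert(!queue_full(10, 0));
		assert(queue_full(70, 10));
		/* cur_tx has wrapped past UINT_MAX, dirty_tx has not: 5 - 0xFFFFFFF0 == 21 */
		assert(!queue_full(5, 0xFFFFFFF0u));
		return 0;
	}
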
rp               1784 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1786 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               1793 drivers/net/ethernet/via/via-rhine.c 	entry = rp->cur_tx % TX_RING_SIZE;
rp               1798 drivers/net/ethernet/via/via-rhine.c 	rp->tx_skbuff[entry] = skb;
rp               1800 drivers/net/ethernet/via/via-rhine.c 	if ((rp->quirks & rqRhineI) &&
rp               1806 drivers/net/ethernet/via/via-rhine.c 			rp->tx_skbuff[entry] = NULL;
rp               1812 drivers/net/ethernet/via/via-rhine.c 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
rp               1814 drivers/net/ethernet/via/via-rhine.c 			memset(rp->tx_buf[entry] + skb->len, 0,
rp               1816 drivers/net/ethernet/via/via-rhine.c 		rp->tx_skbuff_dma[entry] = 0;
rp               1817 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
rp               1818 drivers/net/ethernet/via/via-rhine.c 						      (rp->tx_buf[entry] -
rp               1819 drivers/net/ethernet/via/via-rhine.c 						       rp->tx_bufs));
rp               1821 drivers/net/ethernet/via/via-rhine.c 		rp->tx_skbuff_dma[entry] =
rp               1824 drivers/net/ethernet/via/via-rhine.c 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
rp               1826 drivers/net/ethernet/via/via-rhine.c 			rp->tx_skbuff_dma[entry] = 0;
rp               1830 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
rp               1833 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[entry].desc_length =
rp               1842 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
rp               1844 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
rp               1847 drivers/net/ethernet/via/via-rhine.c 		rp->tx_ring[entry].tx_status = 0;
rp               1852 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
rp               1855 drivers/net/ethernet/via/via-rhine.c 	rp->cur_tx++;
rp               1875 drivers/net/ethernet/via/via-rhine.c 	if (rhine_tx_queue_full(rp)) {
rp               1879 drivers/net/ethernet/via/via-rhine.c 		if (!rhine_tx_queue_full(rp))
rp               1883 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
rp               1884 drivers/net/ethernet/via/via-rhine.c 		  rp->cur_tx - 1, entry);
rp               1889 drivers/net/ethernet/via/via-rhine.c static void rhine_irq_disable(struct rhine_private *rp)
rp               1891 drivers/net/ethernet/via/via-rhine.c 	iowrite16(0x0000, rp->base + IntrEnable);
rp               1899 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1903 drivers/net/ethernet/via/via-rhine.c 	status = rhine_get_events(rp);
rp               1905 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
rp               1910 drivers/net/ethernet/via/via-rhine.c 		rhine_irq_disable(rp);
rp               1911 drivers/net/ethernet/via/via-rhine.c 		napi_schedule(&rp->napi);
rp               1915 drivers/net/ethernet/via/via-rhine.c 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
rp               1926 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               1929 drivers/net/ethernet/via/via-rhine.c 	unsigned int dirty_tx = rp->dirty_tx;
rp               1940 drivers/net/ethernet/via/via-rhine.c 	cur_tx = rp->cur_tx;
rp               1944 drivers/net/ethernet/via/via-rhine.c 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
rp               1946 drivers/net/ethernet/via/via-rhine.c 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
rp               1950 drivers/net/ethernet/via/via-rhine.c 		skb = rp->tx_skbuff[entry];
rp               1952 drivers/net/ethernet/via/via-rhine.c 			netif_dbg(rp, tx_done, dev,
rp               1963 drivers/net/ethernet/via/via-rhine.c 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
rp               1966 drivers/net/ethernet/via/via-rhine.c 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
rp               1971 drivers/net/ethernet/via/via-rhine.c 			if (rp->quirks & rqRhineI)
rp               1975 drivers/net/ethernet/via/via-rhine.c 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
rp               1978 drivers/net/ethernet/via/via-rhine.c 			u64_stats_update_begin(&rp->tx_stats.syncp);
rp               1979 drivers/net/ethernet/via/via-rhine.c 			rp->tx_stats.bytes += skb->len;
rp               1980 drivers/net/ethernet/via/via-rhine.c 			rp->tx_stats.packets++;
rp               1981 drivers/net/ethernet/via/via-rhine.c 			u64_stats_update_end(&rp->tx_stats.syncp);
rp               1984 drivers/net/ethernet/via/via-rhine.c 		if (rp->tx_skbuff_dma[entry]) {
rp               1986 drivers/net/ethernet/via/via-rhine.c 					 rp->tx_skbuff_dma[entry],
rp               1993 drivers/net/ethernet/via/via-rhine.c 		rp->tx_skbuff[entry] = NULL;
rp               1997 drivers/net/ethernet/via/via-rhine.c 	rp->dirty_tx = dirty_tx;
rp               2004 drivers/net/ethernet/via/via-rhine.c 	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
rp               2008 drivers/net/ethernet/via/via-rhine.c 		if (rhine_tx_queue_full(rp))
rp               2043 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2045 drivers/net/ethernet/via/via-rhine.c 	int entry = rp->cur_rx % RX_RING_SIZE;
rp               2048 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
rp               2049 drivers/net/ethernet/via/via-rhine.c 		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
rp               2053 drivers/net/ethernet/via/via-rhine.c 		struct rx_desc *desc = rp->rx_ring + entry;
rp               2060 drivers/net/ethernet/via/via-rhine.c 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
rp               2073 drivers/net/ethernet/via/via-rhine.c 				netif_dbg(rp, rx_err, dev,
rp               2085 drivers/net/ethernet/via/via-rhine.c 					spin_lock(&rp->lock);
rp               2087 drivers/net/ethernet/via/via-rhine.c 					spin_unlock(&rp->lock);
rp               2103 drivers/net/ethernet/via/via-rhine.c 							rp->rx_skbuff_dma[entry],
rp               2104 drivers/net/ethernet/via/via-rhine.c 							rp->rx_buf_sz,
rp               2108 drivers/net/ethernet/via/via-rhine.c 						 rp->rx_skbuff[entry]->data,
rp               2112 drivers/net/ethernet/via/via-rhine.c 							   rp->rx_skbuff_dma[entry],
rp               2113 drivers/net/ethernet/via/via-rhine.c 							   rp->rx_buf_sz,
rp               2121 drivers/net/ethernet/via/via-rhine.c 				skb = rp->rx_skbuff[entry];
rp               2124 drivers/net/ethernet/via/via-rhine.c 						 rp->rx_skbuff_dma[entry],
rp               2125 drivers/net/ethernet/via/via-rhine.c 						 rp->rx_buf_sz,
rp               2127 drivers/net/ethernet/via/via-rhine.c 				rhine_skb_dma_nic_store(rp, &sd, entry);
rp               2138 drivers/net/ethernet/via/via-rhine.c 			u64_stats_update_begin(&rp->rx_stats.syncp);
rp               2139 drivers/net/ethernet/via/via-rhine.c 			rp->rx_stats.bytes += pkt_len;
rp               2140 drivers/net/ethernet/via/via-rhine.c 			rp->rx_stats.packets++;
rp               2141 drivers/net/ethernet/via/via-rhine.c 			u64_stats_update_end(&rp->rx_stats.syncp);
rp               2145 drivers/net/ethernet/via/via-rhine.c 		entry = (++rp->cur_rx) % RX_RING_SIZE;
rp               2156 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2157 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               2158 drivers/net/ethernet/via/via-rhine.c 	int entry = rp->dirty_tx % TX_RING_SIZE;
rp               2165 drivers/net/ethernet/via/via-rhine.c 	intr_status = rhine_get_events(rp);
rp               2170 drivers/net/ethernet/via/via-rhine.c 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
rp               2176 drivers/net/ethernet/via/via-rhine.c 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
rp               2186 drivers/net/ethernet/via/via-rhine.c 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
rp               2194 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp =
rp               2196 drivers/net/ethernet/via/via-rhine.c 	struct net_device *dev = rp->dev;
rp               2199 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               2201 drivers/net/ethernet/via/via-rhine.c 	if (!rp->task_enable)
rp               2204 drivers/net/ethernet/via/via-rhine.c 	intr_status = rhine_get_events(rp);
rp               2205 drivers/net/ethernet/via/via-rhine.c 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
rp               2211 drivers/net/ethernet/via/via-rhine.c 		netif_warn(rp, hw, dev, "PCI error\n");
rp               2213 drivers/net/ethernet/via/via-rhine.c 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
rp               2216 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               2222 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2225 drivers/net/ethernet/via/via-rhine.c 	spin_lock_bh(&rp->lock);
rp               2226 drivers/net/ethernet/via/via-rhine.c 	rhine_update_rx_crc_and_missed_errord(rp);
rp               2227 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_bh(&rp->lock);
rp               2232 drivers/net/ethernet/via/via-rhine.c 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
rp               2233 drivers/net/ethernet/via/via-rhine.c 		stats->rx_packets = rp->rx_stats.packets;
rp               2234 drivers/net/ethernet/via/via-rhine.c 		stats->rx_bytes = rp->rx_stats.bytes;
rp               2235 drivers/net/ethernet/via/via-rhine.c 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
rp               2238 drivers/net/ethernet/via/via-rhine.c 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
rp               2239 drivers/net/ethernet/via/via-rhine.c 		stats->tx_packets = rp->tx_stats.packets;
rp               2240 drivers/net/ethernet/via/via-rhine.c 		stats->tx_bytes = rp->tx_stats.bytes;
rp               2241 drivers/net/ethernet/via/via-rhine.c 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
rp               2246 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2247 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               2261 drivers/net/ethernet/via/via-rhine.c 	} else if (rp->quirks & rqMgmt) {
rp               2283 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rqMgmt) {
rp               2304 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2306 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               2307 drivers/net/ethernet/via/via-rhine.c 	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
rp               2308 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               2316 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2319 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               2320 drivers/net/ethernet/via/via-rhine.c 	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
rp               2321 drivers/net/ethernet/via/via-rhine.c 	rhine_set_carrier(&rp->mii_if);
rp               2322 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               2329 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2331 drivers/net/ethernet/via/via-rhine.c 	return mii_nway_restart(&rp->mii_if);
rp               2336 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2338 drivers/net/ethernet/via/via-rhine.c 	return mii_link_ok(&rp->mii_if);
rp               2343 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2345 drivers/net/ethernet/via/via-rhine.c 	return rp->msg_enable;
rp               2350 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2352 drivers/net/ethernet/via/via-rhine.c 	rp->msg_enable = value;
rp               2357 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2359 drivers/net/ethernet/via/via-rhine.c 	if (!(rp->quirks & rqWOL))
rp               2362 drivers/net/ethernet/via/via-rhine.c 	spin_lock_irq(&rp->lock);
rp               2365 drivers/net/ethernet/via/via-rhine.c 	wol->wolopts = rp->wolopts;
rp               2366 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_irq(&rp->lock);
rp               2371 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2375 drivers/net/ethernet/via/via-rhine.c 	if (!(rp->quirks & rqWOL))
rp               2381 drivers/net/ethernet/via/via-rhine.c 	spin_lock_irq(&rp->lock);
rp               2382 drivers/net/ethernet/via/via-rhine.c 	rp->wolopts = wol->wolopts;
rp               2383 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_irq(&rp->lock);
rp               2402 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2408 drivers/net/ethernet/via/via-rhine.c 	mutex_lock(&rp->task_lock);
rp               2409 drivers/net/ethernet/via/via-rhine.c 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
rp               2410 drivers/net/ethernet/via/via-rhine.c 	rhine_set_carrier(&rp->mii_if);
rp               2411 drivers/net/ethernet/via/via-rhine.c 	mutex_unlock(&rp->task_lock);
rp               2418 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2419 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               2421 drivers/net/ethernet/via/via-rhine.c 	rhine_task_disable(rp);
rp               2422 drivers/net/ethernet/via/via-rhine.c 	napi_disable(&rp->napi);
rp               2425 drivers/net/ethernet/via/via-rhine.c 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
rp               2429 drivers/net/ethernet/via/via-rhine.c 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
rp               2431 drivers/net/ethernet/via/via-rhine.c 	rhine_irq_disable(rp);
rp               2436 drivers/net/ethernet/via/via-rhine.c 	free_irq(rp->irq, dev);
rp               2448 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2452 drivers/net/ethernet/via/via-rhine.c 	pci_iounmap(pdev, rp->base);
rp               2462 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2466 drivers/net/ethernet/via/via-rhine.c 	iounmap(rp->base);
rp               2476 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2477 drivers/net/ethernet/via/via-rhine.c 	void __iomem *ioaddr = rp->base;
rp               2479 drivers/net/ethernet/via/via-rhine.c 	if (!(rp->quirks & rqWOL))
rp               2485 drivers/net/ethernet/via/via-rhine.c 	if (rp->quirks & rq6patterns)
rp               2488 drivers/net/ethernet/via/via-rhine.c 	spin_lock(&rp->lock);
rp               2490 drivers/net/ethernet/via/via-rhine.c 	if (rp->wolopts & WAKE_MAGIC) {
rp               2499 drivers/net/ethernet/via/via-rhine.c 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
rp               2502 drivers/net/ethernet/via/via-rhine.c 	if (rp->wolopts & WAKE_PHY)
rp               2505 drivers/net/ethernet/via/via-rhine.c 	if (rp->wolopts & WAKE_UCAST)
rp               2508 drivers/net/ethernet/via/via-rhine.c 	if (rp->wolopts) {
rp               2514 drivers/net/ethernet/via/via-rhine.c 	spin_unlock(&rp->lock);
rp               2528 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2533 drivers/net/ethernet/via/via-rhine.c 	rhine_task_disable(rp);
rp               2534 drivers/net/ethernet/via/via-rhine.c 	rhine_irq_disable(rp);
rp               2535 drivers/net/ethernet/via/via-rhine.c 	napi_disable(&rp->napi);
rp               2548 drivers/net/ethernet/via/via-rhine.c 	struct rhine_private *rp = netdev_priv(dev);
rp               2553 drivers/net/ethernet/via/via-rhine.c 	enable_mmio(rp->pioaddr, rp->quirks);
rp               2557 drivers/net/ethernet/via/via-rhine.c 	rhine_reset_rbufs(rp);
rp               2558 drivers/net/ethernet/via/via-rhine.c 	rhine_task_enable(rp);
rp               2559 drivers/net/ethernet/via/via-rhine.c 	spin_lock_bh(&rp->lock);
rp               2561 drivers/net/ethernet/via/via-rhine.c 	spin_unlock_bh(&rp->lock);
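
The via-rhine excerpts above rely on free-running producer/consumer counters: cur_tx only ever increments, dirty_tx trails it as descriptors are reclaimed, the ring slot is cur_tx % TX_RING_SIZE, and rhine_tx_queue_full() reports full once cur_tx - dirty_tx reaches TX_QUEUE_LEN. Below is a minimal user-space sketch of that accounting; the constant values are placeholders rather than the driver's real defines, and the loop is only a demonstration.

	#include <stdbool.h>
	#include <stdio.h>

	#define TX_RING_SIZE 64		/* slots in the descriptor ring (placeholder) */
	#define TX_QUEUE_LEN 60		/* stop threshold (placeholder) */

	struct ring {
		unsigned int cur_tx;	/* next slot to fill, free-running */
		unsigned int dirty_tx;	/* next slot to reclaim, free-running */
	};

	/* Same shape as rhine_tx_queue_full(): outstanding entries >= limit.
	 * Unsigned subtraction stays correct even after the counters wrap. */
	static bool queue_full(const struct ring *r)
	{
		return (r->cur_tx - r->dirty_tx) >= TX_QUEUE_LEN;
	}

	int main(void)
	{
		struct ring r = { 0, 0 };

		while (!queue_full(&r)) {
			unsigned int entry = r.cur_tx % TX_RING_SIZE;
			printf("frame #%u queued in slot %u\n", r.cur_tx, entry);
			r.cur_tx++;
		}
		printf("queue full with %u frames outstanding\n",
		       r.cur_tx - r.dirty_tx);
		return 0;
	}
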
rp                 52 drivers/net/wireless/ath/ar5523/ar5523.c 	__be32 *rp;
rp                 64 drivers/net/wireless/ath/ar5523/ar5523.c 	rp = (__be32 *)(hdr + 1);
rp                 66 drivers/net/wireless/ath/ar5523/ar5523.c 		olen = be32_to_cpu(rp[0]);
rp                 83 drivers/net/wireless/ath/ar5523/ar5523.c 			memcpy(cmd->odata, &rp[1], olen);
rp                 44 drivers/net/wireless/intel/iwlegacy/3945.c #define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
rp                 49 drivers/net/wireless/intel/iwlegacy/3945.c 				    RATE_##rp##M_IDX, \
rp                 61 drivers/net/wireless/intel/iwlegacy/4965-rs.c #define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
rp                 68 drivers/net/wireless/intel/iwlegacy/4965-rs.c 				    RATE_##rp##M_IDX,    \
rp                 61 drivers/net/wireless/intel/iwlwifi/dvm/rs.c #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
rp                 69 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 				    IWL_RATE_##rp##M_INDEX,    \
rp                 58 drivers/net/wireless/intel/iwlwifi/mvm/rs.c #define IWL_DECLARE_RATE_INFO(r, s, rp, rn)			      \
rp                 64 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 				    IWL_RATE_##rp##M_INDEX,	      \
rp                 46 drivers/net/wireless/mediatek/mt76/mt76.h 		     const struct mt76_reg_pair *rp, int len);
rp                 48 drivers/net/wireless/mediatek/mt76/mt76.h 		     struct mt76_reg_pair *rp, int len);
rp                142 drivers/net/wireless/mediatek/mt76/mt76.h 			 const struct mt76_reg_pair *rp, int len);
rp                144 drivers/net/wireless/mediatek/mt76/mt76.h 			 struct mt76_reg_pair *rp, int len);
rp                403 drivers/net/wireless/mediatek/mt76/mt76.h 		struct mt76_reg_pair *rp;
rp               1116 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 				      const struct mt76_reg_pair *rp, int len)
rp               1121 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 		u32 reg = rp[i].reg;
rp               1122 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 		u8 val = rp[i].value;
rp                 82 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 			struct mt76_rate_power *rp = &dev->mt76.rate_power;
rp                 86 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 				max_txpwr = rp->cck[r->hw_value & 0x3];
rp                 88 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 				max_txpwr = rp->ofdm[r->hw_value & 0x7];
rp                 30 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		reg = usb->mcu.rp[0].reg - usb->mcu.base;
rp                 33 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 			usb->mcu.rp[i].reg = reg++;
rp                 34 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 			usb->mcu.rp[i].value = val;
rp                 44 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
rp                 45 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 			usb->mcu.rp[i].value = val;
rp                 64 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 		if (usb->mcu.rp)
rp                205 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	usb->mcu.rp = data;
rp                212 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c 	usb->mcu.rp = NULL;
rp                 93 drivers/net/wireless/mediatek/mt7601u/debugfs.c 	struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
rp                108 drivers/net/wireless/mediatek/mt7601u/debugfs.c 			   rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
rp                111 drivers/net/wireless/mediatek/mt7601u/debugfs.c 			   rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
rp                114 drivers/net/wireless/mediatek/mt7601u/debugfs.c 			   rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
rp                300 drivers/net/wireless/realtek/rtlwifi/pci.h static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
rp                302 drivers/net/wireless/realtek/rtlwifi/pci.h 	if (rp <= wp)
rp                303 drivers/net/wireless/realtek/rtlwifi/pci.h 		return size - 1 + rp - wp;
rp                304 drivers/net/wireless/realtek/rtlwifi/pci.h 	return rp - wp - 1;
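
calc_fifo_space() above is the standard circular-buffer free-space formula with one slot kept permanently empty, so that read pointer == write pointer unambiguously means "empty"; the rtw88 avail_desc() helper further down uses the same convention with its arguments swapped. A standalone sketch of the arithmetic, with generic names in place of the drivers':

	#include <stdio.h>

	/* Free slots between write pointer wp and read pointer rp in a ring
	 * of `size` slots, one slot reserved so wp == rp means "empty". */
	static unsigned int ring_free(unsigned int rp, unsigned int wp,
				      unsigned int size)
	{
		if (rp <= wp)
			return size - 1 - (wp - rp);
		return rp - wp - 1;
	}

	int main(void)
	{
		printf("%u\n", ring_free(0, 0, 8));	/* empty ring: 7 free */
		printf("%u\n", ring_free(3, 5, 8));	/* writer 2 ahead: 5 free */
		printf("%u\n", ring_free(5, 4, 8));	/* wrapped, full: 0 free */
		return 0;
	}
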
rp                873 drivers/net/wireless/realtek/rtw88/mac.c 	u32 wp, rp;
rp                904 drivers/net/wireless/realtek/rtw88/mac.c 	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
rp                905 drivers/net/wireless/realtek/rtw88/mac.c 	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;
rp                199 drivers/net/wireless/realtek/rtw88/pci.c 	tx_ring->r.rp = 0;
rp                289 drivers/net/wireless/realtek/rtw88/pci.c 	rx_ring->r.rp = 0;
rp                403 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
rp                410 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
rp                417 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
rp                424 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
rp                431 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
rp                438 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
rp                445 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
rp                452 drivers/net/wireless/realtek/rtw88/pci.c 	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
rp                626 drivers/net/wireless/realtek/rtw88/pci.c 	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
rp                731 drivers/net/wireless/realtek/rtw88/pci.c 	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
rp                758 drivers/net/wireless/realtek/rtw88/pci.c 	if (cur_rp >= ring->r.rp)
rp                759 drivers/net/wireless/realtek/rtw88/pci.c 		count = cur_rp - ring->r.rp;
rp                761 drivers/net/wireless/realtek/rtw88/pci.c 		count = ring->r.len - (ring->r.rp - cur_rp);
rp                767 drivers/net/wireless/realtek/rtw88/pci.c 				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
rp                781 drivers/net/wireless/realtek/rtw88/pci.c 		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
rp                807 drivers/net/wireless/realtek/rtw88/pci.c 	ring->r.rp = cur_rp;
rp                837 drivers/net/wireless/realtek/rtw88/pci.c 	cur_rp = ring->r.rp;
rp                883 drivers/net/wireless/realtek/rtw88/pci.c 	ring->r.rp = cur_rp;
rp                885 drivers/net/wireless/realtek/rtw88/pci.c 	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
rp                130 drivers/net/wireless/realtek/rtw88/pci.h static inline int avail_desc(u32 wp, u32 rp, u32 len)
rp                132 drivers/net/wireless/realtek/rtw88/pci.h 	if (rp > wp)
rp                133 drivers/net/wireless/realtek/rtw88/pci.h 		return rp - wp - 1;
rp                135 drivers/net/wireless/realtek/rtw88/pci.h 		return len - wp + rp - 1;
rp                160 drivers/net/wireless/realtek/rtw88/pci.h 	u32 rp;
rp                739 drivers/net/xen-netfront.c 			     RING_IDX rp)
rp                751 drivers/net/xen-netfront.c 		if (unlikely(cons + 1 == rp)) {
rp                782 drivers/net/xen-netfront.c 				struct netfront_rx_info *rinfo, RING_IDX rp,
rp                797 drivers/net/xen-netfront.c 		err = xennet_get_extras(queue, extras, rp);
rp                836 drivers/net/xen-netfront.c 		if (cons + slots == rp) {
rp                995 drivers/net/xen-netfront.c 	RING_IDX i, rp;
rp               1008 drivers/net/xen-netfront.c 	rp = queue->rx.sring->rsp_prod;
rp               1013 drivers/net/xen-netfront.c 	while ((i != rp) && (work_done < budget)) {
rp               1017 drivers/net/xen-netfront.c 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
rp                720 drivers/nubus/nubus.c 	unsigned char *rp;
rp                727 drivers/nubus/nubus.c 	rp = nubus_rom_addr(slot);
rp                728 drivers/nubus/nubus.c 	nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes);
rp                733 drivers/nubus/nubus.c 	board->fblock = rp;
rp                736 drivers/nubus/nubus.c 	pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
rp                737 drivers/nubus/nubus.c 	pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp                738 drivers/nubus/nubus.c 	pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp                739 drivers/nubus/nubus.c 	pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp                740 drivers/nubus/nubus.c 	pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
rp                741 drivers/nubus/nubus.c 	pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
rp                742 drivers/nubus/nubus.c 	pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp                743 drivers/nubus/nubus.c 	pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
rp                744 drivers/nubus/nubus.c 	pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
rp                745 drivers/nubus/nubus.c 	rp = board->fblock;
rp                749 drivers/nubus/nubus.c 	board->doffset = nubus_get_rom(&rp, 4, bytelanes);
rp                756 drivers/nubus/nubus.c 	board->rom_length = nubus_get_rom(&rp, 4, bytelanes);
rp                757 drivers/nubus/nubus.c 	board->crc = nubus_get_rom(&rp, 4, bytelanes);
rp                758 drivers/nubus/nubus.c 	board->rev = nubus_get_rom(&rp, 1, bytelanes);
rp                759 drivers/nubus/nubus.c 	board->format = nubus_get_rom(&rp, 1, bytelanes);
rp                765 drivers/nubus/nubus.c 	dpat = nubus_get_rom(&rp, 4, bytelanes);
rp                832 drivers/nubus/nubus.c 	unsigned char *rp;
rp                835 drivers/nubus/nubus.c 	rp = nubus_rom_addr(slot);
rp                837 drivers/nubus/nubus.c 		rp--;
rp                838 drivers/nubus/nubus.c 		if (!hwreg_present(rp))
rp                841 drivers/nubus/nubus.c 		dp = *rp;
rp                850 drivers/nubus/nubus.c 		if (not_useful(rp, dp))
rp               2232 drivers/pci/controller/pci-tegra.c 		struct tegra_pcie_port *rp;
rp               2276 drivers/pci/controller/pci-tegra.c 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
rp               2277 drivers/pci/controller/pci-tegra.c 		if (!rp) {
rp               2282 drivers/pci/controller/pci-tegra.c 		err = of_address_to_resource(port, 0, &rp->regs);
rp               2288 drivers/pci/controller/pci-tegra.c 		INIT_LIST_HEAD(&rp->list);
rp               2289 drivers/pci/controller/pci-tegra.c 		rp->index = index;
rp               2290 drivers/pci/controller/pci-tegra.c 		rp->lanes = value;
rp               2291 drivers/pci/controller/pci-tegra.c 		rp->pcie = pcie;
rp               2292 drivers/pci/controller/pci-tegra.c 		rp->np = port;
rp               2294 drivers/pci/controller/pci-tegra.c 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
rp               2295 drivers/pci/controller/pci-tegra.c 		if (IS_ERR(rp->base))
rp               2296 drivers/pci/controller/pci-tegra.c 			return PTR_ERR(rp->base);
rp               2309 drivers/pci/controller/pci-tegra.c 		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
rp               2313 drivers/pci/controller/pci-tegra.c 		if (IS_ERR(rp->reset_gpio)) {
rp               2314 drivers/pci/controller/pci-tegra.c 			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
rp               2315 drivers/pci/controller/pci-tegra.c 				rp->reset_gpio = NULL;
rp               2319 drivers/pci/controller/pci-tegra.c 				return PTR_ERR(rp->reset_gpio);
rp               2323 drivers/pci/controller/pci-tegra.c 		list_add_tail(&rp->list, &pcie->ports);
rp                249 drivers/pci/pci-acpi.c 	struct pci_dev *rp = pcie_find_root_port(dev);
rp                252 drivers/pci/pci-acpi.c 	if (!rp)
rp                255 drivers/pci/pci-acpi.c 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
rp               1236 drivers/pci/pcie/aer.c 	struct pci_dev *rp = rpc->rpd;
rp               1238 drivers/pci/pcie/aer.c 	int pos = rp->aer_cap;
rp               1240 drivers/pci/pcie/aer.c 	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
rp               1244 drivers/pci/pcie/aer.c 	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
rp               1245 drivers/pci/pcie/aer.c 	pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
rp                311 drivers/platform/olpc/olpc-ec.c 	char *rp;
rp                315 drivers/platform/olpc/olpc-ec.c 	rp = respbuf;
rp                316 drivers/platform/olpc/olpc-ec.c 	rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]);
rp                318 drivers/platform/olpc/olpc-ec.c 		rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]);
rp                320 drivers/platform/olpc/olpc-ec.c 	rp += sprintf(rp, "\n");
rp                322 drivers/platform/olpc/olpc-ec.c 	r = rp - respbuf;
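
In the olpc-ec debugfs excerpt, rp is a write cursor into respbuf: sprintf() returns the number of characters it wrote, so advancing rp by that return value appends each field in place, and rp - respbuf at the end is the formatted length. A small sketch of the same cursor pattern, using made-up response bytes:

	#include <stdio.h>

	int main(void)
	{
		unsigned char resp[] = { 0xde, 0xad, 0xbe, 0xef };	/* example data */
		char buf[64];
		char *rp = buf;
		int i, len;

		rp += sprintf(rp, "%02x", resp[0]);
		for (i = 1; i < (int)sizeof(resp); i++)
			rp += sprintf(rp, ", %02x", resp[i]);
		rp += sprintf(rp, "\n");

		len = rp - buf;			/* total length, like r = rp - respbuf */
		fwrite(buf, 1, len, stdout);	/* prints: de, ad, be, ef */
		return 0;
	}
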
rp                 94 drivers/powercap/intel_rapl_common.c 	int (*check_unit)(struct rapl_package *rp, int cpu);
rp                 96 drivers/powercap/intel_rapl_common.c 	u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
rp                130 drivers/powercap/intel_rapl_common.c static void rapl_init_domains(struct rapl_package *rp);
rp                139 drivers/powercap/intel_rapl_common.c static void package_power_limit_irq_save(struct rapl_package *rp);
rp                185 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp = rd->rp;
rp                192 drivers/powercap/intel_rapl_common.c 		rp->domains = NULL;
rp                315 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp;
rp                327 drivers/powercap/intel_rapl_common.c 	rp = rd->rp;
rp                347 drivers/powercap/intel_rapl_common.c 		package_power_limit_irq_save(rp);
rp                513 drivers/powercap/intel_rapl_common.c static void rapl_init_domains(struct rapl_package *rp)
rp                517 drivers/powercap/intel_rapl_common.c 	struct rapl_domain *rd = rp->domains;
rp                520 drivers/powercap/intel_rapl_common.c 		unsigned int mask = rp->domain_map & (1 << i);
rp                525 drivers/powercap/intel_rapl_common.c 		rd->rp = rp;
rp                531 drivers/powercap/intel_rapl_common.c 		if (rp->priv->limits[i] == 2) {
rp                537 drivers/powercap/intel_rapl_common.c 			rd->regs[j] = rp->priv->regs[i][j];
rp                554 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp = rd->rp;
rp                559 drivers/powercap/intel_rapl_common.c 		units = rp->power_unit;
rp                567 drivers/powercap/intel_rapl_common.c 			units = rp->energy_unit;
rp                570 drivers/powercap/intel_rapl_common.c 		return rapl_defaults->compute_time_window(rp, value, to_raw);
rp                642 drivers/powercap/intel_rapl_common.c 	struct rapl_primitive_info *rp = &rpi[prim];
rp                646 drivers/powercap/intel_rapl_common.c 	if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
rp                649 drivers/powercap/intel_rapl_common.c 	ra.reg = rd->regs[rp->id];
rp                653 drivers/powercap/intel_rapl_common.c 	cpu = rd->rp->lead_cpu;
rp                656 drivers/powercap/intel_rapl_common.c 	if (prim == FW_LOCK && rd->rp->priv->limits[rd->id] == 2) {
rp                657 drivers/powercap/intel_rapl_common.c 		rp->mask = POWER_HIGH_LOCK;
rp                658 drivers/powercap/intel_rapl_common.c 		rp->shift = 63;
rp                661 drivers/powercap/intel_rapl_common.c 	if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
rp                666 drivers/powercap/intel_rapl_common.c 	ra.mask = rp->mask;
rp                668 drivers/powercap/intel_rapl_common.c 	if (rd->rp->priv->read_raw(cpu, &ra)) {
rp                673 drivers/powercap/intel_rapl_common.c 	value = ra.value >> rp->shift;
rp                676 drivers/powercap/intel_rapl_common.c 		*data = rapl_unit_xlate(rd, rp->unit, value, 0);
rp                688 drivers/powercap/intel_rapl_common.c 	struct rapl_primitive_info *rp = &rpi[prim];
rp                694 drivers/powercap/intel_rapl_common.c 	cpu = rd->rp->lead_cpu;
rp                695 drivers/powercap/intel_rapl_common.c 	bits = rapl_unit_xlate(rd, rp->unit, value, 1);
rp                696 drivers/powercap/intel_rapl_common.c 	bits <<= rp->shift;
rp                697 drivers/powercap/intel_rapl_common.c 	bits &= rp->mask;
rp                701 drivers/powercap/intel_rapl_common.c 	ra.reg = rd->regs[rp->id];
rp                702 drivers/powercap/intel_rapl_common.c 	ra.mask = rp->mask;
rp                705 drivers/powercap/intel_rapl_common.c 	ret = rd->rp->priv->write_raw(cpu, &ra);
rp                721 drivers/powercap/intel_rapl_common.c static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
rp                726 drivers/powercap/intel_rapl_common.c 	ra.reg = rp->priv->reg_unit;
rp                728 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra)) {
rp                730 drivers/powercap/intel_rapl_common.c 		       rp->priv->reg_unit, cpu);
rp                735 drivers/powercap/intel_rapl_common.c 	rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
rp                738 drivers/powercap/intel_rapl_common.c 	rp->power_unit = 1000000 / (1 << value);
rp                741 drivers/powercap/intel_rapl_common.c 	rp->time_unit = 1000000 / (1 << value);
rp                744 drivers/powercap/intel_rapl_common.c 		 rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
rp                749 drivers/powercap/intel_rapl_common.c static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
rp                754 drivers/powercap/intel_rapl_common.c 	ra.reg = rp->priv->reg_unit;
rp                756 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra)) {
rp                758 drivers/powercap/intel_rapl_common.c 		       rp->priv->reg_unit, cpu);
rp                763 drivers/powercap/intel_rapl_common.c 	rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
rp                766 drivers/powercap/intel_rapl_common.c 	rp->power_unit = (1 << value) * 1000;
rp                769 drivers/powercap/intel_rapl_common.c 	rp->time_unit = 1000000 / (1 << value);
rp                772 drivers/powercap/intel_rapl_common.c 		 rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
rp                780 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp = (struct rapl_package *)info;
rp                784 drivers/powercap/intel_rapl_common.c 	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
rp                785 drivers/powercap/intel_rapl_common.c 		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
rp                786 drivers/powercap/intel_rapl_common.c 		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
rp                802 drivers/powercap/intel_rapl_common.c static void package_power_limit_irq_save(struct rapl_package *rp)
rp                807 drivers/powercap/intel_rapl_common.c 	smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
rp                814 drivers/powercap/intel_rapl_common.c static void package_power_limit_irq_restore(struct rapl_package *rp)
rp                822 drivers/powercap/intel_rapl_common.c 	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
rp                827 drivers/powercap/intel_rapl_common.c 	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
rp                874 drivers/powercap/intel_rapl_common.c static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
rp                886 drivers/powercap/intel_rapl_common.c 		value = (1 << y) * (4 + f) * rp->time_unit / 4;
rp                888 drivers/powercap/intel_rapl_common.c 		do_div(value, rp->time_unit);
rp                896 drivers/powercap/intel_rapl_common.c static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
rp                904 drivers/powercap/intel_rapl_common.c 		return (value) ? value *= rp->time_unit : rp->time_unit;
rp                906 drivers/powercap/intel_rapl_common.c 	value = div64_u64(value, rp->time_unit);
rp                999 drivers/powercap/intel_rapl_common.c static void rapl_update_domain_data(struct rapl_package *rp)
rp               1004 drivers/powercap/intel_rapl_common.c 	for (dmn = 0; dmn < rp->nr_domains; dmn++) {
rp               1005 drivers/powercap/intel_rapl_common.c 		pr_debug("update %s domain %s data\n", rp->name,
rp               1006 drivers/powercap/intel_rapl_common.c 			 rp->domains[dmn].name);
rp               1009 drivers/powercap/intel_rapl_common.c 			if (!rapl_read_data_raw(&rp->domains[dmn], prim,
rp               1011 drivers/powercap/intel_rapl_common.c 				rp->domains[dmn].rdd.primitives[prim] = val;
rp               1017 drivers/powercap/intel_rapl_common.c static int rapl_package_register_powercap(struct rapl_package *rp)
rp               1024 drivers/powercap/intel_rapl_common.c 	rapl_update_domain_data(rp);
rp               1027 drivers/powercap/intel_rapl_common.c 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
rp               1030 drivers/powercap/intel_rapl_common.c 			pr_debug("register package domain %s\n", rp->name);
rp               1032 drivers/powercap/intel_rapl_common.c 					    rp->priv->control_type, rp->name,
rp               1037 drivers/powercap/intel_rapl_common.c 					 rp->name);
rp               1041 drivers/powercap/intel_rapl_common.c 			rp->power_zone = power_zone;
rp               1051 drivers/powercap/intel_rapl_common.c 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
rp               1057 drivers/powercap/intel_rapl_common.c 						    rp->priv->control_type,
rp               1058 drivers/powercap/intel_rapl_common.c 						    rd->name, rp->power_zone,
rp               1064 drivers/powercap/intel_rapl_common.c 				 rp->name, rd->name);
rp               1076 drivers/powercap/intel_rapl_common.c 	while (--rd >= rp->domains) {
rp               1077 drivers/powercap/intel_rapl_common.c 		pr_debug("unregister %s domain %s\n", rp->name, rd->name);
rp               1078 drivers/powercap/intel_rapl_common.c 		powercap_unregister_zone(rp->priv->control_type,
rp               1118 drivers/powercap/intel_rapl_common.c 	rd->rp = rapl_find_package_domain(0, priv);
rp               1146 drivers/powercap/intel_rapl_common.c static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
rp               1155 drivers/powercap/intel_rapl_common.c 		ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS];
rp               1169 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra) || !ra.value)
rp               1193 drivers/powercap/intel_rapl_common.c 				rd->rp->name, rd->name);
rp               1209 drivers/powercap/intel_rapl_common.c static int rapl_detect_domains(struct rapl_package *rp, int cpu)
rp               1216 drivers/powercap/intel_rapl_common.c 		if (!rapl_check_domain(cpu, i, rp)) {
rp               1217 drivers/powercap/intel_rapl_common.c 			rp->domain_map |= 1 << i;
rp               1221 drivers/powercap/intel_rapl_common.c 	rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
rp               1222 drivers/powercap/intel_rapl_common.c 	if (!rp->nr_domains) {
rp               1223 drivers/powercap/intel_rapl_common.c 		pr_debug("no valid rapl domains found in %s\n", rp->name);
rp               1226 drivers/powercap/intel_rapl_common.c 	pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
rp               1228 drivers/powercap/intel_rapl_common.c 	rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
rp               1230 drivers/powercap/intel_rapl_common.c 	if (!rp->domains)
rp               1233 drivers/powercap/intel_rapl_common.c 	rapl_init_domains(rp);
rp               1235 drivers/powercap/intel_rapl_common.c 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
rp               1242 drivers/powercap/intel_rapl_common.c void rapl_remove_package(struct rapl_package *rp)
rp               1246 drivers/powercap/intel_rapl_common.c 	package_power_limit_irq_restore(rp);
rp               1248 drivers/powercap/intel_rapl_common.c 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
rp               1260 drivers/powercap/intel_rapl_common.c 			 rp->name, rd->name);
rp               1261 drivers/powercap/intel_rapl_common.c 		powercap_unregister_zone(rp->priv->control_type,
rp               1265 drivers/powercap/intel_rapl_common.c 	powercap_unregister_zone(rp->priv->control_type,
rp               1267 drivers/powercap/intel_rapl_common.c 	list_del(&rp->plist);
rp               1268 drivers/powercap/intel_rapl_common.c 	kfree(rp);
rp               1276 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp;
rp               1278 drivers/powercap/intel_rapl_common.c 	list_for_each_entry(rp, &rapl_packages, plist) {
rp               1279 drivers/powercap/intel_rapl_common.c 		if (rp->id == id
rp               1280 drivers/powercap/intel_rapl_common.c 		    && rp->priv->control_type == priv->control_type)
rp               1281 drivers/powercap/intel_rapl_common.c 			return rp;
rp               1292 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp;
rp               1299 drivers/powercap/intel_rapl_common.c 	rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
rp               1300 drivers/powercap/intel_rapl_common.c 	if (!rp)
rp               1304 drivers/powercap/intel_rapl_common.c 	rp->id = id;
rp               1305 drivers/powercap/intel_rapl_common.c 	rp->lead_cpu = cpu;
rp               1306 drivers/powercap/intel_rapl_common.c 	rp->priv = priv;
rp               1309 drivers/powercap/intel_rapl_common.c 		snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
rp               1312 drivers/powercap/intel_rapl_common.c 		snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
rp               1316 drivers/powercap/intel_rapl_common.c 	if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) {
rp               1320 drivers/powercap/intel_rapl_common.c 	ret = rapl_package_register_powercap(rp);
rp               1322 drivers/powercap/intel_rapl_common.c 		INIT_LIST_HEAD(&rp->plist);
rp               1323 drivers/powercap/intel_rapl_common.c 		list_add(&rp->plist, &rapl_packages);
rp               1324 drivers/powercap/intel_rapl_common.c 		return rp;
rp               1328 drivers/powercap/intel_rapl_common.c 	kfree(rp->domains);
rp               1329 drivers/powercap/intel_rapl_common.c 	kfree(rp);
rp               1336 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp;
rp               1341 drivers/powercap/intel_rapl_common.c 	list_for_each_entry(rp, &rapl_packages, plist) {
rp               1342 drivers/powercap/intel_rapl_common.c 		if (!rp->power_zone)
rp               1344 drivers/powercap/intel_rapl_common.c 		rd = power_zone_to_rapl_domain(rp->power_zone);
rp               1370 drivers/powercap/intel_rapl_common.c 	struct rapl_package *rp;
rp               1375 drivers/powercap/intel_rapl_common.c 	list_for_each_entry(rp, &rapl_packages, plist) {
rp               1376 drivers/powercap/intel_rapl_common.c 		if (!rp->power_zone)
rp               1378 drivers/powercap/intel_rapl_common.c 		rd = power_zone_to_rapl_domain(rp->power_zone);
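
The rapl_compute_time_window_core() excerpt above decodes a time-window field as (1 << y) * (4 + f) * time_unit / 4, i.e. 2^y scaled by a two-bit fraction of quarters, with time_unit in microseconds per rapl_check_unit_core(). The sketch below redoes that arithmetic in isolation; the bit positions chosen for y and f are an assumption for illustration, not taken from the excerpt.

	#include <stdio.h>

	/* Decode a time-window value the way the "core" formula above does.
	 * Assumed layout for this sketch: y in bits 4:0, f in bits 6:5. */
	static unsigned long long window_us(unsigned int raw,
					    unsigned long long time_unit_us)
	{
		unsigned int y = raw & 0x1f;
		unsigned int f = (raw >> 5) & 0x3;

		return (1ULL << y) * (4 + f) * time_unit_us / 4;
	}

	int main(void)
	{
		/* With a 976us time unit (1/1024 s), y=4, f=2 gives ~23.4ms. */
		printf("%llu us\n", window_us((2u << 5) | 4u, 976));
		return 0;
	}
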
rp                 57 drivers/powercap/intel_rapl_msr.c 	struct rapl_package *rp;
rp                 59 drivers/powercap/intel_rapl_msr.c 	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
rp                 60 drivers/powercap/intel_rapl_msr.c 	if (!rp) {
rp                 61 drivers/powercap/intel_rapl_msr.c 		rp = rapl_add_package(cpu, &rapl_msr_priv);
rp                 62 drivers/powercap/intel_rapl_msr.c 		if (IS_ERR(rp))
rp                 63 drivers/powercap/intel_rapl_msr.c 			return PTR_ERR(rp);
rp                 65 drivers/powercap/intel_rapl_msr.c 	cpumask_set_cpu(cpu, &rp->cpumask);
rp                 71 drivers/powercap/intel_rapl_msr.c 	struct rapl_package *rp;
rp                 74 drivers/powercap/intel_rapl_msr.c 	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
rp                 75 drivers/powercap/intel_rapl_msr.c 	if (!rp)
rp                 78 drivers/powercap/intel_rapl_msr.c 	cpumask_clear_cpu(cpu, &rp->cpumask);
rp                 79 drivers/powercap/intel_rapl_msr.c 	lead_cpu = cpumask_first(&rp->cpumask);
rp                 81 drivers/powercap/intel_rapl_msr.c 		rapl_remove_package(rp);
rp                 82 drivers/powercap/intel_rapl_msr.c 	else if (rp->lead_cpu == cpu)
rp                 83 drivers/powercap/intel_rapl_msr.c 		rp->lead_cpu = lead_cpu;
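
The intel_rapl_msr hotplug excerpt hands the package's lead CPU over when a CPU goes offline: clear it from the package cpumask, remove the whole package if the mask is now empty, otherwise promote the first remaining CPU if the departing one was the lead. A toy user-space analogue, with a plain bitmask standing in for the cpumask:

	#include <stdio.h>

	/* Returns 1 if the package should be removed (no CPUs left). */
	static int cpu_offline(unsigned long *cpus, int *lead, int cpu)
	{
		int i;

		*cpus &= ~(1UL << cpu);
		if (!*cpus)
			return 1;		/* last CPU gone */
		if (*lead == cpu) {		/* promote first remaining CPU */
			for (i = 0; i < 64; i++) {
				if (*cpus & (1UL << i)) {
					*lead = i;
					break;
				}
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned long cpus = 0xf;	/* CPUs 0-3 online */
		int lead = 0;

		cpu_offline(&cpus, &lead, 0);
		printf("lead is now CPU %d\n", lead);	/* 1 */
		return 0;
	}
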
rp                 48 drivers/pwm/pwm-rcar.c static void rcar_pwm_write(struct rcar_pwm_chip *rp, u32 data,
rp                 51 drivers/pwm/pwm-rcar.c 	writel(data, rp->base + offset);
rp                 54 drivers/pwm/pwm-rcar.c static u32 rcar_pwm_read(struct rcar_pwm_chip *rp, unsigned int offset)
rp                 56 drivers/pwm/pwm-rcar.c 	return readl(rp->base + offset);
rp                 59 drivers/pwm/pwm-rcar.c static void rcar_pwm_update(struct rcar_pwm_chip *rp, u32 mask, u32 data,
rp                 64 drivers/pwm/pwm-rcar.c 	value = rcar_pwm_read(rp, offset);
rp                 67 drivers/pwm/pwm-rcar.c 	rcar_pwm_write(rp, value, offset);
rp                 70 drivers/pwm/pwm-rcar.c static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns)
rp                 72 drivers/pwm/pwm-rcar.c 	unsigned long clk_rate = clk_get_rate(rp->clk);
rp                 86 drivers/pwm/pwm-rcar.c static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp,
rp                 91 drivers/pwm/pwm-rcar.c 	value = rcar_pwm_read(rp, RCAR_PWMCR);
rp                100 drivers/pwm/pwm-rcar.c 	rcar_pwm_write(rp, value, RCAR_PWMCR);
rp                103 drivers/pwm/pwm-rcar.c static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
rp                107 drivers/pwm/pwm-rcar.c 	unsigned long clk_rate = clk_get_rate(rp->clk);
rp                125 drivers/pwm/pwm-rcar.c 	rcar_pwm_write(rp, cyc | ph, RCAR_PWMCNT);
rp                140 drivers/pwm/pwm-rcar.c static int rcar_pwm_enable(struct rcar_pwm_chip *rp)
rp                145 drivers/pwm/pwm-rcar.c 	value = rcar_pwm_read(rp, RCAR_PWMCNT);
rp                150 drivers/pwm/pwm-rcar.c 	rcar_pwm_update(rp, RCAR_PWMCR_EN0, RCAR_PWMCR_EN0, RCAR_PWMCR);
rp                155 drivers/pwm/pwm-rcar.c static void rcar_pwm_disable(struct rcar_pwm_chip *rp)
rp                157 drivers/pwm/pwm-rcar.c 	rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR);
rp                163 drivers/pwm/pwm-rcar.c 	struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
rp                173 drivers/pwm/pwm-rcar.c 		rcar_pwm_disable(rp);
rp                177 drivers/pwm/pwm-rcar.c 	div = rcar_pwm_get_clock_division(rp, state->period);
rp                181 drivers/pwm/pwm-rcar.c 	rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
rp                183 drivers/pwm/pwm-rcar.c 	ret = rcar_pwm_set_counter(rp, div, state->duty_cycle, state->period);
rp                185 drivers/pwm/pwm-rcar.c 		rcar_pwm_set_clock_control(rp, div);
rp                188 drivers/pwm/pwm-rcar.c 	rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
rp                191 drivers/pwm/pwm-rcar.c 		ret = rcar_pwm_enable(rp);
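
rcar_pwm_update() above follows the usual read-modify-write register-helper shape, which the call sites make clear: rcar_pwm_update(rp, RCAR_PWMCR_EN0, RCAR_PWMCR_EN0, RCAR_PWMCR) sets the enable bit and rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR) clears it. A sketch of that pattern over a plain variable standing in for the MMIO register; the masking step is inferred from those call sites rather than shown in the excerpt.

	#include <stdio.h>

	#define EN0 0x01u		/* stand-in for RCAR_PWMCR_EN0 */

	static unsigned int fake_reg;	/* stands in for the MMIO register */

	/* Clear the bits selected by mask, then merge in the matching bits
	 * of data -- the inferred read-modify-write helper shape. */
	static void reg_update(unsigned int mask, unsigned int data)
	{
		unsigned int value = fake_reg;

		value &= ~mask;
		value |= data & mask;
		fake_reg = value;
	}

	int main(void)
	{
		fake_reg = 0xf0;
		reg_update(EN0, EN0);		/* enable, like rcar_pwm_enable() */
		printf("0x%02x\n", fake_reg);	/* 0xf1 */
		reg_update(EN0, 0);		/* disable, like rcar_pwm_disable() */
		printf("0x%02x\n", fake_reg);	/* 0xf0 */
		return 0;
	}
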
rp                596 drivers/s390/char/con3270.c 	struct raw3270 *rp;
rp                610 drivers/s390/char/con3270.c 	rp = raw3270_setup_console();
rp                611 drivers/s390/char/con3270.c 	if (IS_ERR(rp))
rp                612 drivers/s390/char/con3270.c 		return PTR_ERR(rp);
rp                617 drivers/s390/char/con3270.c 	condev->view.dev = rp;
rp                 94 drivers/s390/char/raw3270.c static void __raw3270_disconnect(struct raw3270 *rp);
rp                110 drivers/s390/char/raw3270.c static inline int raw3270_state_ready(struct raw3270 *rp)
rp                112 drivers/s390/char/raw3270.c 	return rp->state == RAW3270_STATE_READY;
rp                115 drivers/s390/char/raw3270.c static inline int raw3270_state_final(struct raw3270 *rp)
rp                117 drivers/s390/char/raw3270.c 	return rp->state == RAW3270_STATE_INIT ||
rp                118 drivers/s390/char/raw3270.c 		rp->state == RAW3270_STATE_READY;
rp                122 drivers/s390/char/raw3270.c raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
rp                124 drivers/s390/char/raw3270.c 	if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
rp                239 drivers/s390/char/raw3270.c __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
rp                244 drivers/s390/char/raw3270.c 	if (list_empty(&rp->req_queue) &&
rp                245 drivers/s390/char/raw3270.c 	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
rp                247 drivers/s390/char/raw3270.c 		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
rp                254 drivers/s390/char/raw3270.c 	list_add_tail(&rq->list, &rp->req_queue);
rp                261 drivers/s390/char/raw3270.c 	struct raw3270 *rp = view->dev;
rp                263 drivers/s390/char/raw3270.c 	return rp && rp->view == view &&
rp                264 drivers/s390/char/raw3270.c 		!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
rp                271 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                275 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                276 drivers/s390/char/raw3270.c 	if (!rp || rp->view != view ||
rp                277 drivers/s390/char/raw3270.c 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rp                279 drivers/s390/char/raw3270.c 	else if (!raw3270_state_ready(rp))
rp                282 drivers/s390/char/raw3270.c 		rc =  __raw3270_start(rp, view, rq);
rp                290 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                293 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                294 drivers/s390/char/raw3270.c 	if (!rp || rp->view != view ||
rp                295 drivers/s390/char/raw3270.c 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rp                297 drivers/s390/char/raw3270.c 	else if (!raw3270_state_ready(rp))
rp                300 drivers/s390/char/raw3270.c 		rc =  __raw3270_start(rp, view, rq);
rp                307 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                309 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                312 drivers/s390/char/raw3270.c 	list_add_tail(&rq->list, &rp->req_queue);
rp                322 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                326 drivers/s390/char/raw3270.c 	rp = dev_get_drvdata(&cdev->dev);
rp                327 drivers/s390/char/raw3270.c 	if (!rp)
rp                330 drivers/s390/char/raw3270.c 	view = rq ? rq->view : rp->view;
rp                335 drivers/s390/char/raw3270.c 			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rp                339 drivers/s390/char/raw3270.c 			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rp                343 drivers/s390/char/raw3270.c 			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rp                344 drivers/s390/char/raw3270.c 			if (rp->state > RAW3270_STATE_RESET)
rp                345 drivers/s390/char/raw3270.c 				__raw3270_disconnect(rp);
rp                352 drivers/s390/char/raw3270.c 	if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
rp                369 drivers/s390/char/raw3270.c 	while (!list_empty(&rp->req_queue)) {
rp                370 drivers/s390/char/raw3270.c 		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
rp                371 drivers/s390/char/raw3270.c 		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
rp                429 drivers/s390/char/raw3270.c raw3270_size_device_vm(struct raw3270 *rp)
rp                435 drivers/s390/char/raw3270.c 	ccw_device_get_id(rp->cdev, &dev_id);
rp                445 drivers/s390/char/raw3270.c 		rp->model = model;
rp                446 drivers/s390/char/raw3270.c 		rp->rows = 24;
rp                447 drivers/s390/char/raw3270.c 		rp->cols = 80;
rp                450 drivers/s390/char/raw3270.c 		rp->model = model;
rp                451 drivers/s390/char/raw3270.c 		rp->rows = 32;
rp                452 drivers/s390/char/raw3270.c 		rp->cols = 80;
rp                455 drivers/s390/char/raw3270.c 		rp->model = model;
rp                456 drivers/s390/char/raw3270.c 		rp->rows = 43;
rp                457 drivers/s390/char/raw3270.c 		rp->cols = 80;
rp                460 drivers/s390/char/raw3270.c 		rp->model = model;
rp                461 drivers/s390/char/raw3270.c 		rp->rows = 27;
rp                462 drivers/s390/char/raw3270.c 		rp->cols = 132;
rp                468 drivers/s390/char/raw3270.c raw3270_size_device(struct raw3270 *rp)
rp                473 drivers/s390/char/raw3270.c 	uap = (struct raw3270_ua *) (rp->init_data + 1);
rp                475 drivers/s390/char/raw3270.c 	if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
rp                478 drivers/s390/char/raw3270.c 		rp->model = 2;
rp                479 drivers/s390/char/raw3270.c 		rp->rows = 24;
rp                480 drivers/s390/char/raw3270.c 		rp->cols = 80;
rp                484 drivers/s390/char/raw3270.c 	rp->rows = uap->uab.h;
rp                485 drivers/s390/char/raw3270.c 	rp->cols = uap->uab.w;
rp                488 drivers/s390/char/raw3270.c 		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
rp                492 drivers/s390/char/raw3270.c 		rp->rows = uap->aua.hauai;
rp                493 drivers/s390/char/raw3270.c 		rp->cols = uap->aua.wauai;
rp                496 drivers/s390/char/raw3270.c 	rp->model = 0;
rp                497 drivers/s390/char/raw3270.c 	if (rp->rows == 24 && rp->cols == 80)
rp                498 drivers/s390/char/raw3270.c 		rp->model = 2;
rp                499 drivers/s390/char/raw3270.c 	if (rp->rows == 32 && rp->cols == 80)
rp                500 drivers/s390/char/raw3270.c 		rp->model = 3;
rp                501 drivers/s390/char/raw3270.c 	if (rp->rows == 43 && rp->cols == 80)
rp                502 drivers/s390/char/raw3270.c 		rp->model = 4;
rp                503 drivers/s390/char/raw3270.c 	if (rp->rows == 27 && rp->cols == 132)
rp                504 drivers/s390/char/raw3270.c 		rp->model = 5;
rp                508 drivers/s390/char/raw3270.c raw3270_size_device_done(struct raw3270 *rp)
rp                512 drivers/s390/char/raw3270.c 	rp->view = NULL;
rp                513 drivers/s390/char/raw3270.c 	rp->state = RAW3270_STATE_READY;
rp                515 drivers/s390/char/raw3270.c 	list_for_each_entry(view, &rp->view_list, list)
rp                517 drivers/s390/char/raw3270.c 			view->fn->resize(view, rp->model, rp->rows, rp->cols);
rp                519 drivers/s390/char/raw3270.c 	list_for_each_entry(view, &rp->view_list, list) {
rp                520 drivers/s390/char/raw3270.c 		rp->view = view;
rp                523 drivers/s390/char/raw3270.c 		rp->view = NULL;
rp                530 drivers/s390/char/raw3270.c 	struct raw3270 *rp = rq->view->dev;
rp                532 drivers/s390/char/raw3270.c 	raw3270_size_device(rp);
rp                533 drivers/s390/char/raw3270.c 	raw3270_size_device_done(rp);
rp                537 drivers/s390/char/raw3270.c raw3270_read_modified(struct raw3270 *rp)
rp                539 drivers/s390/char/raw3270.c 	if (rp->state != RAW3270_STATE_W4ATTN)
rp                542 drivers/s390/char/raw3270.c 	memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
rp                543 drivers/s390/char/raw3270.c 	memset(&rp->init_data, 0, sizeof(rp->init_data));
rp                544 drivers/s390/char/raw3270.c 	rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp                545 drivers/s390/char/raw3270.c 	rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp                546 drivers/s390/char/raw3270.c 	rp->init_readmod.ccw.count = sizeof(rp->init_data);
rp                547 drivers/s390/char/raw3270.c 	rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
rp                548 drivers/s390/char/raw3270.c 	rp->init_readmod.callback = raw3270_read_modified_cb;
rp                549 drivers/s390/char/raw3270.c 	rp->state = RAW3270_STATE_READMOD;
rp                550 drivers/s390/char/raw3270.c 	raw3270_start_irq(&rp->init_view, &rp->init_readmod);
rp                554 drivers/s390/char/raw3270.c raw3270_writesf_readpart(struct raw3270 *rp)
rp                560 drivers/s390/char/raw3270.c 	memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
rp                561 drivers/s390/char/raw3270.c 	memset(&rp->init_data, 0, sizeof(rp->init_data));
rp                562 drivers/s390/char/raw3270.c 	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
rp                563 drivers/s390/char/raw3270.c 	rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp                564 drivers/s390/char/raw3270.c 	rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp                565 drivers/s390/char/raw3270.c 	rp->init_readpart.ccw.count = sizeof(wbuf);
rp                566 drivers/s390/char/raw3270.c 	rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
rp                567 drivers/s390/char/raw3270.c 	rp->state = RAW3270_STATE_W4ATTN;
rp                568 drivers/s390/char/raw3270.c 	raw3270_start_irq(&rp->init_view, &rp->init_readpart);
rp                577 drivers/s390/char/raw3270.c 	struct raw3270 *rp = rq->view->dev;
rp                579 drivers/s390/char/raw3270.c 	if (rp->state != RAW3270_STATE_RESET)
rp                583 drivers/s390/char/raw3270.c 		rp->state = RAW3270_STATE_INIT;
rp                585 drivers/s390/char/raw3270.c 		raw3270_size_device_vm(rp);
rp                586 drivers/s390/char/raw3270.c 		raw3270_size_device_done(rp);
rp                588 drivers/s390/char/raw3270.c 		raw3270_writesf_readpart(rp);
rp                589 drivers/s390/char/raw3270.c 	memset(&rp->init_reset, 0, sizeof(rp->init_reset));
rp                593 drivers/s390/char/raw3270.c __raw3270_reset_device(struct raw3270 *rp)
rp                598 drivers/s390/char/raw3270.c 	if (rp->init_reset.view)
rp                601 drivers/s390/char/raw3270.c 	rp->init_data[0] = TW_KR;
rp                602 drivers/s390/char/raw3270.c 	rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp                603 drivers/s390/char/raw3270.c 	rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp                604 drivers/s390/char/raw3270.c 	rp->init_reset.ccw.count = 1;
rp                605 drivers/s390/char/raw3270.c 	rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
rp                606 drivers/s390/char/raw3270.c 	rp->init_reset.callback = raw3270_reset_device_cb;
rp                607 drivers/s390/char/raw3270.c 	rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
rp                608 drivers/s390/char/raw3270.c 	if (rc == 0 && rp->state == RAW3270_STATE_INIT)
rp                609 drivers/s390/char/raw3270.c 		rp->state = RAW3270_STATE_RESET;
rp                614 drivers/s390/char/raw3270.c raw3270_reset_device(struct raw3270 *rp)
rp                619 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                620 drivers/s390/char/raw3270.c 	rc = __raw3270_reset_device(rp);
rp                621 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                628 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                631 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                632 drivers/s390/char/raw3270.c 	if (!rp || rp->view != view ||
rp                633 drivers/s390/char/raw3270.c 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rp                635 drivers/s390/char/raw3270.c 	else if (!raw3270_state_ready(rp))
rp                643 drivers/s390/char/raw3270.c __raw3270_disconnect(struct raw3270 *rp)
rp                648 drivers/s390/char/raw3270.c 	rp->state = RAW3270_STATE_INIT;
rp                649 drivers/s390/char/raw3270.c 	rp->view = &rp->init_view;
rp                651 drivers/s390/char/raw3270.c 	while (!list_empty(&rp->req_queue)) {
rp                652 drivers/s390/char/raw3270.c 		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
rp                661 drivers/s390/char/raw3270.c 	__raw3270_reset_device(rp);
rp                668 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                680 drivers/s390/char/raw3270.c 		rp = view->dev;
rp                681 drivers/s390/char/raw3270.c 		raw3270_read_modified(rp);
rp                693 drivers/s390/char/raw3270.c raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
rp                699 drivers/s390/char/raw3270.c 	memset(rp, 0, sizeof(struct raw3270));
rp                708 drivers/s390/char/raw3270.c 	rp->ascebc = ascebc;
rp                711 drivers/s390/char/raw3270.c 	rp->rows = 24;
rp                712 drivers/s390/char/raw3270.c 	rp->cols = 80;
rp                714 drivers/s390/char/raw3270.c 	INIT_LIST_HEAD(&rp->req_queue);
rp                715 drivers/s390/char/raw3270.c 	INIT_LIST_HEAD(&rp->view_list);
rp                717 drivers/s390/char/raw3270.c 	rp->init_view.dev = rp;
rp                718 drivers/s390/char/raw3270.c 	rp->init_view.fn = &raw3270_init_fn;
rp                719 drivers/s390/char/raw3270.c 	rp->view = &rp->init_view;
rp                729 drivers/s390/char/raw3270.c 	rp->minor = -1;
rp                733 drivers/s390/char/raw3270.c 			rp->minor = minor;
rp                734 drivers/s390/char/raw3270.c 			__list_add(&rp->list, l->prev, l);
rp                739 drivers/s390/char/raw3270.c 	if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
rp                740 drivers/s390/char/raw3270.c 		rp->minor = minor;
rp                741 drivers/s390/char/raw3270.c 		list_add_tail(&rp->list, &raw3270_devices);
rp                745 drivers/s390/char/raw3270.c 	if (rp->minor == -1)
rp                747 drivers/s390/char/raw3270.c 	rp->cdev = cdev;
rp                748 drivers/s390/char/raw3270.c 	dev_set_drvdata(&cdev->dev, rp);
rp                764 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                772 drivers/s390/char/raw3270.c 	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
rp                774 drivers/s390/char/raw3270.c 	rc = raw3270_setup_device(cdev, rp, ascebc);
rp                777 drivers/s390/char/raw3270.c 	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
rp                785 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                787 drivers/s390/char/raw3270.c 		__raw3270_reset_device(rp);
rp                788 drivers/s390/char/raw3270.c 		while (!raw3270_state_final(rp)) {
rp                789 drivers/s390/char/raw3270.c 			ccw_device_wait_idle(rp->cdev);
rp                792 drivers/s390/char/raw3270.c 	} while (rp->state != RAW3270_STATE_READY);
rp                793 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                794 drivers/s390/char/raw3270.c 	return rp;
rp                798 drivers/s390/char/raw3270.c raw3270_wait_cons_dev(struct raw3270 *rp)
rp                802 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                803 drivers/s390/char/raw3270.c 	ccw_device_wait_idle(rp->cdev);
rp                804 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                815 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                819 drivers/s390/char/raw3270.c 	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
rp                820 drivers/s390/char/raw3270.c 	if (!rp)
rp                824 drivers/s390/char/raw3270.c 		kfree(rp);
rp                827 drivers/s390/char/raw3270.c 	rc = raw3270_setup_device(cdev, rp, ascebc);
rp                829 drivers/s390/char/raw3270.c 		kfree(rp->ascebc);
rp                830 drivers/s390/char/raw3270.c 		kfree(rp);
rp                831 drivers/s390/char/raw3270.c 		rp = ERR_PTR(rc);
rp                835 drivers/s390/char/raw3270.c 	return rp;
rp                844 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                849 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                850 drivers/s390/char/raw3270.c 	if (!rp)
rp                852 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                853 drivers/s390/char/raw3270.c 	if (rp->view == view)
rp                855 drivers/s390/char/raw3270.c 	else if (!raw3270_state_ready(rp))
rp                857 drivers/s390/char/raw3270.c 	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rp                861 drivers/s390/char/raw3270.c 		if (rp->view && rp->view->fn->deactivate) {
rp                862 drivers/s390/char/raw3270.c 			oldview = rp->view;
rp                865 drivers/s390/char/raw3270.c 		rp->view = view;
rp                869 drivers/s390/char/raw3270.c 			rp->view = oldview;
rp                872 drivers/s390/char/raw3270.c 				list_for_each_entry(nv, &rp->view_list, list)
rp                874 drivers/s390/char/raw3270.c 						rp->view = nv;
rp                877 drivers/s390/char/raw3270.c 						rp->view = NULL;
rp                882 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                893 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                895 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                896 drivers/s390/char/raw3270.c 	if (!rp)
rp                898 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                899 drivers/s390/char/raw3270.c 	if (rp->view == view) {
rp                901 drivers/s390/char/raw3270.c 		rp->view = NULL;
rp                904 drivers/s390/char/raw3270.c 		list_add_tail(&view->list, &rp->view_list);
rp                906 drivers/s390/char/raw3270.c 		if (raw3270_state_ready(rp) &&
rp                907 drivers/s390/char/raw3270.c 		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
rp                908 drivers/s390/char/raw3270.c 			list_for_each_entry(view, &rp->view_list, list) {
rp                909 drivers/s390/char/raw3270.c 				rp->view = view;
rp                912 drivers/s390/char/raw3270.c 				rp->view = NULL;
rp                916 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                926 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                933 drivers/s390/char/raw3270.c 	list_for_each_entry(rp, &raw3270_devices, list) {
rp                934 drivers/s390/char/raw3270.c 		if (rp->minor != minor)
rp                936 drivers/s390/char/raw3270.c 		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                938 drivers/s390/char/raw3270.c 		view->dev = rp;
rp                940 drivers/s390/char/raw3270.c 		view->model = rp->model;
rp                941 drivers/s390/char/raw3270.c 		view->rows = rp->rows;
rp                942 drivers/s390/char/raw3270.c 		view->cols = rp->cols;
rp                943 drivers/s390/char/raw3270.c 		view->ascebc = rp->ascebc;
rp                946 drivers/s390/char/raw3270.c 		list_add(&view->list, &rp->view_list);
rp                948 drivers/s390/char/raw3270.c 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                961 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                967 drivers/s390/char/raw3270.c 	list_for_each_entry(rp, &raw3270_devices, list) {
rp                968 drivers/s390/char/raw3270.c 		if (rp->minor != minor)
rp                970 drivers/s390/char/raw3270.c 		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                971 drivers/s390/char/raw3270.c 		list_for_each_entry(tmp, &rp->view_list, list) {
rp                978 drivers/s390/char/raw3270.c 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp                992 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp                995 drivers/s390/char/raw3270.c 	rp = view->dev;
rp                996 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp                997 drivers/s390/char/raw3270.c 	if (rp->view == view) {
rp                999 drivers/s390/char/raw3270.c 		rp->view = NULL;
rp               1002 drivers/s390/char/raw3270.c 	if (!rp->view && raw3270_state_ready(rp) &&
rp               1003 drivers/s390/char/raw3270.c 	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
rp               1005 drivers/s390/char/raw3270.c 		list_for_each_entry(nv, &rp->view_list, list) {
rp               1007 drivers/s390/char/raw3270.c 				rp->view = nv;
rp               1012 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp               1024 drivers/s390/char/raw3270.c raw3270_delete_device(struct raw3270 *rp)
rp               1030 drivers/s390/char/raw3270.c 	list_del_init(&rp->list);
rp               1034 drivers/s390/char/raw3270.c 	cdev = rp->cdev;
rp               1035 drivers/s390/char/raw3270.c 	rp->cdev = NULL;
rp               1043 drivers/s390/char/raw3270.c 	kfree(rp->ascebc);
rp               1044 drivers/s390/char/raw3270.c 	kfree(rp);
rp               1091 drivers/s390/char/raw3270.c static int raw3270_create_attributes(struct raw3270 *rp)
rp               1093 drivers/s390/char/raw3270.c 	return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
rp               1103 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1107 drivers/s390/char/raw3270.c 	list_for_each_entry(rp, &raw3270_devices, list)
rp               1108 drivers/s390/char/raw3270.c 		notifier->create(rp->minor);
rp               1115 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1118 drivers/s390/char/raw3270.c 	list_for_each_entry(rp, &raw3270_devices, list)
rp               1119 drivers/s390/char/raw3270.c 		notifier->destroy(rp->minor);
rp               1131 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1134 drivers/s390/char/raw3270.c 	rp = raw3270_create_device(cdev);
rp               1135 drivers/s390/char/raw3270.c 	if (IS_ERR(rp))
rp               1136 drivers/s390/char/raw3270.c 		return PTR_ERR(rp);
rp               1137 drivers/s390/char/raw3270.c 	rc = raw3270_create_attributes(rp);
rp               1140 drivers/s390/char/raw3270.c 	raw3270_reset_device(rp);
rp               1143 drivers/s390/char/raw3270.c 		np->create(rp->minor);
rp               1148 drivers/s390/char/raw3270.c 	raw3270_delete_device(rp);
rp               1159 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1163 drivers/s390/char/raw3270.c 	rp = dev_get_drvdata(&cdev->dev);
rp               1170 drivers/s390/char/raw3270.c 	if (rp == NULL)
rp               1177 drivers/s390/char/raw3270.c 	if (rp->view) {
rp               1178 drivers/s390/char/raw3270.c 		if (rp->view->fn->deactivate)
rp               1179 drivers/s390/char/raw3270.c 			rp->view->fn->deactivate(rp->view);
rp               1180 drivers/s390/char/raw3270.c 		rp->view = NULL;
rp               1182 drivers/s390/char/raw3270.c 	while (!list_empty(&rp->view_list)) {
rp               1183 drivers/s390/char/raw3270.c 		v = list_entry(rp->view_list.next, struct raw3270_view, list);
rp               1194 drivers/s390/char/raw3270.c 		np->destroy(rp->minor);
rp               1198 drivers/s390/char/raw3270.c 	raw3270_reset_device(rp);
rp               1200 drivers/s390/char/raw3270.c 	raw3270_delete_device(rp);
rp               1209 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1211 drivers/s390/char/raw3270.c 	rp = dev_get_drvdata(&cdev->dev);
rp               1212 drivers/s390/char/raw3270.c 	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
rp               1220 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1224 drivers/s390/char/raw3270.c 	rp = dev_get_drvdata(&cdev->dev);
rp               1225 drivers/s390/char/raw3270.c 	if (!rp)
rp               1227 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp               1228 drivers/s390/char/raw3270.c 	if (rp->view && rp->view->fn->deactivate)
rp               1229 drivers/s390/char/raw3270.c 		rp->view->fn->deactivate(rp->view);
rp               1230 drivers/s390/char/raw3270.c 	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
rp               1235 drivers/s390/char/raw3270.c 		list_for_each_entry(view, &rp->view_list, list) {
rp               1240 drivers/s390/char/raw3270.c 	set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
rp               1241 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp               1247 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1250 drivers/s390/char/raw3270.c 	rp = dev_get_drvdata(&cdev->dev);
rp               1251 drivers/s390/char/raw3270.c 	if (!rp)
rp               1253 drivers/s390/char/raw3270.c 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rp               1254 drivers/s390/char/raw3270.c 	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
rp               1255 drivers/s390/char/raw3270.c 	if (rp->view && rp->view->fn->activate)
rp               1256 drivers/s390/char/raw3270.c 		rp->view->fn->activate(rp->view);
rp               1257 drivers/s390/char/raw3270.c 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
rp               1264 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1266 drivers/s390/char/raw3270.c 	rp = view->dev;
rp               1267 drivers/s390/char/raw3270.c 	if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rp               1268 drivers/s390/char/raw3270.c 		ccw_device_force_console(rp->cdev);
rp               1306 drivers/s390/char/raw3270.c 	struct raw3270 *rp;
rp               1317 drivers/s390/char/raw3270.c 		list_for_each_entry(rp, &raw3270_devices, list) {
rp               1318 drivers/s390/char/raw3270.c 			get_device(&rp->cdev->dev);
rp               1319 drivers/s390/char/raw3270.c 			raw3270_create_attributes(rp);
rp               2276 drivers/scsi/bfa/bfa_fcpim.c 	struct bfa_rport_s *rp = NULL;
rp               2291 drivers/scsi/bfa/bfa_fcpim.c 			rp = rp_fcs->bfa_rport;
rp               2309 drivers/scsi/bfa/bfa_fcpim.c 	if (rp) {
rp               2311 drivers/scsi/bfa/bfa_fcpim.c 						   rp->rport_info.local_pid);
rp               2312 drivers/scsi/bfa/bfa_fcpim.c 		lunm_list[free_index].rp_tag = rp->rport_tag;
rp               2338 drivers/scsi/bfa/bfa_fcpim.c 	struct bfa_rport_s	*rp = NULL;
rp               2360 drivers/scsi/bfa/bfa_fcpim.c 				rp = rp_fcs->bfa_rport;
rp               2793 drivers/scsi/bfa/bfa_fcs_rport.c 	struct bfa_fcs_rport_s *rp;
rp               2797 drivers/scsi/bfa/bfa_fcs_rport.c 		rp = (struct bfa_fcs_rport_s *) qe;
rp               2798 drivers/scsi/bfa/bfa_fcs_rport.c 		bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
rp               2799 drivers/scsi/bfa/bfa_fcs_rport.c 		rp->scn_online = BFA_TRUE;
rp               2809 drivers/scsi/bfa/bfa_fcs_rport.c 	struct bfa_fcs_rport_s *rp = rport;
rp               2811 drivers/scsi/bfa/bfa_fcs_rport.c 	bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
rp               2812 drivers/scsi/bfa/bfa_fcs_rport.c 	rp->scn_online = BFA_FALSE;
rp               2820 drivers/scsi/bfa/bfa_fcs_rport.c 	struct bfa_fcs_rport_s *rp;
rp               2824 drivers/scsi/bfa/bfa_fcs_rport.c 		rp = (struct bfa_fcs_rport_s *) qe;
rp               2825 drivers/scsi/bfa/bfa_fcs_rport.c 		bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
rp               2826 drivers/scsi/bfa/bfa_fcs_rport.c 		rp->scn_online = BFA_FALSE;
rp                228 drivers/scsi/bfa/bfa_svc.c static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
rp                229 drivers/scsi/bfa/bfa_svc.c static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
rp                230 drivers/scsi/bfa/bfa_svc.c static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
rp                239 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
rp                241 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
rp                243 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
rp                245 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
rp                247 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
rp                249 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
rp                251 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
rp                253 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
rp                255 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
rp                257 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
rp                259 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
rp                261 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
rp                263 drivers/scsi/bfa/bfa_svc.c static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
rp               4312 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4314 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4315 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4319 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_un_cr);
rp               4320 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_created);
rp               4324 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_un_unexp);
rp               4325 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4330 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4332 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4333 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4337 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_cr_on);
rp               4338 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwcreate(rp))
rp               4339 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
rp               4341 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
rp               4345 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_cr_del);
rp               4346 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4347 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4351 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_cr_hwf);
rp               4352 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4356 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_cr_unexp);
rp               4357 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4365 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4367 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4368 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4372 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_rsp);
rp               4373 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_online);
rp               4374 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_online_cb(rp);
rp               4378 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_del);
rp               4379 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
rp               4383 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_off);
rp               4384 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
rp               4388 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_hwf);
rp               4389 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4393 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_unexp);
rp               4394 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4402 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4404 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4405 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4409 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
rp               4410 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_send_fwcreate(rp);
rp               4414 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_del);
rp               4415 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4416 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wcancel(&rp->reqq_wait);
rp               4417 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4421 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_off);
rp               4422 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
rp               4423 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wcancel(&rp->reqq_wait);
rp               4424 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4428 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_hwf);
rp               4429 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4430 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wcancel(&rp->reqq_wait);
rp               4434 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwc_unexp);
rp               4435 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4443 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4447 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4448 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4452 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_on_off);
rp               4453 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwdelete(rp))
rp               4454 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
rp               4456 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
rp               4460 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_on_del);
rp               4461 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwdelete(rp))
rp               4462 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
rp               4464 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
rp               4468 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_on_hwf);
rp               4469 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4473 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_send_fwspeed(rp);
rp               4477 drivers/scsi/bfa/bfa_svc.c 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
rp               4478 drivers/scsi/bfa/bfa_svc.c 		rp->qos_attr = qos_scn->new_qos_attr;
rp               4479 drivers/scsi/bfa/bfa_svc.c 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
rp               4480 drivers/scsi/bfa/bfa_svc.c 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
rp               4481 drivers/scsi/bfa/bfa_svc.c 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
rp               4482 drivers/scsi/bfa/bfa_svc.c 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
rp               4491 drivers/scsi/bfa/bfa_svc.c 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
rp               4496 drivers/scsi/bfa/bfa_svc.c 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
rp               4502 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_on_unexp);
rp               4503 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4511 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4513 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4514 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4518 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_rsp);
rp               4519 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
rp               4520 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4524 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_del);
rp               4525 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
rp               4529 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_hwf);
rp               4530 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4531 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4535 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_unexp);
rp               4536 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4541 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4543 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4544 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4548 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
rp               4549 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_send_fwdelete(rp);
rp               4553 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_del);
rp               4554 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
rp               4558 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_hwf);
rp               4559 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4560 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wcancel(&rp->reqq_wait);
rp               4561 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4565 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_fwd_unexp);
rp               4566 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4574 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4576 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4577 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4581 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_off_del);
rp               4582 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4583 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4587 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_off_on);
rp               4588 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwcreate(rp))
rp               4589 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
rp               4591 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
rp               4595 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_off_hwf);
rp               4596 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4600 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4604 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_off_unexp);
rp               4605 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4613 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4615 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4616 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4620 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_del_fwrsp);
rp               4621 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4622 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4626 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_del_hwf);
rp               4627 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4628 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4632 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4637 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4639 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4640 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4644 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_del_fwrsp);
rp               4645 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
rp               4646 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_send_fwdelete(rp);
rp               4650 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_del_hwf);
rp               4651 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4652 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wcancel(&rp->reqq_wait);
rp               4653 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4657 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4665 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
rp               4668 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4669 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4673 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_delp_fwrsp);
rp               4674 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwdelete(rp))
rp               4675 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
rp               4677 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
rp               4681 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_delp_hwf);
rp               4682 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4683 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4687 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_delp_unexp);
rp               4688 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4696 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
rp               4699 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4700 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4704 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_offp_fwrsp);
rp               4705 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwdelete(rp))
rp               4706 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
rp               4708 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
rp               4712 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_offp_del);
rp               4713 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
rp               4717 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_offp_hwf);
rp               4718 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
rp               4719 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4723 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_offp_unexp);
rp               4724 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4732 drivers/scsi/bfa/bfa_svc.c bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
rp               4734 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, rp->rport_tag);
rp               4735 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(rp->bfa, event);
rp               4739 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_iocd_off);
rp               4740 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_offline_cb(rp);
rp               4744 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_iocd_del);
rp               4745 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4746 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_free(rp);
rp               4750 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_iocd_on);
rp               4751 drivers/scsi/bfa/bfa_svc.c 		if (bfa_rport_send_fwcreate(rp))
rp               4752 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
rp               4754 drivers/scsi/bfa/bfa_svc.c 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
rp               4761 drivers/scsi/bfa/bfa_svc.c 		bfa_stats(rp, sm_iocd_unexp);
rp               4762 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_fault(rp->bfa, event);
rp               4775 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s *rp = cbarg;
rp               4778 drivers/scsi/bfa/bfa_svc.c 		bfa_cb_rport_online(rp->rport_drv);
rp               4784 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s *rp = cbarg;
rp               4787 drivers/scsi/bfa/bfa_svc.c 		bfa_cb_rport_offline(rp->rport_drv);
rp               4793 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s	*rp = cbarg;
rp               4795 drivers/scsi/bfa/bfa_svc.c 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
rp               4817 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s *rp;
rp               4824 drivers/scsi/bfa/bfa_svc.c 	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
rp               4825 drivers/scsi/bfa/bfa_svc.c 	mod->rps_list = rp;
rp               4831 drivers/scsi/bfa/bfa_svc.c 	for (i = 0; i < mod->num_rports; i++, rp++) {
rp               4832 drivers/scsi/bfa/bfa_svc.c 		memset(rp, 0, sizeof(struct bfa_rport_s));
rp               4833 drivers/scsi/bfa/bfa_svc.c 		rp->bfa = bfa;
rp               4834 drivers/scsi/bfa/bfa_svc.c 		rp->rport_tag = i;
rp               4835 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
rp               4841 drivers/scsi/bfa/bfa_svc.c 			list_add_tail(&rp->qe, &mod->rp_free_q);
rp               4843 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
rp               4849 drivers/scsi/bfa/bfa_svc.c 	bfa_mem_kva_curp(mod) = (u8 *) rp;
rp               4891 drivers/scsi/bfa/bfa_svc.c bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
rp               4898 drivers/scsi/bfa/bfa_svc.c 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
rp               4900 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
rp               4905 drivers/scsi/bfa/bfa_svc.c 			bfa_fn_lpu(rp->bfa));
rp               4906 drivers/scsi/bfa/bfa_svc.c 	m->bfa_handle = rp->rport_tag;
rp               4907 drivers/scsi/bfa/bfa_svc.c 	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
rp               4908 drivers/scsi/bfa/bfa_svc.c 	m->pid = rp->rport_info.pid;
rp               4909 drivers/scsi/bfa/bfa_svc.c 	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
rp               4910 drivers/scsi/bfa/bfa_svc.c 	m->local_pid = rp->rport_info.local_pid;
rp               4911 drivers/scsi/bfa/bfa_svc.c 	m->fc_class = rp->rport_info.fc_class;
rp               4912 drivers/scsi/bfa/bfa_svc.c 	m->vf_en = rp->rport_info.vf_en;
rp               4913 drivers/scsi/bfa/bfa_svc.c 	m->vf_id = rp->rport_info.vf_id;
rp               4914 drivers/scsi/bfa/bfa_svc.c 	m->cisc = rp->rport_info.cisc;
rp               4919 drivers/scsi/bfa/bfa_svc.c 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
rp               4924 drivers/scsi/bfa/bfa_svc.c bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
rp               4931 drivers/scsi/bfa/bfa_svc.c 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
rp               4933 drivers/scsi/bfa/bfa_svc.c 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
rp               4938 drivers/scsi/bfa/bfa_svc.c 			bfa_fn_lpu(rp->bfa));
rp               4939 drivers/scsi/bfa/bfa_svc.c 	m->fw_handle = rp->fw_handle;
rp               4944 drivers/scsi/bfa/bfa_svc.c 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
rp               4949 drivers/scsi/bfa/bfa_svc.c bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
rp               4956 drivers/scsi/bfa/bfa_svc.c 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
rp               4958 drivers/scsi/bfa/bfa_svc.c 		bfa_trc(rp->bfa, rp->rport_info.speed);
rp               4963 drivers/scsi/bfa/bfa_svc.c 			bfa_fn_lpu(rp->bfa));
rp               4964 drivers/scsi/bfa/bfa_svc.c 	m->fw_handle = rp->fw_handle;
rp               4965 drivers/scsi/bfa/bfa_svc.c 	m->speed = (u8)rp->rport_info.speed;
rp               4970 drivers/scsi/bfa/bfa_svc.c 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
rp               4987 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s *rp;
rp               4995 drivers/scsi/bfa/bfa_svc.c 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
rp               4996 drivers/scsi/bfa/bfa_svc.c 		rp->fw_handle = msg.create_rsp->fw_handle;
rp               4997 drivers/scsi/bfa/bfa_svc.c 		rp->qos_attr = msg.create_rsp->qos_attr;
rp               4998 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_set_lunmask(bfa, rp);
rp               5000 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
rp               5004 drivers/scsi/bfa/bfa_svc.c 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
rp               5006 drivers/scsi/bfa/bfa_svc.c 		bfa_rport_unset_lunmask(bfa, rp);
rp               5007 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
rp               5011 drivers/scsi/bfa/bfa_svc.c 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
rp               5012 drivers/scsi/bfa/bfa_svc.c 		rp->event_arg.fw_msg = msg.qos_scn_evt;
rp               5013 drivers/scsi/bfa/bfa_svc.c 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
rp               5027 drivers/scsi/bfa/bfa_svc.c 		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
rp               5028 drivers/scsi/bfa/bfa_svc.c 		bfa_cb_rport_scn_no_dev(rp->rport_drv);
rp               5057 drivers/scsi/bfa/bfa_svc.c 	struct bfa_rport_s *rp;
rp               5059 drivers/scsi/bfa/bfa_svc.c 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
rp               5061 drivers/scsi/bfa/bfa_svc.c 	if (rp == NULL)
rp               5064 drivers/scsi/bfa/bfa_svc.c 	rp->bfa = bfa;
rp               5065 drivers/scsi/bfa/bfa_svc.c 	rp->rport_drv = rport_drv;
rp               5066 drivers/scsi/bfa/bfa_svc.c 	memset(&rp->stats, 0, sizeof(rp->stats));
rp               5068 drivers/scsi/bfa/bfa_svc.c 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
rp               5069 drivers/scsi/bfa/bfa_svc.c 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
rp               5071 drivers/scsi/bfa/bfa_svc.c 	return rp;
rp               5106 drivers/scsi/bfa/bfa_svc.c bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
rp               5110 drivers/scsi/bfa/bfa_svc.c 	u8 lp_tag = (u8)rp->rport_info.lp_tag;
rp               5112 drivers/scsi/bfa/bfa_svc.c 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
rp               5113 drivers/scsi/bfa/bfa_svc.c 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
rp               5115 drivers/scsi/bfa/bfa_svc.c 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
rp               5116 drivers/scsi/bfa/bfa_svc.c 					rp->lun_mask = BFA_TRUE;
rp               5117 drivers/scsi/bfa/bfa_svc.c 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
rp               5122 drivers/scsi/bfa/bfa_svc.c bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
rp               5127 drivers/scsi/bfa/bfa_svc.c 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
rp               5128 drivers/scsi/bfa/bfa_svc.c 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
rp               5130 drivers/scsi/bfa/bfa_svc.c 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
rp               5131 drivers/scsi/bfa/bfa_svc.c 				rp->lun_mask = BFA_FALSE;
rp                586 drivers/scsi/bfa/bfa_svc.h void bfa_cb_rport_scn_no_dev(void *rp);
rp                596 drivers/scsi/bfa/bfa_svc.h void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
rp                597 drivers/scsi/bfa/bfa_svc.h void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
rp                665 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fc_rport_libfc_priv *rp;
rp                692 drivers/scsi/bnx2fc/bnx2fc_io.c 	rp = rport->dd_data;
rp                704 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
rp               1127 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp               1144 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
rp               1245 drivers/scsi/bnx2fc/bnx2fc_io.c 						(2 * rp->r_a_tov + 1) * HZ);
rp               1859 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp               1878 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
rp                447 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct fc_rport_libfc_priv *rp;
rp                460 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		rp = rport->dd_data;
rp                489 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt = (struct bnx2fc_rport *)&rp[1];
rp                537 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		rp = rport->dd_data;
rp                543 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt = (struct bnx2fc_rport *)&rp[1];
rp                324 drivers/scsi/fnic/fnic_scsi.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp                389 drivers/scsi/fnic/fnic_scsi.c 	    (rp->flags & FC_RP_FLAGS_RETRY))
rp                404 drivers/scsi/fnic/fnic_scsi.c 					 rport->maxframe_size, rp->r_a_tov,
rp                405 drivers/scsi/fnic/fnic_scsi.c 					 rp->e_d_tov);
rp                437 drivers/scsi/fnic/fnic_scsi.c 	struct fc_rport_libfc_priv *rp;
rp                464 drivers/scsi/fnic/fnic_scsi.c 	rp = rport->dd_data;
rp                465 drivers/scsi/fnic/fnic_scsi.c 	if (!rp || rp->rp_state == RPORT_ST_DELETE) {
rp                476 drivers/scsi/fnic/fnic_scsi.c 	if (rp->rp_state != RPORT_ST_READY) {
rp                479 drivers/scsi/fnic/fnic_scsi.c 			rport->port_id, rp->rp_state);
rp                 73 drivers/scsi/libfc/fc_disc.c 	struct fc_els_rscn *rp;
rp                 90 drivers/scsi/libfc/fc_disc.c 	rp = fc_frame_payload_get(fp, sizeof(*rp));
rp                 91 drivers/scsi/libfc/fc_disc.c 	if (!rp)
rp                 94 drivers/scsi/libfc/fc_disc.c 	if (rp->rscn_page_len != sizeof(*pp))
rp                 97 drivers/scsi/libfc/fc_disc.c 	len = ntohs(rp->rscn_plen);
rp                 98 drivers/scsi/libfc/fc_disc.c 	if (len < sizeof(*rp))
rp                101 drivers/scsi/libfc/fc_disc.c 	rp = fc_frame_payload_get(fp, len);
rp                102 drivers/scsi/libfc/fc_disc.c 	if (!rp)
rp                105 drivers/scsi/libfc/fc_disc.c 	len -= sizeof(*rp);
rp                109 drivers/scsi/libfc/fc_disc.c 	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
rp               1341 drivers/scsi/libfc/fc_exch.c 	struct fc_ba_rjt *rp;
rp               1348 drivers/scsi/libfc/fc_exch.c 	fp = fc_frame_alloc(lport, sizeof(*rp));
rp               1357 drivers/scsi/libfc/fc_exch.c 	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
rp               1359 drivers/scsi/libfc/fc_exch.c 	rp = fc_frame_payload_get(fp, sizeof(*rp));
rp               1360 drivers/scsi/libfc/fc_exch.c 	rp->br_reason = reason;
rp               1361 drivers/scsi/libfc/fc_exch.c 	rp->br_explan = explan;
rp               2005 drivers/scsi/libfc/fc_exch.c 	struct fc_els_rec *rp;
rp               2013 drivers/scsi/libfc/fc_exch.c 	rp = fc_frame_payload_get(rfp, sizeof(*rp));
rp               2015 drivers/scsi/libfc/fc_exch.c 	if (!rp)
rp               2017 drivers/scsi/libfc/fc_exch.c 	sid = ntoh24(rp->rec_s_id);
rp               2018 drivers/scsi/libfc/fc_exch.c 	rxid = ntohs(rp->rec_rx_id);
rp               2019 drivers/scsi/libfc/fc_exch.c 	oxid = ntohs(rp->rec_ox_id);
rp               2054 drivers/scsi/libfc/fc_exch.c 	acc->reca_ox_id = rp->rec_ox_id;
rp               2055 drivers/scsi/libfc/fc_exch.c 	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
rp               2281 drivers/scsi/libfc/fc_exch.c 	struct fc_els_rrq *rp;
rp               2287 drivers/scsi/libfc/fc_exch.c 	rp = fc_frame_payload_get(fp, sizeof(*rp));
rp               2289 drivers/scsi/libfc/fc_exch.c 	if (!rp)
rp               2295 drivers/scsi/libfc/fc_exch.c 	sid = ntoh24(rp->rrq_s_id);		/* subject source */
rp               2297 drivers/scsi/libfc/fc_exch.c 			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
rp               2304 drivers/scsi/libfc/fc_exch.c 		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
rp               2305 drivers/scsi/libfc/fc_exch.c 	if (ep->oxid != ntohs(rp->rrq_ox_id))
rp               2307 drivers/scsi/libfc/fc_exch.c 	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
rp                454 drivers/scsi/libfc/fc_lport.c 	} *rp;
rp                471 drivers/scsi/libfc/fc_lport.c 		len = sizeof(*rp);
rp                475 drivers/scsi/libfc/fc_lport.c 			len -= sizeof(rp->gen);
rp                479 drivers/scsi/libfc/fc_lport.c 			rp = fc_frame_payload_get(fp, len);
rp                480 drivers/scsi/libfc/fc_lport.c 			memset(rp, 0, len);
rp                481 drivers/scsi/libfc/fc_lport.c 			rp->rnid.rnid_cmd = ELS_LS_ACC;
rp                482 drivers/scsi/libfc/fc_lport.c 			rp->rnid.rnid_fmt = fmt;
rp                483 drivers/scsi/libfc/fc_lport.c 			rp->rnid.rnid_cid_len = sizeof(rp->cid);
rp                484 drivers/scsi/libfc/fc_lport.c 			rp->cid.rnid_wwpn = htonll(lport->wwpn);
rp                485 drivers/scsi/libfc/fc_lport.c 			rp->cid.rnid_wwnn = htonll(lport->wwnn);
rp                487 drivers/scsi/libfc/fc_lport.c 				rp->rnid.rnid_sid_len = sizeof(rp->gen);
rp                488 drivers/scsi/libfc/fc_lport.c 				memcpy(&rp->gen, &lport->rnid_gen,
rp                489 drivers/scsi/libfc/fc_lport.c 				       sizeof(rp->gen));
rp                947 drivers/scsi/qedf/qedf_io.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp               1009 drivers/scsi/qedf/qedf_io.c 	fcport = (struct qedf_rport *)&rp[1];
rp               2394 drivers/scsi/qedf/qedf_io.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp               2395 drivers/scsi/qedf/qedf_io.c 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
rp                654 drivers/scsi/qedf/qedf_main.c 	struct fc_rport_libfc_priv *rp = rport->dd_data;
rp                667 drivers/scsi/qedf/qedf_main.c 	fcport = (struct qedf_rport *)&rp[1];
rp               1392 drivers/scsi/qedf/qedf_main.c 	struct fc_rport_libfc_priv *rp;
rp               1408 drivers/scsi/qedf/qedf_main.c 		rp = rport->dd_data;
rp               1409 drivers/scsi/qedf/qedf_main.c 		fcport = (struct qedf_rport *)&rp[1];
rp               1508 drivers/scsi/qedf/qedf_main.c 		rp = rport->dd_data;
rp               1513 drivers/scsi/qedf/qedf_main.c 		fcport = (struct qedf_rport *)&rp[1];
rp               3561 drivers/scsi/qla2xxx/qla_gs.c 	struct fab_scan_rp *rp, *trp;
rp               3597 drivers/scsi/qla2xxx/qla_gs.c 		rp = &vha->scan.l[i];
rp               3600 drivers/scsi/qla2xxx/qla_gs.c 		wwn = wwn_to_u64(rp->port_name);
rp               3607 drivers/scsi/qla2xxx/qla_gs.c 			if (rp->id.b24 == trp->id.b24) {
rp               3613 drivers/scsi/qla2xxx/qla_gs.c 				    rp->id.b24, rp->port_name, trp->port_name);
rp               3618 drivers/scsi/qla2xxx/qla_gs.c 		if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
rp               3622 drivers/scsi/qla2xxx/qla_gs.c 		if ((rp->id.b.domain & 0xf0) == 0xf0)
rp               3630 drivers/scsi/qla2xxx/qla_gs.c 			if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
rp               3640 drivers/scsi/qla2xxx/qla_gs.c 			} else if (fcport->d_id.b24 != rp->id.b24 ||
rp               3644 drivers/scsi/qla2xxx/qla_gs.c 			fcport->d_id.b24 = rp->id.b24;
rp               3652 drivers/scsi/qla2xxx/qla_gs.c 			    __func__, __LINE__, rp->port_name);
rp               3653 drivers/scsi/qla2xxx/qla_gs.c 			qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
rp               3654 drivers/scsi/qla2xxx/qla_gs.c 			    rp->node_name, NULL, rp->fc4type);
rp               3771 drivers/scsi/qla2xxx/qla_gs.c 	struct fab_scan_rp *rp;
rp               3794 drivers/scsi/qla2xxx/qla_gs.c 				rp = &vha->scan.l[j];
rp               3795 drivers/scsi/qla2xxx/qla_gs.c 				rp->id = id;
rp               3796 drivers/scsi/qla2xxx/qla_gs.c 				memcpy(rp->port_name, d->port_name, 8);
rp               3798 drivers/scsi/qla2xxx/qla_gs.c 				rp->fc4type = FS_FC4TYPE_FCP;
rp               3801 drivers/scsi/qla2xxx/qla_gs.c 					rp = &vha->scan.l[k];
rp               3802 drivers/scsi/qla2xxx/qla_gs.c 					if (id.b24 == rp->id.b24) {
rp               3803 drivers/scsi/qla2xxx/qla_gs.c 						memcpy(rp->node_name,
rp               3815 drivers/scsi/qla2xxx/qla_gs.c 					rp = &vha->scan.l[k];
rp               3816 drivers/scsi/qla2xxx/qla_gs.c 					if (!memcmp(rp->port_name,
rp               3821 drivers/scsi/qla2xxx/qla_gs.c 						rp->fc4type |= FS_FC4TYPE_NVME;
rp               3830 drivers/scsi/qla2xxx/qla_gs.c 						rp = &vha->scan.l[k];
rp               3831 drivers/scsi/qla2xxx/qla_gs.c 						if (wwn_to_u64(rp->port_name)) {
rp               3834 drivers/scsi/qla2xxx/qla_gs.c 							rp->id = id;
rp               3835 drivers/scsi/qla2xxx/qla_gs.c 							memcpy(rp->port_name,
rp               3837 drivers/scsi/qla2xxx/qla_gs.c 							rp->fc4type =
rp               3845 drivers/scsi/qla2xxx/qla_gs.c 					rp = &vha->scan.l[k];
rp               3846 drivers/scsi/qla2xxx/qla_gs.c 					if (id.b24 == rp->id.b24) {
rp               3847 drivers/scsi/qla2xxx/qla_gs.c 						memcpy(rp->node_name,
rp               9096 drivers/scsi/qla4xxx/ql4_os.c 	struct srb *rp;
rp               9112 drivers/scsi/qla4xxx/ql4_os.c 		rp = (struct srb *) CMD_SP(cmd);
rp               9113 drivers/scsi/qla4xxx/ql4_os.c 		if (rp == NULL) {
rp               2102 drivers/scsi/sg.c 	Sg_request *rp = sfp->req_arr;
rp               2109 drivers/scsi/sg.c 		for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
rp               2110 drivers/scsi/sg.c 			if (!rp->parentfp)
rp               2116 drivers/scsi/sg.c 	memset(rp, 0, sizeof (Sg_request));
rp               2117 drivers/scsi/sg.c 	rp->parentfp = sfp;
rp               2118 drivers/scsi/sg.c 	rp->header.duration = jiffies_to_msecs(jiffies);
rp               2119 drivers/scsi/sg.c 	list_add_tail(&rp->entry, &sfp->rq_list);
rp               2121 drivers/scsi/sg.c 	return rp;
rp                295 drivers/scsi/st.c 	struct st_reject_data *rp;
rp                297 drivers/scsi/st.c 	for (rp=&(reject_list[0]); rp->vendor != NULL; rp++)
rp                298 drivers/scsi/st.c 		if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
rp                299 drivers/scsi/st.c 		    !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
rp                300 drivers/scsi/st.c 		    !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) {
rp                301 drivers/scsi/st.c 			if (rp->driver_hint)
rp                302 drivers/scsi/st.c 				return rp->driver_hint;
rp                325 drivers/scsi/xen-scsifront.c 	RING_IDX i, rp;
rp                328 drivers/scsi/xen-scsifront.c 	rp = info->ring.sring->rsp_prod;
rp                330 drivers/scsi/xen-scsifront.c 	for (i = info->ring.rsp_cons; i != rp; i++) {
rp                 37 drivers/soc/qcom/llcc-sdm845.c #define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
rp                 49 drivers/soc/qcom/llcc-sdm845.c 		.retain_on_pc = rp,		\
rp               2618 drivers/staging/comedi/comedi_fops.c 		unsigned int rp, n1, n2;
rp               2656 drivers/staging/comedi/comedi_fops.c 		rp = async->buf_read_ptr;
rp               2657 drivers/staging/comedi/comedi_fops.c 		n1 = min(n, async->prealloc_bufsz - rp);
rp               2659 drivers/staging/comedi/comedi_fops.c 		m = copy_to_user(buf, async->prealloc_buf + rp, n1);
rp                553 drivers/staging/isdn/avm/avmcard.h 		      capi_register_params *rp);
rp                576 drivers/staging/isdn/avm/avmcard.h 			 capi_register_params *rp);
rp                341 drivers/staging/isdn/avm/b1.c 		      capi_register_params *rp)
rp                347 drivers/staging/isdn/avm/b1.c 	int nconn, want = rp->level3cnt;
rp                358 drivers/staging/isdn/avm/b1.c 	b1_put_word(port, rp->datablkcnt);
rp                359 drivers/staging/isdn/avm/b1.c 	b1_put_word(port, rp->datablklen);
rp                772 drivers/staging/isdn/avm/b1dma.c 			 capi_register_params *rp)
rp                777 drivers/staging/isdn/avm/b1dma.c 	int want = rp->level3cnt;
rp                798 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, rp->datablkcnt);
rp                799 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, rp->datablklen);
rp                949 drivers/staging/isdn/avm/c4.c 			     capi_register_params *rp)
rp                954 drivers/staging/isdn/avm/c4.c 	int want = rp->level3cnt;
rp                978 drivers/staging/isdn/avm/c4.c 		_put_word(&p, rp->datablkcnt);
rp                979 drivers/staging/isdn/avm/c4.c 		_put_word(&p, rp->datablklen);
rp                 83 drivers/staging/isdn/gigaset/capi.c 	struct capi_register_params rp;
rp                995 drivers/staging/isdn/gigaset/capi.c 				  capi_register_params *rp)
rp               1003 drivers/staging/isdn/gigaset/capi.c 		__func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
rp               1018 drivers/staging/isdn/gigaset/capi.c 	ap->rp = *rp;
rp               1364 drivers/staging/isdn/gigaset/capi.c 	bcs->rx_bufsize = ap->rp.datablklen;
rp               1666 drivers/staging/isdn/gigaset/capi.c 		bcs->rx_bufsize = ap->rp.datablklen;
rp                 41 drivers/staging/isdn/hysdn/hycapi.c 	capi_register_params rp;
rp                152 drivers/staging/isdn/hysdn/hycapi.c 			 capi_register_params *rp)
rp                166 drivers/staging/isdn/hysdn/hycapi.c 	MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen;
rp                180 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
rp                181 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
rp                182 drivers/staging/isdn/hysdn/hycapi.c 	skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
rp                206 drivers/staging/isdn/hysdn/hycapi.c 						 &hycapi_applications[i].rp);
rp                224 drivers/staging/isdn/hysdn/hycapi.c 		     capi_register_params *rp)
rp                237 drivers/staging/isdn/hysdn/hycapi.c 	MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt;
rp                238 drivers/staging/isdn/hysdn/hycapi.c 	rp->datablkcnt = MaxBDataBlocks;
rp                239 drivers/staging/isdn/hysdn/hycapi.c 	MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen;
rp                240 drivers/staging/isdn/hysdn/hycapi.c 	rp->datablklen = MaxBDataLen;
rp                242 drivers/staging/isdn/hysdn/hycapi.c 	MaxLogicalConnections = rp->level3cnt;
rp                250 drivers/staging/isdn/hysdn/hycapi.c 	rp->level3cnt = MaxLogicalConnections;
rp                251 drivers/staging/isdn/hysdn/hycapi.c 	memcpy(&hycapi_applications[appl - 1].rp,
rp                252 drivers/staging/isdn/hysdn/hycapi.c 	       rp, sizeof(capi_register_params));
rp                386 drivers/staging/isdn/hysdn/hycapi.c 					 &(hycapi_applications[appl_id - 1].rp));
rp                568 drivers/staging/isdn/hysdn/hycapi.c 					 hycapi_applications[ApplId - 1].rp.datablkcnt);
rp                598 drivers/staging/isdn/hysdn/hycapi.c 				 hycapi_applications[ApplId - 1].rp.datablkcnt);
rp                411 drivers/staging/ks7010/ks7010_sdio.c 	struct rx_device_buffer *rp;
rp                414 drivers/staging/ks7010/ks7010_sdio.c 		rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
rp                415 drivers/staging/ks7010/ks7010_sdio.c 		hostif_receive(priv, rp->data, rp->size);
rp                467 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	struct rapl_package *rp;
rp                473 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
rp                474 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	if (!rp) {
rp                475 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		rp = rapl_add_package(cpu, &rapl_mmio_priv);
rp                476 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		if (IS_ERR(rp))
rp                477 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 			return PTR_ERR(rp);
rp                479 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	cpumask_set_cpu(cpu, &rp->cpumask);
rp                485 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	struct rapl_package *rp;
rp                488 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
rp                489 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	if (!rp)
rp                492 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	cpumask_clear_cpu(cpu, &rp->cpumask);
rp                493 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	lead_cpu = cpumask_first(&rp->cpumask);
rp                495 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		rapl_remove_package(rp);
rp                496 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	else if (rp->lead_cpu == cpu)
rp                497 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		rp->lead_cpu = lead_cpu;
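
The processor_thermal_device.c lines show how a per-package structure tracks its member CPUs across hotplug: the online path adds the CPU to the package cpumask (creating the package if needed), the offline path clears it, frees the package when the mask empties, and otherwise migrates the lead CPU. Below is a deliberately simplified single-package, user-space model of the offline path using a plain bitmask; the rapl_* helpers are only alluded to in comments, not reimplemented.

#include <stdint.h>
#include <stdlib.h>

struct pkg {
	uint32_t cpumask;                /* bit n set = CPU n is in the package */
	int lead_cpu;                    /* CPU that performs the package's register work */
};

/* Offline path: drop the CPU from the mask, free the package when it was the
 * last member, otherwise hand leadership to the lowest remaining CPU when the
 * departing CPU was the lead (mirroring cpumask_first() in the driver). */
static void pkg_cpu_offline(struct pkg **pkgp, int cpu)
{
	struct pkg *p = *pkgp;

	if (!p)
		return;
	p->cpumask &= ~(1u << cpu);
	if (p->cpumask == 0) {
		free(p);                 /* stands in for rapl_remove_package() */
		*pkgp = NULL;
	} else if (p->lead_cpu == cpu) {
		p->lead_cpu = __builtin_ctz(p->cpumask);  /* first remaining set bit */
	}
}
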
rp               1734 drivers/tty/ipwireless/hardware.c 	struct ipw_rx_packet *rp, *rq;
rp               1750 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
rp               1751 drivers/tty/ipwireless/hardware.c 		list_del(&rp->queue);
rp               1752 drivers/tty/ipwireless/hardware.c 		kfree(rp);
rp               1755 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
rp               1756 drivers/tty/ipwireless/hardware.c 		list_del(&rp->queue);
rp               1757 drivers/tty/ipwireless/hardware.c 		kfree(rp);
rp                515 drivers/tty/isicom.c 	unsigned char *rp;
rp                644 drivers/tty/isicom.c 		count = tty_prepare_flip_string(&port->port, &rp,
rp                649 drivers/tty/isicom.c 		insw(base, rp, word_count);
rp               1084 drivers/tty/serial/ip22zilog.c 	struct zilog_layout *rp;
rp               1101 drivers/tty/serial/ip22zilog.c 			ip22zilog_chip_regs[chip] = rp = get_zs(chip);
rp               1103 drivers/tty/serial/ip22zilog.c 			up[(chip * 2) + 0].port.membase = (char *) &rp->channelB;
rp               1104 drivers/tty/serial/ip22zilog.c 			up[(chip * 2) + 1].port.membase = (char *) &rp->channelA;
rp               1108 drivers/tty/serial/ip22zilog.c 				(unsigned long) ioremap((unsigned long) &rp->channelB, 8);
rp               1110 drivers/tty/serial/ip22zilog.c 				(unsigned long) ioremap((unsigned long) &rp->channelA, 8);
rp                682 drivers/tty/serial/rp2.c 		struct rp2_uart_port *rp = &card->ports[i];
rp                686 drivers/tty/serial/rp2.c 		rp->asic_base = card->bar1;
rp                687 drivers/tty/serial/rp2.c 		rp->base = card->bar1 + RP2_PORT_BASE + j*RP2_PORT_SPACING;
rp                688 drivers/tty/serial/rp2.c 		rp->ucode = card->bar1 + RP2_UCODE_BASE + j*RP2_UCODE_SPACING;
rp                689 drivers/tty/serial/rp2.c 		rp->card = card;
rp                690 drivers/tty/serial/rp2.c 		rp->idx = j;
rp                692 drivers/tty/serial/rp2.c 		p = &rp->port;
rp                702 drivers/tty/serial/rp2.c 		p->membase = rp->base;
rp                706 drivers/tty/serial/rp2.c 			rp->asic_base += RP2_ASIC_SPACING;
rp                707 drivers/tty/serial/rp2.c 			rp->base += RP2_ASIC_SPACING;
rp                708 drivers/tty/serial/rp2.c 			rp->ucode += RP2_ASIC_SPACING;
rp                712 drivers/tty/serial/rp2.c 		rp2_init_port(rp, fw);
rp               1440 drivers/tty/serial/sunsu.c 	struct resource *rp;
rp               1462 drivers/tty/serial/sunsu.c 	rp = &op->resource[0];
rp               1463 drivers/tty/serial/sunsu.c 	up->port.mapbase = rp->start;
rp               1464 drivers/tty/serial/sunsu.c 	up->reg_size = resource_size(rp);
rp               1465 drivers/tty/serial/sunsu.c 	up->port.membase = of_ioremap(rp, 0, up->reg_size, "su");
rp               1408 drivers/tty/serial/sunzilog.c 	struct zilog_layout __iomem *rp;
rp               1427 drivers/tty/serial/sunzilog.c 	rp = sunzilog_chip_regs[inst];
rp               1436 drivers/tty/serial/sunzilog.c 	up[0].port.membase = (void __iomem *) &rp->channelA;
rp               1453 drivers/tty/serial/sunzilog.c 	up[1].port.membase = (void __iomem *) &rp->channelB;
rp               1476 drivers/tty/serial/sunzilog.c 				   rp, sizeof(struct zilog_layout));
rp               1487 drivers/tty/serial/sunzilog.c 				   rp, sizeof(struct zilog_layout));
rp                203 drivers/usb/mon/mon_bin.c static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
rp                207 drivers/usb/mon/mon_bin.c 	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
rp                210 drivers/usb/mon/mon_bin.c #define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)
rp                220 drivers/usb/mon/mon_bin.c static void mon_buff_area_fill(const struct mon_reader_bin *rp,
rp                222 drivers/usb/mon/mon_bin.c static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
rp                295 drivers/usb/mon/mon_bin.c static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
rp                301 drivers/usb/mon/mon_bin.c 	if (rp->b_cnt + size > rp->b_size)
rp                303 drivers/usb/mon/mon_bin.c 	offset = rp->b_in;
rp                304 drivers/usb/mon/mon_bin.c 	rp->b_cnt += size;
rp                305 drivers/usb/mon/mon_bin.c 	if ((rp->b_in += size) >= rp->b_size)
rp                306 drivers/usb/mon/mon_bin.c 		rp->b_in -= rp->b_size;
rp                320 drivers/usb/mon/mon_bin.c static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
rp                327 drivers/usb/mon/mon_bin.c 	if (rp->b_cnt + size > rp->b_size)
rp                329 drivers/usb/mon/mon_bin.c 	if (rp->b_in + size > rp->b_size) {
rp                335 drivers/usb/mon/mon_bin.c 		fill_size = rp->b_size - rp->b_in;
rp                336 drivers/usb/mon/mon_bin.c 		if (rp->b_cnt + size + fill_size > rp->b_size)
rp                338 drivers/usb/mon/mon_bin.c 		mon_buff_area_fill(rp, rp->b_in, fill_size);
rp                341 drivers/usb/mon/mon_bin.c 		rp->b_in = size;
rp                342 drivers/usb/mon/mon_bin.c 		rp->b_cnt += size + fill_size;
rp                343 drivers/usb/mon/mon_bin.c 	} else if (rp->b_in + size == rp->b_size) {
rp                344 drivers/usb/mon/mon_bin.c 		offset = rp->b_in;
rp                345 drivers/usb/mon/mon_bin.c 		rp->b_in = 0;
rp                346 drivers/usb/mon/mon_bin.c 		rp->b_cnt += size;
rp                348 drivers/usb/mon/mon_bin.c 		offset = rp->b_in;
rp                349 drivers/usb/mon/mon_bin.c 		rp->b_in += size;
rp                350 drivers/usb/mon/mon_bin.c 		rp->b_cnt += size;
rp                359 drivers/usb/mon/mon_bin.c static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
rp                363 drivers/usb/mon/mon_bin.c 	rp->b_cnt -= size;
rp                364 drivers/usb/mon/mon_bin.c 	if (rp->b_in < size)
rp                365 drivers/usb/mon/mon_bin.c 		rp->b_in += rp->b_size;
rp                366 drivers/usb/mon/mon_bin.c 	rp->b_in -= size;
rp                373 drivers/usb/mon/mon_bin.c static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
rp                377 drivers/usb/mon/mon_bin.c 	rp->b_cnt -= size;
rp                378 drivers/usb/mon/mon_bin.c 	if ((rp->b_out += size) >= rp->b_size)
rp                379 drivers/usb/mon/mon_bin.c 		rp->b_out -= rp->b_size;
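
mon_bin.c's buffer management, visible in the excerpts above, is a byte-granular ring: b_in marks where the next event is carved out, b_out where the reader releases space, b_cnt how many bytes are currently live, and both indices wrap at b_size. The sketch below reproduces just the alloc/free arithmetic in stand-alone form, assuming size never exceeds b_size; it omits the contiguous-allocation and filler-record handling shown in mon_buff_area_alloc_contiguous.

#include <stdbool.h>

struct byte_ring {
	unsigned int b_in, b_out, b_cnt, b_size;
};

/* Reserve 'size' bytes at the write position; returns the offset of the
 * reservation, or sets *ok to false when the ring cannot hold it. */
static unsigned int ring_alloc(struct byte_ring *r, unsigned int size, bool *ok)
{
	unsigned int offset;

	if (r->b_cnt + size > r->b_size) {
		*ok = false;
		return 0;
	}
	offset = r->b_in;
	r->b_cnt += size;
	if ((r->b_in += size) >= r->b_size)
		r->b_in -= r->b_size;
	*ok = true;
	return offset;
}

/* Release 'size' bytes at the read position, advancing b_out with wrap. */
static void ring_free(struct byte_ring *r, unsigned int size)
{
	r->b_cnt -= size;
	if ((r->b_out += size) >= r->b_size)
		r->b_out -= r->b_size;
}
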
rp                382 drivers/usb/mon/mon_bin.c static void mon_buff_area_fill(const struct mon_reader_bin *rp,
rp                387 drivers/usb/mon/mon_bin.c 	ep = MON_OFF2HDR(rp, offset);
rp                403 drivers/usb/mon/mon_bin.c static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
rp                417 drivers/usb/mon/mon_bin.c 		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
rp                432 drivers/usb/mon/mon_bin.c 			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
rp                447 drivers/usb/mon/mon_bin.c static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
rp                465 drivers/usb/mon/mon_bin.c static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
rp                474 drivers/usb/mon/mon_bin.c 		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
rp                479 drivers/usb/mon/mon_bin.c 		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
rp                485 drivers/usb/mon/mon_bin.c static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
rp                502 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                520 drivers/usb/mon/mon_bin.c 			length = mon_bin_collate_isodesc(rp, urb, ndesc);
rp                530 drivers/usb/mon/mon_bin.c 	if (length >= rp->b_size/5)
rp                531 drivers/usb/mon/mon_bin.c 		length = rp->b_size/5;
rp                548 drivers/usb/mon/mon_bin.c 	if (rp->mmap_active) {
rp                549 drivers/usb/mon/mon_bin.c 		offset = mon_buff_area_alloc_contiguous(rp,
rp                552 drivers/usb/mon/mon_bin.c 		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
rp                555 drivers/usb/mon/mon_bin.c 		rp->cnt_lost++;
rp                556 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp                560 drivers/usb/mon/mon_bin.c 	ep = MON_OFF2HDR(rp, offset);
rp                561 drivers/usb/mon/mon_bin.c 	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;
rp                597 drivers/usb/mon/mon_bin.c 		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
rp                598 drivers/usb/mon/mon_bin.c 		if ((offset += lendesc) >= rp->b_size)
rp                599 drivers/usb/mon/mon_bin.c 			offset -= rp->b_size;
rp                603 drivers/usb/mon/mon_bin.c 		length = mon_bin_get_data(rp, offset, urb, length,
rp                609 drivers/usb/mon/mon_bin.c 			mon_buff_area_shrink(rp, delta);
rp                615 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                617 drivers/usb/mon/mon_bin.c 	wake_up(&rp->b_wait);
rp                622 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = data;
rp                623 drivers/usb/mon/mon_bin.c 	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
rp                628 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = data;
rp                629 drivers/usb/mon/mon_bin.c 	mon_bin_event(rp, urb, 'C', status);
rp                634 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = data;
rp                642 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                644 drivers/usb/mon/mon_bin.c 	offset = mon_buff_area_alloc(rp, PKT_SIZE);
rp                647 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp                651 drivers/usb/mon/mon_bin.c 	ep = MON_OFF2HDR(rp, offset);
rp                668 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                670 drivers/usb/mon/mon_bin.c 	wake_up(&rp->b_wait);
rp                676 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp;
rp                692 drivers/usb/mon/mon_bin.c 	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
rp                693 drivers/usb/mon/mon_bin.c 	if (rp == NULL) {
rp                697 drivers/usb/mon/mon_bin.c 	spin_lock_init(&rp->b_lock);
rp                698 drivers/usb/mon/mon_bin.c 	init_waitqueue_head(&rp->b_wait);
rp                699 drivers/usb/mon/mon_bin.c 	mutex_init(&rp->fetch_lock);
rp                700 drivers/usb/mon/mon_bin.c 	rp->b_size = BUFF_DFL;
rp                702 drivers/usb/mon/mon_bin.c 	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
rp                703 drivers/usb/mon/mon_bin.c 	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
rp                708 drivers/usb/mon/mon_bin.c 	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
rp                711 drivers/usb/mon/mon_bin.c 	rp->r.m_bus = mbus;
rp                712 drivers/usb/mon/mon_bin.c 	rp->r.r_data = rp;
rp                713 drivers/usb/mon/mon_bin.c 	rp->r.rnf_submit = mon_bin_submit;
rp                714 drivers/usb/mon/mon_bin.c 	rp->r.rnf_error = mon_bin_error;
rp                715 drivers/usb/mon/mon_bin.c 	rp->r.rnf_complete = mon_bin_complete;
rp                717 drivers/usb/mon/mon_bin.c 	mon_reader_add(mbus, &rp->r);
rp                719 drivers/usb/mon/mon_bin.c 	file->private_data = rp;
rp                724 drivers/usb/mon/mon_bin.c 	kfree(rp->b_vec);
rp                726 drivers/usb/mon/mon_bin.c 	kfree(rp);
rp                737 drivers/usb/mon/mon_bin.c static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
rp                747 drivers/usb/mon/mon_bin.c 	mutex_lock(&rp->fetch_lock);
rp                749 drivers/usb/mon/mon_bin.c 	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
rp                750 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp                754 drivers/usb/mon/mon_bin.c 	ep = MON_OFF2HDR(rp, rp->b_out);
rp                757 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp                762 drivers/usb/mon/mon_bin.c 	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;
rp                764 drivers/usb/mon/mon_bin.c 	if (copy_from_buf(rp, offset, data, step_len)) {
rp                765 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp                769 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                770 drivers/usb/mon/mon_bin.c 	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
rp                771 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                772 drivers/usb/mon/mon_bin.c 	rp->b_read = 0;
rp                774 drivers/usb/mon/mon_bin.c 	mutex_unlock(&rp->fetch_lock);
rp                780 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = file->private_data;
rp                781 drivers/usb/mon/mon_bin.c 	struct mon_bus* mbus = rp->r.m_bus;
rp                790 drivers/usb/mon/mon_bin.c 	mon_reader_del(mbus, &rp->r);
rp                792 drivers/usb/mon/mon_bin.c 	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
rp                793 drivers/usb/mon/mon_bin.c 	kfree(rp->b_vec);
rp                794 drivers/usb/mon/mon_bin.c 	kfree(rp);
rp                803 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = file->private_data;
rp                813 drivers/usb/mon/mon_bin.c 	mutex_lock(&rp->fetch_lock);
rp                815 drivers/usb/mon/mon_bin.c 	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
rp                816 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp                820 drivers/usb/mon/mon_bin.c 	ep = MON_OFF2HDR(rp, rp->b_out);
rp                822 drivers/usb/mon/mon_bin.c 	if (rp->b_read < hdrbytes) {
rp                823 drivers/usb/mon/mon_bin.c 		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
rp                824 drivers/usb/mon/mon_bin.c 		ptr = ((char *)ep) + rp->b_read;
rp                826 drivers/usb/mon/mon_bin.c 			mutex_unlock(&rp->fetch_lock);
rp                831 drivers/usb/mon/mon_bin.c 		rp->b_read += step_len;
rp                835 drivers/usb/mon/mon_bin.c 	if (rp->b_read >= hdrbytes) {
rp                837 drivers/usb/mon/mon_bin.c 		step_len -= rp->b_read - hdrbytes;
rp                840 drivers/usb/mon/mon_bin.c 		offset = rp->b_out + PKT_SIZE;
rp                841 drivers/usb/mon/mon_bin.c 		offset += rp->b_read - hdrbytes;
rp                842 drivers/usb/mon/mon_bin.c 		if (offset >= rp->b_size)
rp                843 drivers/usb/mon/mon_bin.c 			offset -= rp->b_size;
rp                844 drivers/usb/mon/mon_bin.c 		if (copy_from_buf(rp, offset, buf, step_len)) {
rp                845 drivers/usb/mon/mon_bin.c 			mutex_unlock(&rp->fetch_lock);
rp                850 drivers/usb/mon/mon_bin.c 		rp->b_read += step_len;
rp                857 drivers/usb/mon/mon_bin.c 	if (rp->b_read >= hdrbytes + ep->len_cap) {
rp                858 drivers/usb/mon/mon_bin.c 		spin_lock_irqsave(&rp->b_lock, flags);
rp                859 drivers/usb/mon/mon_bin.c 		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
rp                860 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp                861 drivers/usb/mon/mon_bin.c 		rp->b_read = 0;
rp                864 drivers/usb/mon/mon_bin.c 	mutex_unlock(&rp->fetch_lock);
rp                872 drivers/usb/mon/mon_bin.c static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
rp                878 drivers/usb/mon/mon_bin.c 	mutex_lock(&rp->fetch_lock);
rp                879 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                881 drivers/usb/mon/mon_bin.c 		if (MON_RING_EMPTY(rp))
rp                884 drivers/usb/mon/mon_bin.c 		ep = MON_OFF2HDR(rp, rp->b_out);
rp                885 drivers/usb/mon/mon_bin.c 		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
rp                887 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                888 drivers/usb/mon/mon_bin.c 	rp->b_read = 0;
rp                889 drivers/usb/mon/mon_bin.c 	mutex_unlock(&rp->fetch_lock);
rp                898 drivers/usb/mon/mon_bin.c static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
rp                909 drivers/usb/mon/mon_bin.c 	mutex_lock(&rp->fetch_lock);
rp                911 drivers/usb/mon/mon_bin.c 	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
rp                912 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp                916 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                917 drivers/usb/mon/mon_bin.c 	avail = rp->b_cnt;
rp                918 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                920 drivers/usb/mon/mon_bin.c 	cur_out = rp->b_out;
rp                927 drivers/usb/mon/mon_bin.c 		ep = MON_OFF2HDR(rp, cur_out);
rp                929 drivers/usb/mon/mon_bin.c 			mutex_unlock(&rp->fetch_lock);
rp                936 drivers/usb/mon/mon_bin.c 		if ((cur_out += size) >= rp->b_size)
rp                937 drivers/usb/mon/mon_bin.c 			cur_out -= rp->b_size;
rp                941 drivers/usb/mon/mon_bin.c 	mutex_unlock(&rp->fetch_lock);
rp                949 drivers/usb/mon/mon_bin.c static int mon_bin_queued(struct mon_reader_bin *rp)
rp                958 drivers/usb/mon/mon_bin.c 	mutex_lock(&rp->fetch_lock);
rp                960 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp                961 drivers/usb/mon/mon_bin.c 	avail = rp->b_cnt;
rp                962 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp                964 drivers/usb/mon/mon_bin.c 	cur_out = rp->b_out;
rp                968 drivers/usb/mon/mon_bin.c 		ep = MON_OFF2HDR(rp, cur_out);
rp                973 drivers/usb/mon/mon_bin.c 		if ((cur_out += size) >= rp->b_size)
rp                974 drivers/usb/mon/mon_bin.c 			cur_out -= rp->b_size;
rp                978 drivers/usb/mon/mon_bin.c 	mutex_unlock(&rp->fetch_lock);
rp                986 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = file->private_data;
rp                998 drivers/usb/mon/mon_bin.c 		spin_lock_irqsave(&rp->b_lock, flags);
rp                999 drivers/usb/mon/mon_bin.c 		if (!MON_RING_EMPTY(rp)) {
rp               1000 drivers/usb/mon/mon_bin.c 			ep = MON_OFF2HDR(rp, rp->b_out);
rp               1003 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1007 drivers/usb/mon/mon_bin.c 		mutex_lock(&rp->fetch_lock);
rp               1008 drivers/usb/mon/mon_bin.c 		ret = rp->b_size;
rp               1009 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp               1040 drivers/usb/mon/mon_bin.c 		mutex_lock(&rp->fetch_lock);
rp               1041 drivers/usb/mon/mon_bin.c 		spin_lock_irqsave(&rp->b_lock, flags);
rp               1042 drivers/usb/mon/mon_bin.c 		if (rp->mmap_active) {
rp               1047 drivers/usb/mon/mon_bin.c 			mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
rp               1048 drivers/usb/mon/mon_bin.c 			kfree(rp->b_vec);
rp               1049 drivers/usb/mon/mon_bin.c 			rp->b_vec  = vec;
rp               1050 drivers/usb/mon/mon_bin.c 			rp->b_size = size;
rp               1051 drivers/usb/mon/mon_bin.c 			rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
rp               1052 drivers/usb/mon/mon_bin.c 			rp->cnt_lost = 0;
rp               1054 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1055 drivers/usb/mon/mon_bin.c 		mutex_unlock(&rp->fetch_lock);
rp               1060 drivers/usb/mon/mon_bin.c 		ret = mon_bin_flush(rp, arg);
rp               1074 drivers/usb/mon/mon_bin.c 		ret = mon_bin_get_event(file, rp, getb.hdr,
rp               1091 drivers/usb/mon/mon_bin.c 			ret = mon_bin_flush(rp, mfetch.nflush);
rp               1097 drivers/usb/mon/mon_bin.c 		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
rp               1111 drivers/usb/mon/mon_bin.c 		spin_lock_irqsave(&rp->b_lock, flags);
rp               1112 drivers/usb/mon/mon_bin.c 		ndropped = rp->cnt_lost;
rp               1113 drivers/usb/mon/mon_bin.c 		rp->cnt_lost = 0;
rp               1114 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1115 drivers/usb/mon/mon_bin.c 		nevents = mon_bin_queued(rp);
rp               1137 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = file->private_data;
rp               1151 drivers/usb/mon/mon_bin.c 		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
rp               1170 drivers/usb/mon/mon_bin.c 			ret = mon_bin_flush(rp, mfetch.nflush32);
rp               1176 drivers/usb/mon/mon_bin.c 		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
rp               1204 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = file->private_data;
rp               1209 drivers/usb/mon/mon_bin.c 		poll_wait(file, &rp->b_wait, wait);
rp               1211 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp               1212 drivers/usb/mon/mon_bin.c 	if (!MON_RING_EMPTY(rp))
rp               1214 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1224 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vma->vm_private_data;
rp               1227 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp               1228 drivers/usb/mon/mon_bin.c 	rp->mmap_active++;
rp               1229 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1236 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vma->vm_private_data;
rp               1237 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp               1238 drivers/usb/mon/mon_bin.c 	rp->mmap_active--;
rp               1239 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1247 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
rp               1252 drivers/usb/mon/mon_bin.c 	if (offset >= rp->b_size)
rp               1255 drivers/usb/mon/mon_bin.c 	pageptr = rp->b_vec[chunk_idx].pg;
rp               1292 drivers/usb/mon/mon_bin.c static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
rp               1297 drivers/usb/mon/mon_bin.c 	add_wait_queue(&rp->b_wait, &waita);
rp               1300 drivers/usb/mon/mon_bin.c 	spin_lock_irqsave(&rp->b_lock, flags);
rp               1301 drivers/usb/mon/mon_bin.c 	while (MON_RING_EMPTY(rp)) {
rp               1302 drivers/usb/mon/mon_bin.c 		spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1306 drivers/usb/mon/mon_bin.c 			remove_wait_queue(&rp->b_wait, &waita);
rp               1311 drivers/usb/mon/mon_bin.c 			remove_wait_queue(&rp->b_wait, &waita);
rp               1316 drivers/usb/mon/mon_bin.c 		spin_lock_irqsave(&rp->b_lock, flags);
rp               1318 drivers/usb/mon/mon_bin.c 	spin_unlock_irqrestore(&rp->b_lock, flags);
rp               1321 drivers/usb/mon/mon_bin.c 	remove_wait_queue(&rp->b_wait, &waita);
rp                106 drivers/usb/mon/mon_text.c     mon_text_read_wait(struct mon_reader_text *rp, struct file *file);
rp                107 drivers/usb/mon/mon_text.c static void mon_text_read_head_t(struct mon_reader_text *rp,
rp                109 drivers/usb/mon/mon_text.c static void mon_text_read_head_u(struct mon_reader_text *rp,
rp                111 drivers/usb/mon/mon_text.c static void mon_text_read_statset(struct mon_reader_text *rp,
rp                113 drivers/usb/mon/mon_text.c static void mon_text_read_intstat(struct mon_reader_text *rp,
rp                115 drivers/usb/mon/mon_text.c static void mon_text_read_isostat(struct mon_reader_text *rp,
rp                117 drivers/usb/mon/mon_text.c static void mon_text_read_isodesc(struct mon_reader_text *rp,
rp                119 drivers/usb/mon/mon_text.c static void mon_text_read_data(struct mon_reader_text *rp,
rp                193 drivers/usb/mon/mon_text.c static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
rp                204 drivers/usb/mon/mon_text.c 	if (rp->nevents >= EVENT_MAX ||
rp                205 drivers/usb/mon/mon_text.c 	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
rp                206 drivers/usb/mon/mon_text.c 		rp->r.m_bus->cnt_text_lost++;
rp                250 drivers/usb/mon/mon_text.c 	ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus);
rp                252 drivers/usb/mon/mon_text.c 			rp->r.m_bus);
rp                254 drivers/usb/mon/mon_text.c 	rp->nevents++;
rp                255 drivers/usb/mon/mon_text.c 	list_add_tail(&ep->e_link, &rp->e_list);
rp                256 drivers/usb/mon/mon_text.c 	wake_up(&rp->wait);
rp                261 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = data;
rp                262 drivers/usb/mon/mon_text.c 	mon_text_event(rp, urb, 'S', -EINPROGRESS);
rp                267 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = data;
rp                268 drivers/usb/mon/mon_text.c 	mon_text_event(rp, urb, 'C', status);
rp                273 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = data;
rp                276 drivers/usb/mon/mon_text.c 	if (rp->nevents >= EVENT_MAX ||
rp                277 drivers/usb/mon/mon_text.c 	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
rp                278 drivers/usb/mon/mon_text.c 		rp->r.m_bus->cnt_text_lost++;
rp                296 drivers/usb/mon/mon_text.c 	rp->nevents++;
rp                297 drivers/usb/mon/mon_text.c 	list_add_tail(&ep->e_link, &rp->e_list);
rp                298 drivers/usb/mon/mon_text.c 	wake_up(&rp->wait);
rp                304 drivers/usb/mon/mon_text.c static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp,
rp                311 drivers/usb/mon/mon_text.c 	if (list_empty(&rp->e_list)) {
rp                315 drivers/usb/mon/mon_text.c 	p = rp->e_list.next;
rp                317 drivers/usb/mon/mon_text.c 	--rp->nevents;
rp                327 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp;
rp                333 drivers/usb/mon/mon_text.c 	rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL);
rp                334 drivers/usb/mon/mon_text.c 	if (rp == NULL) {
rp                338 drivers/usb/mon/mon_text.c 	INIT_LIST_HEAD(&rp->e_list);
rp                339 drivers/usb/mon/mon_text.c 	init_waitqueue_head(&rp->wait);
rp                340 drivers/usb/mon/mon_text.c 	mutex_init(&rp->printf_lock);
rp                342 drivers/usb/mon/mon_text.c 	rp->printf_size = PRINTF_DFL;
rp                343 drivers/usb/mon/mon_text.c 	rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL);
rp                344 drivers/usb/mon/mon_text.c 	if (rp->printf_buf == NULL) {
rp                349 drivers/usb/mon/mon_text.c 	rp->r.m_bus = mbus;
rp                350 drivers/usb/mon/mon_text.c 	rp->r.r_data = rp;
rp                351 drivers/usb/mon/mon_text.c 	rp->r.rnf_submit = mon_text_submit;
rp                352 drivers/usb/mon/mon_text.c 	rp->r.rnf_error = mon_text_error;
rp                353 drivers/usb/mon/mon_text.c 	rp->r.rnf_complete = mon_text_complete;
rp                355 drivers/usb/mon/mon_text.c 	snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp                356 drivers/usb/mon/mon_text.c 	rp->e_slab = kmem_cache_create(rp->slab_name,
rp                359 drivers/usb/mon/mon_text.c 	if (rp->e_slab == NULL) {
rp                364 drivers/usb/mon/mon_text.c 	mon_reader_add(mbus, &rp->r);
rp                366 drivers/usb/mon/mon_text.c 	file->private_data = rp;
rp                373 drivers/usb/mon/mon_text.c 	kfree(rp->printf_buf);
rp                375 drivers/usb/mon/mon_text.c 	kfree(rp);
rp                381 drivers/usb/mon/mon_text.c static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
rp                384 drivers/usb/mon/mon_text.c 	const size_t togo = min(nbytes, rp->printf_togo);
rp                386 drivers/usb/mon/mon_text.c 	if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
rp                388 drivers/usb/mon/mon_text.c 	rp->printf_togo -= togo;
rp                389 drivers/usb/mon/mon_text.c 	rp->printf_offset += togo;
rp                397 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = file->private_data;
rp                402 drivers/usb/mon/mon_text.c 	mutex_lock(&rp->printf_lock);
rp                404 drivers/usb/mon/mon_text.c 	if (rp->printf_togo == 0) {
rp                406 drivers/usb/mon/mon_text.c 		ep = mon_text_read_wait(rp, file);
rp                408 drivers/usb/mon/mon_text.c 			mutex_unlock(&rp->printf_lock);
rp                412 drivers/usb/mon/mon_text.c 		ptr.pbuf = rp->printf_buf;
rp                413 drivers/usb/mon/mon_text.c 		ptr.limit = rp->printf_size;
rp                415 drivers/usb/mon/mon_text.c 		mon_text_read_head_t(rp, &ptr, ep);
rp                416 drivers/usb/mon/mon_text.c 		mon_text_read_statset(rp, &ptr, ep);
rp                419 drivers/usb/mon/mon_text.c 		mon_text_read_data(rp, &ptr, ep);
rp                421 drivers/usb/mon/mon_text.c 		rp->printf_togo = ptr.cnt;
rp                422 drivers/usb/mon/mon_text.c 		rp->printf_offset = 0;
rp                424 drivers/usb/mon/mon_text.c 		kmem_cache_free(rp->e_slab, ep);
rp                427 drivers/usb/mon/mon_text.c 	ret = mon_text_copy_to_user(rp, buf, nbytes);
rp                428 drivers/usb/mon/mon_text.c 	mutex_unlock(&rp->printf_lock);
rp                436 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = file->private_data;
rp                441 drivers/usb/mon/mon_text.c 	mutex_lock(&rp->printf_lock);
rp                443 drivers/usb/mon/mon_text.c 	if (rp->printf_togo == 0) {
rp                445 drivers/usb/mon/mon_text.c 		ep = mon_text_read_wait(rp, file);
rp                447 drivers/usb/mon/mon_text.c 			mutex_unlock(&rp->printf_lock);
rp                451 drivers/usb/mon/mon_text.c 		ptr.pbuf = rp->printf_buf;
rp                452 drivers/usb/mon/mon_text.c 		ptr.limit = rp->printf_size;
rp                454 drivers/usb/mon/mon_text.c 		mon_text_read_head_u(rp, &ptr, ep);
rp                456 drivers/usb/mon/mon_text.c 			mon_text_read_statset(rp, &ptr, ep);
rp                458 drivers/usb/mon/mon_text.c 			mon_text_read_isostat(rp, &ptr, ep);
rp                459 drivers/usb/mon/mon_text.c 			mon_text_read_isodesc(rp, &ptr, ep);
rp                461 drivers/usb/mon/mon_text.c 			mon_text_read_intstat(rp, &ptr, ep);
rp                463 drivers/usb/mon/mon_text.c 			mon_text_read_statset(rp, &ptr, ep);
rp                467 drivers/usb/mon/mon_text.c 		mon_text_read_data(rp, &ptr, ep);
rp                469 drivers/usb/mon/mon_text.c 		rp->printf_togo = ptr.cnt;
rp                470 drivers/usb/mon/mon_text.c 		rp->printf_offset = 0;
rp                472 drivers/usb/mon/mon_text.c 		kmem_cache_free(rp->e_slab, ep);
rp                475 drivers/usb/mon/mon_text.c 	ret = mon_text_copy_to_user(rp, buf, nbytes);
rp                476 drivers/usb/mon/mon_text.c 	mutex_unlock(&rp->printf_lock);
rp                480 drivers/usb/mon/mon_text.c static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
rp                483 drivers/usb/mon/mon_text.c 	struct mon_bus *mbus = rp->r.m_bus;
rp                487 drivers/usb/mon/mon_text.c 	add_wait_queue(&rp->wait, &waita);
rp                489 drivers/usb/mon/mon_text.c 	while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
rp                492 drivers/usb/mon/mon_text.c 			remove_wait_queue(&rp->wait, &waita);
rp                501 drivers/usb/mon/mon_text.c 			remove_wait_queue(&rp->wait, &waita);
rp                507 drivers/usb/mon/mon_text.c 	remove_wait_queue(&rp->wait, &waita);
rp                511 drivers/usb/mon/mon_text.c static void mon_text_read_head_t(struct mon_reader_text *rp,
rp                529 drivers/usb/mon/mon_text.c static void mon_text_read_head_u(struct mon_reader_text *rp,
rp                547 drivers/usb/mon/mon_text.c static void mon_text_read_statset(struct mon_reader_text *rp,
rp                568 drivers/usb/mon/mon_text.c static void mon_text_read_intstat(struct mon_reader_text *rp,
rp                575 drivers/usb/mon/mon_text.c static void mon_text_read_isostat(struct mon_reader_text *rp,
rp                588 drivers/usb/mon/mon_text.c static void mon_text_read_isodesc(struct mon_reader_text *rp,
rp                610 drivers/usb/mon/mon_text.c static void mon_text_read_data(struct mon_reader_text *rp,
rp                644 drivers/usb/mon/mon_text.c 	struct mon_reader_text *rp = file->private_data;
rp                658 drivers/usb/mon/mon_text.c 	mon_reader_del(mbus, &rp->r);
rp                668 drivers/usb/mon/mon_text.c 	while (!list_empty(&rp->e_list)) {
rp                669 drivers/usb/mon/mon_text.c 		p = rp->e_list.next;
rp                672 drivers/usb/mon/mon_text.c 		--rp->nevents;
rp                673 drivers/usb/mon/mon_text.c 		kmem_cache_free(rp->e_slab, ep);
rp                677 drivers/usb/mon/mon_text.c 	kmem_cache_destroy(rp->e_slab);
rp                678 drivers/usb/mon/mon_text.c 	kfree(rp->printf_buf);
rp                679 drivers/usb/mon/mon_text.c 	kfree(rp);
rp               2944 drivers/video/fbdev/aty/atyfb_base.c 		struct resource *rp = &pdev->resource[i];
rp               2949 drivers/video/fbdev/aty/atyfb_base.c 		base = rp->start;
rp               2951 drivers/video/fbdev/aty/atyfb_base.c 		io = (rp->flags & IORESOURCE_IO);
rp               2953 drivers/video/fbdev/aty/atyfb_base.c 		size = rp->end - base + 1;
rp               3498 drivers/video/fbdev/aty/atyfb_base.c 	struct resource *rp;
rp               3509 drivers/video/fbdev/aty/atyfb_base.c 	rp = &pdev->resource[0];
rp               3510 drivers/video/fbdev/aty/atyfb_base.c 	if (rp->flags & IORESOURCE_IO)
rp               3511 drivers/video/fbdev/aty/atyfb_base.c 		rp = &pdev->resource[1];
rp               3512 drivers/video/fbdev/aty/atyfb_base.c 	addr = rp->start;
rp               3517 drivers/video/fbdev/aty/atyfb_base.c 	res_start = rp->start;
rp               3518 drivers/video/fbdev/aty/atyfb_base.c 	res_size = resource_size(rp);
rp                479 drivers/video/fbdev/controlfb.c 	volatile struct preg	__iomem *rp;
rp                508 drivers/video/fbdev/controlfb.c 	rp = &p->control_regs->vswin;
rp                509 drivers/video/fbdev/controlfb.c 	for (i = 0; i < 16; ++i, ++rp)
rp                510 drivers/video/fbdev/controlfb.c 		out_le32(&rp->r, r->regs[i]);
rp                730 drivers/xen/xen-scsiback.c 	RING_IDX rc, rp;
rp                735 drivers/xen/xen-scsiback.c 	rp = ring->sring->req_prod;
rp                738 drivers/xen/xen-scsiback.c 	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
rp                741 drivers/xen/xen-scsiback.c 			   info->domid, rp, rc, rp - rc);
rp                746 drivers/xen/xen-scsiback.c 	while ((rc != rp)) {
rp                433 fs/binfmt_flat.c 	u32 __user *rp;
rp                768 fs/binfmt_flat.c 		for (rp = (u32 __user *)datapos; ; rp++) {
rp                770 fs/binfmt_flat.c 			if (get_user(rp_val, rp))
rp                780 fs/binfmt_flat.c 				if (put_user(addr, rp))
rp                811 fs/binfmt_flat.c 			rp = (u32 __user *)calc_reloc(addr, libinfo, id, 1);
rp                812 fs/binfmt_flat.c 			if (rp == (u32 __user *)RELOC_FAILED) {
rp                818 fs/binfmt_flat.c 			ret = flat_get_addr_from_rp(rp, relval, flags, &addr);
rp                841 fs/binfmt_flat.c 				ret = flat_put_addr_at_rp(rp, addr, relval);
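
The binfmt_flat fragment shows the relocation pass: each entry names a location inside the freshly loaded image, the loader reads the unrelocated value there, translates it to a run-time address, and writes it back. A deliberately simplified user-space model follows; the flat format's real header layout, the calc_reloc() range checks and the user-space accessors are not reproduced, and a flat load_bias translation stands in for them.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Apply 'nrelocs' fixups to an image loaded at 'image'.  Each relocation is a
 * byte offset of a 32-bit word whose stored value is an address in the
 * original link space; adding load_bias turns it into a run-time address.
 * Returns 0 on success, -1 on an out-of-range entry (the kernel reports that
 * case as RELOC_FAILED). */
static int apply_relocs(uint8_t *image, size_t image_size,
			const uint32_t *relocs, size_t nrelocs,
			uint32_t load_bias)
{
	size_t i;

	for (i = 0; i < nrelocs; i++) {
		size_t off = relocs[i];
		uint32_t val;

		if (image_size < sizeof(val) || off > image_size - sizeof(val))
			return -1;
		memcpy(&val, image + off, sizeof(val));   /* read, like flat_get_addr_from_rp */
		val += load_bias;                         /* address translation, simplified  */
		memcpy(image + off, &val, sizeof(val));   /* write back, like flat_put_addr_at_rp */
	}
	return 0;
}
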
rp                 54 fs/ceph/debugfs.c 	struct rb_node *rp;
rp                 60 fs/ceph/debugfs.c 	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
rp                 61 fs/ceph/debugfs.c 		req = rb_entry(rp, struct ceph_mds_request, r_node);
rp                326 fs/cifs/cifs_unicode.h 	register const struct UniCaseRange *rp;
rp                332 fs/cifs/cifs_unicode.h 		rp = CifsUniUpperRange;	/* Use range tables */
rp                333 fs/cifs/cifs_unicode.h 		while (rp->start) {
rp                334 fs/cifs/cifs_unicode.h 			if (uc < rp->start)	/* Before start of range */
rp                336 fs/cifs/cifs_unicode.h 			if (uc <= rp->end)	/* In range */
rp                337 fs/cifs/cifs_unicode.h 				return uc + rp->table[uc - rp->start];
rp                338 fs/cifs/cifs_unicode.h 			rp++;	/* Try next range */
rp                368 fs/cifs/cifs_unicode.h 	register const struct UniCaseRange *rp;
rp                374 fs/cifs/cifs_unicode.h 		rp = CifsUniLowerRange;	/* Use range tables */
rp                375 fs/cifs/cifs_unicode.h 		while (rp->start) {
rp                376 fs/cifs/cifs_unicode.h 			if (uc < rp->start)	/* Before start of range */
rp                378 fs/cifs/cifs_unicode.h 			if (uc <= rp->end)	/* In range */
rp                379 fs/cifs/cifs_unicode.h 				return uc + rp->table[uc - rp->start];
rp                380 fs/cifs/cifs_unicode.h 			rp++;	/* Try next range */
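
Both the CIFS helpers above and the JFS ones further down implement the same range-table case conversion: low code points go through a flat offset table, everything else is looked up in an ordered list of (start, end, delta-table) ranges terminated by a zero start. The self-contained sketch below mirrors that range walk with a tiny made-up range table; the flat-table tier is simplified to a plain a-z check.

#include <stdint.h>
#include <stddef.h>

struct case_range {
	uint16_t start, end;             /* inclusive range of code points */
	const int16_t *table;            /* per-character signed offsets */
};

/* One toy range: U+00E0..U+00E5 each map to the code point 0x20 lower. */
static const int16_t latin_small_delta[] = { -32, -32, -32, -32, -32, -32 };
static const struct case_range upper_ranges[] = {
	{ 0x00E0, 0x00E5, latin_small_delta },
	{ 0, 0, NULL }                   /* terminator, as in the kernel tables */
};

static uint16_t to_upper(uint16_t uc)
{
	const struct case_range *rp;

	if (uc < 'a')                    /* nothing below 'a' changes here */
		return uc;
	if (uc <= 'z')                   /* plain ASCII: constant offset */
		return uc - ('a' - 'A');
	for (rp = upper_ranges; rp->start; rp++) {
		if (uc < rp->start)      /* ranges are sorted: we passed it */
			break;
		if (uc <= rp->end)       /* hit: apply the per-character delta */
			return uc + rp->table[uc - rp->start];
	}
	return uc;                       /* not in any range: unchanged */
}
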
rp                505 fs/dlm/debug_fs.c 	struct dlm_rsb *r, *rp;
rp                517 fs/dlm/debug_fs.c 	rp = ri->rsb;
rp                518 fs/dlm/debug_fs.c 	next = rb_next(&rp->res_hashnode);
rp                525 fs/dlm/debug_fs.c 		dlm_put_rsb(rp);
rp                530 fs/dlm/debug_fs.c 	dlm_put_rsb(rp);
rp                169 fs/jfs/jfs_dtree.c static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
rp                926 fs/jfs/jfs_dtree.c 	dtpage_t *rp;		/* new right page split from sp */
rp               1084 fs/jfs/jfs_dtree.c 	if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
rp               1164 fs/jfs/jfs_dtree.c 		switch (rp->header.flag & BT_TYPE) {
rp               1176 fs/jfs/jfs_dtree.c 							rp, 0, &key,
rp               1189 fs/jfs/jfs_dtree.c 				dtGetKey(rp, 0, &key, sbi->mntflag);
rp               1200 fs/jfs/jfs_dtree.c 			dtGetKey(rp, 0, &key, sbi->mntflag);
rp               1237 fs/jfs/jfs_dtree.c 			    dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
rp               1325 fs/jfs/jfs_dtree.c 	dtpage_t *rp;		/* new right page allocated */
rp               1375 fs/jfs/jfs_dtree.c 	rp = (dtpage_t *) rmp->data;
rp               1376 fs/jfs/jfs_dtree.c 	*rpp = rp;
rp               1377 fs/jfs/jfs_dtree.c 	rp->header.self = *pxd;
rp               1399 fs/jfs/jfs_dtree.c 	rp->header.next = cpu_to_le64(nextbn);
rp               1400 fs/jfs/jfs_dtree.c 	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
rp               1406 fs/jfs/jfs_dtree.c 	rp->header.flag = sp->header.flag;
rp               1409 fs/jfs/jfs_dtree.c 	rp->header.nextindex = 0;
rp               1410 fs/jfs/jfs_dtree.c 	rp->header.stblindex = 1;
rp               1413 fs/jfs/jfs_dtree.c 	rp->header.maxslot = n;
rp               1417 fs/jfs/jfs_dtree.c 	fsi = rp->header.stblindex + stblsize;
rp               1418 fs/jfs/jfs_dtree.c 	rp->header.freelist = fsi;
rp               1419 fs/jfs/jfs_dtree.c 	rp->header.freecnt = rp->header.maxslot - fsi;
rp               1444 fs/jfs/jfs_dtree.c 		f = &rp->slot[fsi];
rp               1445 fs/jfs/jfs_dtree.c 		for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
rp               1450 fs/jfs/jfs_dtree.c 		dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);
rp               1553 fs/jfs/jfs_dtree.c 	dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
rp               1560 fs/jfs/jfs_dtree.c 	fsi = rp->header.freelist;
rp               1561 fs/jfs/jfs_dtree.c 	f = &rp->slot[fsi];
rp               1562 fs/jfs/jfs_dtree.c 	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
rp               1569 fs/jfs/jfs_dtree.c 	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
rp               1573 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(rp);
rp               1574 fs/jfs/jfs_dtree.c 		for (n = 0; n < rp->header.nextindex; n++) {
rp               1575 fs/jfs/jfs_dtree.c 			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
rp               1608 fs/jfs/jfs_dtree.c 		dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
rp               1870 fs/jfs/jfs_dtree.c 	dtpage_t *rp;
rp               1906 fs/jfs/jfs_dtree.c 	rp = rmp->data;
rp               1922 fs/jfs/jfs_dtree.c 	rp->header.flag =
rp               1924 fs/jfs/jfs_dtree.c 	rp->header.self = *pxd;
rp               1927 fs/jfs/jfs_dtree.c 	rp->header.next = 0;
rp               1928 fs/jfs/jfs_dtree.c 	rp->header.prev = 0;
rp               1941 fs/jfs/jfs_dtree.c 	rp->header.maxslot = n;
rp               1945 fs/jfs/jfs_dtree.c 	rp->header.stblindex = DTROOTMAXSLOT;
rp               1946 fs/jfs/jfs_dtree.c 	stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
rp               1948 fs/jfs/jfs_dtree.c 	rp->header.nextindex = sp->header.nextindex;
rp               1951 fs/jfs/jfs_dtree.c 	memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);
rp               1958 fs/jfs/jfs_dtree.c 	f = &rp->slot[fsi];
rp               1959 fs/jfs/jfs_dtree.c 	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
rp               1966 fs/jfs/jfs_dtree.c 		rp->header.freelist = n;
rp               1968 fs/jfs/jfs_dtree.c 		rp->header.freelist = fsi;
rp               1971 fs/jfs/jfs_dtree.c 			f = &rp->slot[fsi];
rp               1978 fs/jfs/jfs_dtree.c 	rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;
rp               1983 fs/jfs/jfs_dtree.c 	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
rp               1988 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(rp);
rp               1989 fs/jfs/jfs_dtree.c 		for (n = 0; n < rp->header.nextindex; n++) {
rp               1990 fs/jfs/jfs_dtree.c 			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
rp               2001 fs/jfs/jfs_dtree.c 	dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
rp               2438 fs/jfs/jfs_dtree.c 	dtpage_t *p, *pp, *rp = 0, *lp= 0;
rp               2487 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
rp               2537 fs/jfs/jfs_dtree.c 		rp->header.prev = cpu_to_le64(nxaddr);
rp               3762 fs/jfs/jfs_dtree.c static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
rp               3789 fs/jfs/jfs_dtree.c 	dtGetKey(rp, ri, &rkey, flag);
rp                110 fs/jfs/jfs_unicode.h 	UNICASERANGE *rp;
rp                115 fs/jfs/jfs_unicode.h 		rp = UniUpperRange;	/* Use range tables */
rp                116 fs/jfs/jfs_unicode.h 		while (rp->start) {
rp                117 fs/jfs/jfs_unicode.h 			if (uc < rp->start)	/* Before start of range */
rp                119 fs/jfs/jfs_unicode.h 			if (uc <= rp->end)	/* In range */
rp                120 fs/jfs/jfs_unicode.h 				return uc + rp->table[uc - rp->start];
rp                121 fs/jfs/jfs_unicode.h 			rp++;	/* Try next range */
rp                952 fs/jfs/jfs_xtree.c 	xtpage_t *rp;		/* new right page allocated */
rp                998 fs/jfs/jfs_xtree.c 	rp = (xtpage_t *) rmp->data;
rp                999 fs/jfs/jfs_xtree.c 	rp->header.self = *pxd;
rp               1000 fs/jfs/jfs_xtree.c 	rp->header.flag = sp->header.flag & BT_TYPE;
rp               1001 fs/jfs/jfs_xtree.c 	rp->header.maxentry = sp->header.maxentry;	/* little-endian */
rp               1002 fs/jfs/jfs_xtree.c 	rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
rp               1024 fs/jfs/jfs_xtree.c 	rp->header.next = cpu_to_le64(nextbn);
rp               1025 fs/jfs/jfs_xtree.c 	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
rp               1050 fs/jfs/jfs_xtree.c 		xad = &rp->xad[XTENTRYSTART];
rp               1054 fs/jfs/jfs_xtree.c 		rp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);
rp               1064 fs/jfs/jfs_xtree.c 		jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
rp               1112 fs/jfs/jfs_xtree.c 		memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
rp               1132 fs/jfs/jfs_xtree.c 		rp->header.nextindex =
rp               1141 fs/jfs/jfs_xtree.c 		memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle],
rp               1146 fs/jfs/jfs_xtree.c 		xad = &rp->xad[n];
rp               1152 fs/jfs/jfs_xtree.c 			memmove(&rp->xad[n + 1], &sp->xad[skip],
rp               1162 fs/jfs/jfs_xtree.c 		rp->header.nextindex = cpu_to_le16(XTENTRYSTART +
rp               1171 fs/jfs/jfs_xtree.c 		rxtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
rp               1178 fs/jfs/jfs_xtree.c 	jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp);
rp               1217 fs/jfs/jfs_xtree.c 	xtpage_t *rp;
rp               1258 fs/jfs/jfs_xtree.c 	rp = (xtpage_t *) rmp->data;
rp               1259 fs/jfs/jfs_xtree.c 	rp->header.flag =
rp               1261 fs/jfs/jfs_xtree.c 	rp->header.self = *pxd;
rp               1262 fs/jfs/jfs_xtree.c 	rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
rp               1263 fs/jfs/jfs_xtree.c 	rp->header.maxentry = cpu_to_le16(PSIZE >> L2XTSLOTSIZE);
rp               1266 fs/jfs/jfs_xtree.c 	rp->header.next = 0;
rp               1267 fs/jfs/jfs_xtree.c 	rp->header.prev = 0;
rp               1273 fs/jfs/jfs_xtree.c 	memmove(&rp->xad[XTENTRYSTART], &sp->xad[XTENTRYSTART],
rp               1283 fs/jfs/jfs_xtree.c 		memmove(&rp->xad[skip + 1], &rp->xad[skip],
rp               1286 fs/jfs/jfs_xtree.c 	xad = &rp->xad[skip];
rp               1290 fs/jfs/jfs_xtree.c 	rp->header.nextindex = cpu_to_le16(nextindex + 1);
rp               1296 fs/jfs/jfs_xtree.c 		xtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
rp               1332 fs/jfs/jfs_xtree.c 	jfs_info("xtSplitRoot: sp:0x%p rp:0x%p", sp, rp);
rp               2555 fs/jfs/jfs_xtree.c 	xtpage_t *p, *pp, *rp, *lp;	/* base B+-tree index page */
rp               2718 fs/jfs/jfs_xtree.c 			XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
rp               2754 fs/jfs/jfs_xtree.c 			rp->header.prev = cpu_to_le64(nxaddr);
rp                572 fs/nfs/blocklayout/blocklayout.c static int decode_sector_number(__be32 **rp, sector_t *sp)
rp                576 fs/nfs/blocklayout/blocklayout.c 	*rp = xdr_decode_hyper(*rp, &s);
rp                385 fs/nfsd/nfs4proc.c 		struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
rp                388 fs/nfsd/nfs4proc.c 				&rp->rp_openfh);
rp               4026 fs/nfsd/nfs4state.c static void init_nfs4_replay(struct nfs4_replay *rp)
rp               4028 fs/nfsd/nfs4state.c 	rp->rp_status = nfserr_serverfault;
rp               4029 fs/nfsd/nfs4state.c 	rp->rp_buflen = 0;
rp               4030 fs/nfsd/nfs4state.c 	rp->rp_buf = rp->rp_ibuf;
rp               4031 fs/nfsd/nfs4state.c 	mutex_init(&rp->rp_mutex);
rp               4512 fs/nfsd/nfs4xdr.c 	struct nfs4_replay *rp = op->replay;
rp               4514 fs/nfsd/nfs4xdr.c 	BUG_ON(!rp);
rp               4516 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
rp               4522 fs/nfsd/nfs4xdr.c 	*p++ = rp->rp_status;  /* already xdr'ed */
rp               4524 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
rp                 96 fs/nfsd/nfscache.c 	struct svc_cacherep	*rp;
rp                 98 fs/nfsd/nfscache.c 	rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
rp                 99 fs/nfsd/nfscache.c 	if (rp) {
rp                100 fs/nfsd/nfscache.c 		rp->c_state = RC_UNUSED;
rp                101 fs/nfsd/nfscache.c 		rp->c_type = RC_NOCACHE;
rp                102 fs/nfsd/nfscache.c 		RB_CLEAR_NODE(&rp->c_node);
rp                103 fs/nfsd/nfscache.c 		INIT_LIST_HEAD(&rp->c_lru);
rp                105 fs/nfsd/nfscache.c 		memset(&rp->c_key, 0, sizeof(rp->c_key));
rp                106 fs/nfsd/nfscache.c 		rp->c_key.k_xid = rqstp->rq_xid;
rp                107 fs/nfsd/nfscache.c 		rp->c_key.k_proc = rqstp->rq_proc;
rp                108 fs/nfsd/nfscache.c 		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
rp                109 fs/nfsd/nfscache.c 		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
rp                110 fs/nfsd/nfscache.c 		rp->c_key.k_prot = rqstp->rq_prot;
rp                111 fs/nfsd/nfscache.c 		rp->c_key.k_vers = rqstp->rq_vers;
rp                112 fs/nfsd/nfscache.c 		rp->c_key.k_len = rqstp->rq_arg.len;
rp                113 fs/nfsd/nfscache.c 		rp->c_key.k_csum = csum;
rp                115 fs/nfsd/nfscache.c 	return rp;
rp                119 fs/nfsd/nfscache.c nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
rp                122 fs/nfsd/nfscache.c 	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
rp                123 fs/nfsd/nfscache.c 		nn->drc_mem_usage -= rp->c_replvec.iov_len;
rp                124 fs/nfsd/nfscache.c 		kfree(rp->c_replvec.iov_base);
rp                126 fs/nfsd/nfscache.c 	if (rp->c_state != RC_UNUSED) {
rp                127 fs/nfsd/nfscache.c 		rb_erase(&rp->c_node, &b->rb_head);
rp                128 fs/nfsd/nfscache.c 		list_del(&rp->c_lru);
rp                130 fs/nfsd/nfscache.c 		nn->drc_mem_usage -= sizeof(*rp);
rp                132 fs/nfsd/nfscache.c 	kmem_cache_free(nn->drc_slab, rp);
rp                136 fs/nfsd/nfscache.c nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
rp                140 fs/nfsd/nfscache.c 	nfsd_reply_cache_free_locked(b, rp, nn);
rp                194 fs/nfsd/nfscache.c 	struct svc_cacherep	*rp;
rp                202 fs/nfsd/nfscache.c 			rp = list_first_entry(head, struct svc_cacherep, c_lru);
rp                204 fs/nfsd/nfscache.c 									rp, nn);
rp                221 fs/nfsd/nfscache.c lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
rp                223 fs/nfsd/nfscache.c 	rp->c_timestamp = jiffies;
rp                224 fs/nfsd/nfscache.c 	list_move_tail(&rp->c_lru, &b->lru_head);
rp                230 fs/nfsd/nfscache.c 	struct svc_cacherep *rp, *tmp;
rp                233 fs/nfsd/nfscache.c 	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
rp                238 fs/nfsd/nfscache.c 		if (rp->c_state == RC_INPROG)
rp                241 fs/nfsd/nfscache.c 		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
rp                243 fs/nfsd/nfscache.c 		nfsd_reply_cache_free_locked(b, rp, nn);
rp                323 fs/nfsd/nfscache.c 			const struct svc_cacherep *rp, struct nfsd_net *nn)
rp                325 fs/nfsd/nfscache.c 	if (key->c_key.k_xid == rp->c_key.k_xid &&
rp                326 fs/nfsd/nfscache.c 	    key->c_key.k_csum != rp->c_key.k_csum)
rp                329 fs/nfsd/nfscache.c 	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
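
nfsd_cache_key_cmp, quoted just above, orders duplicate-reply-cache entries with a deliberate short cut: if the XIDs match but the payload checksums do not, it reports a mismatch immediately instead of falling through to the full comparison, so a request that reuses an XID with a different body is treated as a distinct entry. A stand-alone rendering of that comparison, with a reduced key structure invented for the example, is sketched below.

#include <string.h>
#include <stdint.h>

struct cache_key {
	uint32_t xid;                    /* RPC transaction id */
	uint32_t csum;                   /* checksum over the request body */
	uint16_t proc, prot, vers, len;  /* remaining discriminating fields */
};

/* <0 / 0 / >0 ordering for an rbtree-style lookup.  The xid+csum short cut
 * makes "same xid, different body" a definite mismatch instead of letting
 * memcmp() possibly declare the keys equal. */
static int cache_key_cmp(const struct cache_key *key, const struct cache_key *rp)
{
	if (key->xid == rp->xid && key->csum != rp->csum)
		return 1;
	return memcmp(key, rp, sizeof(*key));
}
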
rp                341 fs/nfsd/nfscache.c 	struct svc_cacherep	*rp, *ret = key;
rp                350 fs/nfsd/nfscache.c 		rp = rb_entry(parent, struct svc_cacherep, c_node);
rp                352 fs/nfsd/nfscache.c 		cmp = nfsd_cache_key_cmp(key, rp, nn);
rp                358 fs/nfsd/nfscache.c 			ret = rp;
rp                391 fs/nfsd/nfscache.c 	struct svc_cacherep	*rp, *found;
rp                411 fs/nfsd/nfscache.c 	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
rp                412 fs/nfsd/nfscache.c 	if (!rp) {
rp                418 fs/nfsd/nfscache.c 	found = nfsd_cache_insert(b, rp, nn);
rp                419 fs/nfsd/nfscache.c 	if (found != rp) {
rp                420 fs/nfsd/nfscache.c 		nfsd_reply_cache_free_locked(NULL, rp, nn);
rp                421 fs/nfsd/nfscache.c 		rp = found;
rp                426 fs/nfsd/nfscache.c 	rqstp->rq_cacherep = rp;
rp                427 fs/nfsd/nfscache.c 	rp->c_state = RC_INPROG;
rp                430 fs/nfsd/nfscache.c 	nn->drc_mem_usage += sizeof(*rp);
rp                444 fs/nfsd/nfscache.c 	if (rp->c_state == RC_INPROG)
rp                450 fs/nfsd/nfscache.c 	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
rp                454 fs/nfsd/nfscache.c 	switch (rp->c_type) {
rp                458 fs/nfsd/nfscache.c 		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
rp                462 fs/nfsd/nfscache.c 		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
rp                467 fs/nfsd/nfscache.c 		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
rp                468 fs/nfsd/nfscache.c 		nfsd_reply_cache_free_locked(b, rp, nn);
rp                494 fs/nfsd/nfscache.c 	struct svc_cacherep *rp = rqstp->rq_cacherep;
rp                501 fs/nfsd/nfscache.c 	if (!rp)
rp                504 fs/nfsd/nfscache.c 	hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
rp                512 fs/nfsd/nfscache.c 		nfsd_reply_cache_free(b, rp, nn);
rp                520 fs/nfsd/nfscache.c 		rp->c_replstat = *statp;
rp                523 fs/nfsd/nfscache.c 		cachv = &rp->c_replvec;
rp                527 fs/nfsd/nfscache.c 			nfsd_reply_cache_free(b, rp, nn);
rp                534 fs/nfsd/nfscache.c 		nfsd_reply_cache_free(b, rp, nn);
rp                539 fs/nfsd/nfscache.c 	lru_put_end(b, rp);
rp                540 fs/nfsd/nfscache.c 	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp                541 fs/nfsd/nfscache.c 	rp->c_type = cachetype;
rp                542 fs/nfsd/nfscache.c 	rp->c_state = RC_DONE;
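The fs/nfsd/nfscache.c hits above all belong to the NFS duplicate reply cache (DRC). As a rough, hedged sketch of how a dispatch path is expected to drive that cache, the fragment below pairs nfsd_cache_lookup() with nfsd_cache_update(); the function name demo_dispatch and its trivial procedure body are illustrative and not the real nfsd_dispatch().

/*
 * Hedged sketch only (not the real nfsd_dispatch()): how a dispatch path
 * drives the duplicate reply cache indexed above.  Assumes fs/nfsd context,
 * i.e. "cache.h" and <linux/sunrpc/svc.h> are already included.
 * Return value follows the svc dispatch convention: 1 = send reply, 0 = drop.
 */
static int demo_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DROPIT:		/* duplicate still in progress: drop silently */
		return 0;
	case RC_REPLY:		/* cached reply was already copied into rq_res */
		return 1;
	case RC_DOIT:		/* new request: fall through and execute it */
		break;
	}

	*statp = nfs_ok;	/* placeholder for actually running the procedure */

	/* Record the status so a retransmission can be answered from cache. */
	nfsd_cache_update(rqstp, RC_REPLSTAT, statp);
	return 1;
}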
rp               1081 fs/ntfs/layout.h 		} __attribute__ ((__packed__)) rp;
rp                 39 fs/ntfs/logfile.c 		RESTART_PAGE_HEADER *rp, s64 pos)
rp                 50 fs/ntfs/logfile.c 	logfile_system_page_size = le32_to_cpu(rp->system_page_size);
rp                 51 fs/ntfs/logfile.c 	logfile_log_page_size = le32_to_cpu(rp->log_page_size);
rp                 70 fs/ntfs/logfile.c 	if (sle16_to_cpu(rp->major_ver) != 1 ||
rp                 71 fs/ntfs/logfile.c 			sle16_to_cpu(rp->minor_ver) != 1) {
rp                 74 fs/ntfs/logfile.c 				"1.1 only.)", (int)sle16_to_cpu(rp->major_ver),
rp                 75 fs/ntfs/logfile.c 				(int)sle16_to_cpu(rp->minor_ver));
rp                 82 fs/ntfs/logfile.c 	if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) {
rp                 88 fs/ntfs/logfile.c 	if (usa_count != le16_to_cpu(rp->usa_count)) {
rp                 94 fs/ntfs/logfile.c 	usa_ofs = le16_to_cpu(rp->usa_ofs);
rp                109 fs/ntfs/logfile.c 	ra_ofs = le16_to_cpu(rp->restart_area_offset);
rp                121 fs/ntfs/logfile.c 	if (!ntfs_is_chkd_record(rp->magic) && sle64_to_cpu(rp->chkdsk_lsn)) {
rp                144 fs/ntfs/logfile.c static bool ntfs_check_restart_area(struct inode *vi, RESTART_PAGE_HEADER *rp)
rp                152 fs/ntfs/logfile.c 	ra_ofs = le16_to_cpu(rp->restart_area_offset);
rp                153 fs/ntfs/logfile.c 	ra = (RESTART_AREA*)((u8*)rp + ra_ofs);
rp                186 fs/ntfs/logfile.c 	if (ra_ofs + ra_len > le32_to_cpu(rp->system_page_size) ||
rp                188 fs/ntfs/logfile.c 			le32_to_cpu(rp->system_page_size) ||
rp                260 fs/ntfs/logfile.c 		RESTART_PAGE_HEADER *rp)
rp                268 fs/ntfs/logfile.c 	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
rp                337 fs/ntfs/logfile.c 		RESTART_PAGE_HEADER *rp, s64 pos, RESTART_PAGE_HEADER **wrp,
rp                346 fs/ntfs/logfile.c 	if (!ntfs_check_restart_page_header(vi, rp, pos)) {
rp                351 fs/ntfs/logfile.c 	if (!ntfs_check_restart_area(vi, rp)) {
rp                355 fs/ntfs/logfile.c 	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
rp                360 fs/ntfs/logfile.c 	trp = ntfs_malloc_nofs(le32_to_cpu(rp->system_page_size));
rp                372 fs/ntfs/logfile.c 	if (size >= le32_to_cpu(rp->system_page_size)) {
rp                373 fs/ntfs/logfile.c 		memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
rp                380 fs/ntfs/logfile.c 		memcpy(trp, rp, size);
rp                383 fs/ntfs/logfile.c 		to_read = le32_to_cpu(rp->system_page_size) - size;
rp                410 fs/ntfs/logfile.c 			le32_to_cpu(rp->system_page_size))) {
rp                416 fs/ntfs/logfile.c 		if (le16_to_cpu(rp->restart_area_offset) +
rp                431 fs/ntfs/logfile.c 	if (ntfs_is_rstr_record(rp->magic) &&
rp                439 fs/ntfs/logfile.c 		if (ntfs_is_rstr_record(rp->magic))
rp                442 fs/ntfs/logfile.c 			*lsn = sle64_to_cpu(rp->chkdsk_lsn);
rp                471 fs/ntfs/logfile.c bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
rp                634 fs/ntfs/logfile.c 	if (rp)
rp                635 fs/ntfs/logfile.c 		*rp = rstr1_ph;
rp                666 fs/ntfs/logfile.c bool ntfs_is_logfile_clean(struct inode *log_vi, const RESTART_PAGE_HEADER *rp)
rp                677 fs/ntfs/logfile.c 	BUG_ON(!rp);
rp                678 fs/ntfs/logfile.c 	if (!ntfs_is_rstr_record(rp->magic) &&
rp                679 fs/ntfs/logfile.c 			!ntfs_is_chkd_record(rp->magic)) {
rp                686 fs/ntfs/logfile.c 	ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
rp                286 fs/ntfs/logfile.h 		RESTART_PAGE_HEADER **rp);
rp                289 fs/ntfs/logfile.h 		const RESTART_PAGE_HEADER *rp);
rp               1203 fs/ntfs/super.c 		RESTART_PAGE_HEADER **rp)
rp               1215 fs/ntfs/super.c 	if (!ntfs_check_logfile(tmp_ino, rp)) {
rp               1765 fs/ntfs/super.c 	RESTART_PAGE_HEADER *rp;
rp               1940 fs/ntfs/super.c 	rp = NULL;
rp               1941 fs/ntfs/super.c 	if (!load_and_check_logfile(vol, &rp) ||
rp               1942 fs/ntfs/super.c 			!ntfs_is_logfile_clean(vol->logfile_ino, rp)) {
rp               1958 fs/ntfs/super.c 					BUG_ON(!rp);
rp               1959 fs/ntfs/super.c 					ntfs_free(rp);
rp               1971 fs/ntfs/super.c 	ntfs_free(rp);
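The fs/ntfs hits above trace the $LogFile sanity checks: ntfs_check_logfile() validates the restart pages and hands the newest one back through its rp argument, ntfs_is_logfile_clean() then decides whether the journal needs replaying, and the caller releases the page with ntfs_free(), as the super.c lines show. A minimal hedged sketch of that pattern, with the wrapper name demo_logfile_is_clean invented for illustration:

/*
 * Hedged sketch of the mount-time pattern visible in the super.c hits above.
 * Assumes fs/ntfs context ("logfile.h", "malloc.h") and that log_vi is the
 * already-loaded $LogFile inode; demo_logfile_is_clean is an invented name.
 */
static bool demo_logfile_is_clean(struct inode *log_vi)
{
	RESTART_PAGE_HEADER *rp = NULL;
	bool clean;

	if (!ntfs_check_logfile(log_vi, &rp))
		return false;	/* restart pages are corrupt or unreadable */

	/* An empty $LogFile yields no restart page and counts as clean here. */
	clean = !rp || ntfs_is_logfile_clean(log_vi, rp);
	ntfs_free(rp);
	return clean;
}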
rp               2246 fs/xfs/libxfs/xfs_btree.c 	union xfs_btree_rec	*rp;
rp               2258 fs/xfs/libxfs/xfs_btree.c 	rp = xfs_btree_rec_addr(cur, ptr, block);
rp               2261 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_copy_recs(cur, rp, rec, 1);
rp               3352 fs/xfs/libxfs/xfs_btree.c 		union xfs_btree_rec             *rp;
rp               3354 fs/xfs/libxfs/xfs_btree.c 		rp = xfs_btree_rec_addr(cur, ptr, block);
rp               3356 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
rp               3359 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_copy_recs(cur, rp, rec, 1);
rp               3364 fs/xfs/libxfs/xfs_btree.c 			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
rp                  7 include/asm-generic/flat.h static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
rp                 11 include/asm-generic/flat.h 	return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
rp                 13 include/asm-generic/flat.h 	return get_user(*addr, rp);
rp                 17 include/asm-generic/flat.h static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
rp                 20 include/asm-generic/flat.h 	return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
rp                 22 include/asm-generic/flat.h 	return put_user(addr, rp);
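The include/asm-generic/flat.h lines above are the fallback relocation accessors for the flat binary loader; the per-architecture headers earlier in this listing provide variants with the same signatures. As a hedged sketch (not the actual fs/binfmt_flat.c relocation loop), the helper below shows how the pair is meant to be used for a single fix-up; demo_fixup_one and the "delta" rebasing offset are illustrative only.

/*
 * Hedged sketch: fetch the value at the relocation target, rebase it, and
 * write it back into the loaded image.  Needs <asm/flat.h>.
 */
static int demo_fixup_one(u32 __user *rp, u32 relval, u32 flags, u32 delta)
{
	u32 addr;
	int ret;

	ret = flat_get_addr_from_rp(rp, relval, flags, &addr);
	if (ret)
		return ret;
	return flat_put_addr_at_rp(rp, addr + delta, relval);
}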
rp                 89 include/linux/intel_rapl.h 	struct rapl_package *rp;
rp                150 include/linux/intel_rapl.h void rapl_remove_package(struct rapl_package *rp);
rp                160 include/linux/kprobes.h 	struct kretprobe *rp;
rp                195 include/linux/kprobes.h static inline void arch_prepare_kretprobe(struct kretprobe *rp,
rp                212 include/linux/kprobes.h 				ri->rp, ri->rp->kp.addr);
rp                360 include/linux/kprobes.h int register_kretprobe(struct kretprobe *rp);
rp                361 include/linux/kprobes.h void unregister_kretprobe(struct kretprobe *rp);
rp                408 include/linux/kprobes.h static inline int register_kretprobe(struct kretprobe *rp)
rp                416 include/linux/kprobes.h static inline void unregister_kretprobe(struct kretprobe *rp)
rp                439 include/linux/kprobes.h static inline int disable_kretprobe(struct kretprobe *rp)
rp                441 include/linux/kprobes.h 	return disable_kprobe(&rp->kp);
rp                443 include/linux/kprobes.h static inline int enable_kretprobe(struct kretprobe *rp)
rp                445 include/linux/kprobes.h 	return enable_kprobe(&rp->kp);
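The include/linux/kprobes.h hits above declare the kretprobe API (register/unregister/enable/disable). A small hedged example modelled on the in-tree kretprobe sample module; the handler name, the probed symbol "do_filp_open" and the maxactive value are illustrative choices, not requirements.

/*
 * Register a return probe on one function and report missed instances.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned 0x%lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_rp = {
	.handler	= demo_ret_handler,
	.kp.symbol_name	= "do_filp_open",
	.maxactive	= 20,	/* max concurrent instances, cf. rp->maxactive above */
};

static int __init demo_init(void)
{
	return register_kretprobe(&demo_rp);
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&demo_rp);
	pr_info("missed %d instances\n", demo_rp.nmissed);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");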
rp                303 include/linux/nvme.h 	__u8			rp;
rp                265 include/sound/hdaudio.h 	unsigned short rp, wp;	/* RIRB read/write pointers */
rp               1181 kernel/kprobes.c 	struct kretprobe *rp = ri->rp;
rp               1186 kernel/kprobes.c 	if (likely(rp)) {
rp               1187 kernel/kprobes.c 		raw_spin_lock(&rp->lock);
rp               1188 kernel/kprobes.c 		hlist_add_head(&ri->hlist, &rp->free_instances);
rp               1189 kernel/kprobes.c 		raw_spin_unlock(&rp->lock);
rp               1272 kernel/kprobes.c static inline void free_rp_inst(struct kretprobe *rp)
rp               1277 kernel/kprobes.c 	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
rp               1283 kernel/kprobes.c static void cleanup_rp_inst(struct kretprobe *rp)
rp               1295 kernel/kprobes.c 			if (ri->rp == rp)
rp               1296 kernel/kprobes.c 				ri->rp = NULL;
rp               1300 kernel/kprobes.c 	free_rp_inst(rp);
rp               1860 kernel/kprobes.c 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
rp               1871 kernel/kprobes.c 		rp->nmissed++;
rp               1877 kernel/kprobes.c 	raw_spin_lock_irqsave(&rp->lock, flags);
rp               1878 kernel/kprobes.c 	if (!hlist_empty(&rp->free_instances)) {
rp               1879 kernel/kprobes.c 		ri = hlist_entry(rp->free_instances.first,
rp               1882 kernel/kprobes.c 		raw_spin_unlock_irqrestore(&rp->lock, flags);
rp               1884 kernel/kprobes.c 		ri->rp = rp;
rp               1887 kernel/kprobes.c 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
rp               1888 kernel/kprobes.c 			raw_spin_lock_irqsave(&rp->lock, flags);
rp               1889 kernel/kprobes.c 			hlist_add_head(&ri->hlist, &rp->free_instances);
rp               1890 kernel/kprobes.c 			raw_spin_unlock_irqrestore(&rp->lock, flags);
rp               1902 kernel/kprobes.c 		rp->nmissed++;
rp               1903 kernel/kprobes.c 		raw_spin_unlock_irqrestore(&rp->lock, flags);
rp               1928 kernel/kprobes.c int register_kretprobe(struct kretprobe *rp)
rp               1935 kernel/kprobes.c 	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
rp               1939 kernel/kprobes.c 		addr = kprobe_addr(&rp->kp);
rp               1949 kernel/kprobes.c 	rp->kp.pre_handler = pre_handler_kretprobe;
rp               1950 kernel/kprobes.c 	rp->kp.post_handler = NULL;
rp               1951 kernel/kprobes.c 	rp->kp.fault_handler = NULL;
rp               1954 kernel/kprobes.c 	if (rp->maxactive <= 0) {
rp               1956 kernel/kprobes.c 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
rp               1958 kernel/kprobes.c 		rp->maxactive = num_possible_cpus();
rp               1961 kernel/kprobes.c 	raw_spin_lock_init(&rp->lock);
rp               1962 kernel/kprobes.c 	INIT_HLIST_HEAD(&rp->free_instances);
rp               1963 kernel/kprobes.c 	for (i = 0; i < rp->maxactive; i++) {
rp               1965 kernel/kprobes.c 			       rp->data_size, GFP_KERNEL);
rp               1967 kernel/kprobes.c 			free_rp_inst(rp);
rp               1971 kernel/kprobes.c 		hlist_add_head(&inst->hlist, &rp->free_instances);
rp               1974 kernel/kprobes.c 	rp->nmissed = 0;
rp               1976 kernel/kprobes.c 	ret = register_kprobe(&rp->kp);
rp               1978 kernel/kprobes.c 		free_rp_inst(rp);
rp               2001 kernel/kprobes.c void unregister_kretprobe(struct kretprobe *rp)
rp               2003 kernel/kprobes.c 	unregister_kretprobes(&rp, 1);
rp               2030 kernel/kprobes.c int register_kretprobe(struct kretprobe *rp)
rp               2042 kernel/kprobes.c void unregister_kretprobe(struct kretprobe *rp)
rp                373 kernel/rcu/rcutorture.c rcu_torture_pipe_update_one(struct rcu_torture *rp)
rp                377 kernel/rcu/rcutorture.c 	i = rp->rtort_pipe_count;
rp                381 kernel/rcu/rcutorture.c 	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
rp                382 kernel/rcu/rcutorture.c 		rp->rtort_mbtest = 0;
rp                395 kernel/rcu/rcutorture.c 	struct rcu_torture *rp;
rp                400 kernel/rcu/rcutorture.c 	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
rp                401 kernel/rcu/rcutorture.c 		if (rcu_torture_pipe_update_one(rp)) {
rp                402 kernel/rcu/rcutorture.c 			list_del(&rp->rtort_free);
rp                403 kernel/rcu/rcutorture.c 			rcu_torture_free(rp);
rp                411 kernel/rcu/rcutorture.c 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
rp                418 kernel/rcu/rcutorture.c 	if (rcu_torture_pipe_update_one(rp))
rp                419 kernel/rcu/rcutorture.c 		rcu_torture_free(rp);
rp                421 kernel/rcu/rcutorture.c 		cur_ops->deferred_free(rp);
rp                548 kernel/rcu/rcutorture.c static void srcu_torture_deferred_free(struct rcu_torture *rp)
rp                550 kernel/rcu/rcutorture.c 	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
rp                946 kernel/rcu/rcutorture.c 	struct rcu_torture *rp;
rp               1000 kernel/rcu/rcutorture.c 		rp = rcu_torture_alloc();
rp               1001 kernel/rcu/rcutorture.c 		if (rp == NULL)
rp               1003 kernel/rcu/rcutorture.c 		rp->rtort_pipe_count = 0;
rp               1009 kernel/rcu/rcutorture.c 		rp->rtort_mbtest = 1;
rp               1010 kernel/rcu/rcutorture.c 		rcu_assign_pointer(rcu_torture_current, rp);
rp                295 kernel/sched/topology.c static void destroy_perf_domain_rcu(struct rcu_head *rp)
rp                299 kernel/sched/topology.c 	pd = container_of(rp, struct perf_domain, rcu);
rp                190 kernel/test_kprobes.c static struct kretprobe rp = {
rp                200 kernel/test_kprobes.c 	ret = register_kretprobe(&rp);
rp                207 kernel/test_kprobes.c 	unregister_kretprobe(&rp);
rp                242 kernel/test_kprobes.c 	struct kretprobe *rps[2] = {&rp, &rp2};
rp                245 kernel/test_kprobes.c 	rp.kp.addr = NULL;
rp                246 kernel/test_kprobes.c 	rp.kp.flags = 0;
rp                 58 kernel/trace/trace_kprobe.c 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
rp                 89 kernel/trace/trace_kprobe.c 	return tk->rp.handler != NULL;
rp                 99 kernel/trace/trace_kprobe.c 	return tk->rp.kp.offset;
rp                104 kernel/trace/trace_kprobe.c 	return !!(kprobe_gone(&tk->rp.kp));
rp                150 kernel/trace/trace_kprobe.c 		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
rp                151 kernel/trace/trace_kprobe.c 	else if (tk->rp.kp.offset)
rp                153 kernel/trace/trace_kprobe.c 			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
rp                186 kernel/trace/trace_kprobe.c 	return !(list_empty(&tk->rp.kp.list) &&
rp                187 kernel/trace/trace_kprobe.c 		 hlist_unhashed(&tk->rp.kp.hlist));
rp                200 kernel/trace/trace_kprobe.c 			addr += tk->rp.kp.offset;
rp                202 kernel/trace/trace_kprobe.c 		addr = (unsigned long)tk->rp.kp.addr;
rp                223 kernel/trace/trace_kprobe.c 	return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
rp                224 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
rp                225 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
rp                279 kernel/trace/trace_kprobe.c 		tk->rp.kp.symbol_name = tk->symbol;
rp                280 kernel/trace/trace_kprobe.c 		tk->rp.kp.offset = offs;
rp                282 kernel/trace/trace_kprobe.c 		tk->rp.kp.addr = addr;
rp                285 kernel/trace/trace_kprobe.c 		tk->rp.handler = kretprobe_dispatcher;
rp                287 kernel/trace/trace_kprobe.c 		tk->rp.kp.pre_handler = kprobe_dispatcher;
rp                289 kernel/trace/trace_kprobe.c 	tk->rp.maxactive = maxactive;
rp                290 kernel/trace/trace_kprobe.c 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
rp                291 kernel/trace/trace_kprobe.c 	INIT_LIST_HEAD(&tk->rp.kp.list);
rp                323 kernel/trace/trace_kprobe.c 			ret = enable_kretprobe(&tk->rp);
rp                325 kernel/trace/trace_kprobe.c 			ret = enable_kprobe(&tk->rp.kp);
rp                341 kernel/trace/trace_kprobe.c 			disable_kretprobe(&tk->rp);
rp                343 kernel/trace/trace_kprobe.c 			disable_kprobe(&tk->rp.kp);
rp                506 kernel/trace/trace_kprobe.c 		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
rp                508 kernel/trace/trace_kprobe.c 		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
rp                511 kernel/trace/trace_kprobe.c 		ret = register_kretprobe(&tk->rp);
rp                513 kernel/trace/trace_kprobe.c 		ret = register_kprobe(&tk->rp.kp);
rp                523 kernel/trace/trace_kprobe.c 			unregister_kretprobe(&tk->rp);
rp                525 kernel/trace/trace_kprobe.c 			unregister_kprobe(&tk->rp.kp);
rp                527 kernel/trace/trace_kprobe.c 		INIT_HLIST_NODE(&tk->rp.kp.hlist);
rp                528 kernel/trace/trace_kprobe.c 		INIT_LIST_HEAD(&tk->rp.kp.list);
rp                529 kernel/trace/trace_kprobe.c 		if (tk->rp.kp.symbol_name)
rp                530 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr = NULL;
rp                921 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
rp                922 kernel/trace/trace_kprobe.c 		seq_printf(m, "%d", tk->rp.maxactive);
rp                927 kernel/trace/trace_kprobe.c 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
rp                928 kernel/trace/trace_kprobe.c 	else if (tk->rp.kp.offset)
rp                930 kernel/trace/trace_kprobe.c 			   tk->rp.kp.offset);
rp               1004 kernel/trace/trace_kprobe.c 		   tk->rp.kp.nmissed);
rp               1204 kernel/trace/trace_kprobe.c 	entry->ip = (unsigned long)tk->rp.kp.addr;
rp               1252 kernel/trace/trace_kprobe.c 	entry->func = (unsigned long)tk->rp.kp.addr;
rp               1412 kernel/trace/trace_kprobe.c 	entry->ip = (unsigned long)tk->rp.kp.addr;
rp               1448 kernel/trace/trace_kprobe.c 	entry->func = (unsigned long)tk->rp.kp.addr;
rp               1475 kernel/trace/trace_kprobe.c 		*probe_offset = tk->rp.kp.offset;
rp               1480 kernel/trace/trace_kprobe.c 		*probe_addr = (unsigned long)tk->rp.kp.addr;
rp               1520 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
rp               1538 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
rp                 30 lib/mpi/mpi-pow.c 	mpi_ptr_t rp, ep, mp, bp;
rp                 46 lib/mpi/mpi-pow.c 	rp = res->d;
rp                 59 lib/mpi/mpi-pow.c 			rp = res->d;
rp                 60 lib/mpi/mpi-pow.c 			rp[0] = 1;
rp                108 lib/mpi/mpi-pow.c 		if (rp == ep || rp == mp || rp == bp) {
rp                109 lib/mpi/mpi-pow.c 			rp = mpi_alloc_limb_space(size);
rp                110 lib/mpi/mpi-pow.c 			if (!rp)
rp                116 lib/mpi/mpi-pow.c 			rp = res->d;
rp                119 lib/mpi/mpi-pow.c 		if (rp == bp) {
rp                125 lib/mpi/mpi-pow.c 			MPN_COPY(bp, rp, bsize);
rp                127 lib/mpi/mpi-pow.c 		if (rp == ep) {
rp                132 lib/mpi/mpi-pow.c 			MPN_COPY(ep, rp, esize);
rp                134 lib/mpi/mpi-pow.c 		if (rp == mp) {
rp                140 lib/mpi/mpi-pow.c 			MPN_COPY(mp, rp, msize);
rp                144 lib/mpi/mpi-pow.c 	MPN_COPY(rp, bp, bsize);
rp                184 lib/mpi/mpi-pow.c 					mpih_sqr_n_basecase(xp, rp, rsize);
rp                200 lib/mpi/mpi-pow.c 					mpih_sqr_n(xp, rp, rsize, tspace);
rp                210 lib/mpi/mpi-pow.c 				tp = rp;
rp                211 lib/mpi/mpi-pow.c 				rp = xp;
rp                220 lib/mpi/mpi-pow.c 						    (xp, rp, rsize, bp, bsize,
rp                225 lib/mpi/mpi-pow.c 						    (xp, rp, rsize, bp, bsize,
rp                238 lib/mpi/mpi-pow.c 					tp = rp;
rp                239 lib/mpi/mpi-pow.c 					rp = xp;
rp                263 lib/mpi/mpi-pow.c 			    mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt);
rp                264 lib/mpi/mpi-pow.c 			rp = res->d;
rp                266 lib/mpi/mpi-pow.c 				rp[rsize] = carry_limb;
rp                270 lib/mpi/mpi-pow.c 			MPN_COPY(res->d, rp, rsize);
rp                271 lib/mpi/mpi-pow.c 			rp = res->d;
rp                275 lib/mpi/mpi-pow.c 			mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
rp                281 lib/mpi/mpi-pow.c 			mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
rp                282 lib/mpi/mpi-pow.c 		MPN_NORMALIZE(rp, rsize);
rp                288 lib/mpi/mpi-pow.c 		mpihelp_sub(rp, mp, msize, rp, rsize);
rp                291 lib/mpi/mpi-pow.c 		MPN_NORMALIZE(rp, rsize);
rp                301 lib/mpi/mpi-pow.c 		mpi_assign_limb_space(res, rp, size);
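The lib/mpi/mpi-pow.c lines above use rp as the result limb pointer inside mpi_powm(), the kernel MPI library's modular exponentiation. A hedged sketch of calling it from big-endian byte buffers; demo_modexp, its parameter names and the minimal error handling are illustrative only.

/*
 * Compute res = base^exp mod mod from raw big-endian buffers.
 */
#include <linux/errno.h>
#include <linux/mpi.h>
#include <linux/types.h>

static int demo_modexp(const u8 *b, size_t blen, const u8 *e, size_t elen,
		       const u8 *m, size_t mlen)
{
	MPI base = mpi_read_raw_data(b, blen);
	MPI exp  = mpi_read_raw_data(e, elen);
	MPI mod  = mpi_read_raw_data(m, mlen);
	MPI res  = mpi_alloc(0);
	int ret  = -ENOMEM;

	if (base && exp && mod && res)
		ret = mpi_powm(res, base, exp, mod);	/* res = base^exp mod mod */

	mpi_free(res);
	mpi_free(mod);
	mpi_free(exp);
	mpi_free(base);
	return ret;
}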
rp                219 net/bluetooth/amp.c 	struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
rp                223 net/bluetooth/amp.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                225 net/bluetooth/amp.c 	if (rp->status)
rp                228 net/bluetooth/amp.c 	frag_len = skb->len - sizeof(*rp);
rp                229 net/bluetooth/amp.c 	rem_len = __le16_to_cpu(rp->rem_len);
rp                234 net/bluetooth/amp.c 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
rp                238 net/bluetooth/amp.c 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
rp                243 net/bluetooth/amp.c 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
rp                249 net/bluetooth/amp.c 	a2mp_send_getampassoc_rsp(hdev, rp->status);
rp                250 net/bluetooth/amp.c 	a2mp_send_create_phy_link_req(hdev, rp->status);
rp                317 net/bluetooth/amp.c 	struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
rp                320 net/bluetooth/amp.c 	       hdev->name, rp->status, rp->phy_handle);
rp                322 net/bluetooth/amp.c 	if (rp->status)
rp                325 net/bluetooth/amp.c 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
rp                379 net/bluetooth/cmtp/capi.c static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
rp                386 net/bluetooth/cmtp/capi.c 	int err = 0, nconn, want = rp->level3cnt;
rp                389 net/bluetooth/cmtp/capi.c 		ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
rp                406 net/bluetooth/cmtp/capi.c 	capimsg_setu16(buf, 2, rp->datablkcnt);
rp                407 net/bluetooth/cmtp/capi.c 	capimsg_setu16(buf, 4, rp->datablklen);
rp                103 net/bluetooth/hci_event.c 	struct hci_rp_role_discovery *rp = (void *) skb->data;
rp                106 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                108 net/bluetooth/hci_event.c 	if (rp->status)
rp                113 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                115 net/bluetooth/hci_event.c 		conn->role = rp->role;
rp                122 net/bluetooth/hci_event.c 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
rp                125 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                127 net/bluetooth/hci_event.c 	if (rp->status)
rp                132 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                134 net/bluetooth/hci_event.c 		conn->link_policy = __le16_to_cpu(rp->policy);
rp                141 net/bluetooth/hci_event.c 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
rp                145 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                147 net/bluetooth/hci_event.c 	if (rp->status)
rp                156 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                166 net/bluetooth/hci_event.c 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
rp                168 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                170 net/bluetooth/hci_event.c 	if (rp->status)
rp                173 net/bluetooth/hci_event.c 	hdev->link_policy = __le16_to_cpu(rp->policy);
rp                230 net/bluetooth/hci_event.c 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
rp                233 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                239 net/bluetooth/hci_event.c 	if (!rp->status && sent->read_all == 0x01) {
rp                240 net/bluetooth/hci_event.c 		hdev->stored_max_keys = rp->max_keys;
rp                241 net/bluetooth/hci_event.c 		hdev->stored_num_keys = rp->num_keys;
rp                248 net/bluetooth/hci_event.c 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
rp                250 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                252 net/bluetooth/hci_event.c 	if (rp->status)
rp                255 net/bluetooth/hci_event.c 	if (rp->num_keys <= hdev->stored_num_keys)
rp                256 net/bluetooth/hci_event.c 		hdev->stored_num_keys -= rp->num_keys;
rp                284 net/bluetooth/hci_event.c 	struct hci_rp_read_local_name *rp = (void *) skb->data;
rp                286 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                288 net/bluetooth/hci_event.c 	if (rp->status)
rp                293 net/bluetooth/hci_event.c 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
rp                384 net/bluetooth/hci_event.c 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
rp                386 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                388 net/bluetooth/hci_event.c 	if (rp->status)
rp                391 net/bluetooth/hci_event.c 	memcpy(hdev->dev_class, rp->dev_class, 3);
rp                421 net/bluetooth/hci_event.c 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
rp                424 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                426 net/bluetooth/hci_event.c 	if (rp->status)
rp                429 net/bluetooth/hci_event.c 	setting = __le16_to_cpu(rp->voice_setting);
rp                474 net/bluetooth/hci_event.c 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
rp                476 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                478 net/bluetooth/hci_event.c 	if (rp->status)
rp                481 net/bluetooth/hci_event.c 	hdev->num_iac = rp->num_iac;
rp                550 net/bluetooth/hci_event.c 	struct hci_rp_read_local_version *rp = (void *) skb->data;
rp                552 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                554 net/bluetooth/hci_event.c 	if (rp->status)
rp                559 net/bluetooth/hci_event.c 		hdev->hci_ver = rp->hci_ver;
rp                560 net/bluetooth/hci_event.c 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
rp                561 net/bluetooth/hci_event.c 		hdev->lmp_ver = rp->lmp_ver;
rp                562 net/bluetooth/hci_event.c 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
rp                563 net/bluetooth/hci_event.c 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
rp                570 net/bluetooth/hci_event.c 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
rp                572 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                574 net/bluetooth/hci_event.c 	if (rp->status)
rp                579 net/bluetooth/hci_event.c 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
rp                585 net/bluetooth/hci_event.c 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
rp                588 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                590 net/bluetooth/hci_event.c 	if (rp->status)
rp                595 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                597 net/bluetooth/hci_event.c 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
rp                605 net/bluetooth/hci_event.c 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
rp                609 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                611 net/bluetooth/hci_event.c 	if (rp->status)
rp                620 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                630 net/bluetooth/hci_event.c 	struct hci_rp_read_local_features *rp = (void *) skb->data;
rp                632 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                634 net/bluetooth/hci_event.c 	if (rp->status)
rp                637 net/bluetooth/hci_event.c 	memcpy(hdev->features, rp->features, 8);
rp                680 net/bluetooth/hci_event.c 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
rp                682 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                684 net/bluetooth/hci_event.c 	if (rp->status)
rp                687 net/bluetooth/hci_event.c 	if (hdev->max_page < rp->max_page)
rp                688 net/bluetooth/hci_event.c 		hdev->max_page = rp->max_page;
rp                690 net/bluetooth/hci_event.c 	if (rp->page < HCI_MAX_PAGES)
rp                691 net/bluetooth/hci_event.c 		memcpy(hdev->features[rp->page], rp->features, 8);
rp                697 net/bluetooth/hci_event.c 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
rp                699 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                701 net/bluetooth/hci_event.c 	if (rp->status)
rp                704 net/bluetooth/hci_event.c 	hdev->flow_ctl_mode = rp->mode;
rp                709 net/bluetooth/hci_event.c 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
rp                711 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                713 net/bluetooth/hci_event.c 	if (rp->status)
rp                716 net/bluetooth/hci_event.c 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
rp                717 net/bluetooth/hci_event.c 	hdev->sco_mtu  = rp->sco_mtu;
rp                718 net/bluetooth/hci_event.c 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
rp                719 net/bluetooth/hci_event.c 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
rp                735 net/bluetooth/hci_event.c 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
rp                737 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                739 net/bluetooth/hci_event.c 	if (rp->status)
rp                743 net/bluetooth/hci_event.c 		bacpy(&hdev->bdaddr, &rp->bdaddr);
rp                746 net/bluetooth/hci_event.c 		bacpy(&hdev->setup_addr, &rp->bdaddr);
rp                752 net/bluetooth/hci_event.c 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
rp                754 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                756 net/bluetooth/hci_event.c 	if (rp->status)
rp                760 net/bluetooth/hci_event.c 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
rp                761 net/bluetooth/hci_event.c 		hdev->page_scan_window = __le16_to_cpu(rp->window);
rp                787 net/bluetooth/hci_event.c 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
rp                789 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                791 net/bluetooth/hci_event.c 	if (rp->status)
rp                795 net/bluetooth/hci_event.c 		hdev->page_scan_type = rp->type;
rp                817 net/bluetooth/hci_event.c 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
rp                819 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                821 net/bluetooth/hci_event.c 	if (rp->status)
rp                824 net/bluetooth/hci_event.c 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
rp                825 net/bluetooth/hci_event.c 	hdev->block_len = __le16_to_cpu(rp->block_len);
rp                826 net/bluetooth/hci_event.c 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
rp                836 net/bluetooth/hci_event.c 	struct hci_rp_read_clock *rp = (void *) skb->data;
rp                842 net/bluetooth/hci_event.c 	if (skb->len < sizeof(*rp))
rp                845 net/bluetooth/hci_event.c 	if (rp->status)
rp                855 net/bluetooth/hci_event.c 		hdev->clock = le32_to_cpu(rp->clock);
rp                859 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp                861 net/bluetooth/hci_event.c 		conn->clock = le32_to_cpu(rp->clock);
rp                862 net/bluetooth/hci_event.c 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
rp                872 net/bluetooth/hci_event.c 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
rp                874 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                876 net/bluetooth/hci_event.c 	if (rp->status)
rp                879 net/bluetooth/hci_event.c 	hdev->amp_status = rp->amp_status;
rp                880 net/bluetooth/hci_event.c 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
rp                881 net/bluetooth/hci_event.c 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
rp                882 net/bluetooth/hci_event.c 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
rp                883 net/bluetooth/hci_event.c 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
rp                884 net/bluetooth/hci_event.c 	hdev->amp_type = rp->amp_type;
rp                885 net/bluetooth/hci_event.c 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
rp                886 net/bluetooth/hci_event.c 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
rp                887 net/bluetooth/hci_event.c 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
rp                888 net/bluetooth/hci_event.c 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
rp                894 net/bluetooth/hci_event.c 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
rp                896 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                898 net/bluetooth/hci_event.c 	if (rp->status)
rp                901 net/bluetooth/hci_event.c 	hdev->inq_tx_power = rp->tx_power;
rp                906 net/bluetooth/hci_event.c 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
rp                910 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                915 net/bluetooth/hci_event.c 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
rp                917 net/bluetooth/hci_event.c 	if (rp->status)
rp                934 net/bluetooth/hci_event.c 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
rp                936 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                941 net/bluetooth/hci_event.c 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp                942 net/bluetooth/hci_event.c 						 rp->status);
rp                950 net/bluetooth/hci_event.c 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
rp                952 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                954 net/bluetooth/hci_event.c 	if (rp->status)
rp                957 net/bluetooth/hci_event.c 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
rp                958 net/bluetooth/hci_event.c 	hdev->le_pkts = rp->le_max_pkt;
rp                968 net/bluetooth/hci_event.c 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
rp                970 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                972 net/bluetooth/hci_event.c 	if (rp->status)
rp                975 net/bluetooth/hci_event.c 	memcpy(hdev->le_features, rp->features, 8);
rp                981 net/bluetooth/hci_event.c 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
rp                983 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp                985 net/bluetooth/hci_event.c 	if (rp->status)
rp                988 net/bluetooth/hci_event.c 	hdev->adv_tx_power = rp->tx_power;
rp                993 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
rp                995 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1000 net/bluetooth/hci_event.c 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
rp               1001 net/bluetooth/hci_event.c 						 rp->status);
rp               1009 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
rp               1011 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1016 net/bluetooth/hci_event.c 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
rp               1017 net/bluetooth/hci_event.c 						     ACL_LINK, 0, rp->status);
rp               1024 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
rp               1026 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1031 net/bluetooth/hci_event.c 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
rp               1032 net/bluetooth/hci_event.c 						 0, rp->status);
rp               1040 net/bluetooth/hci_event.c 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
rp               1042 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1047 net/bluetooth/hci_event.c 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
rp               1048 net/bluetooth/hci_event.c 						     ACL_LINK, 0, rp->status);
rp               1056 net/bluetooth/hci_event.c 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
rp               1058 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1064 net/bluetooth/hci_event.c 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
rp               1066 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1381 net/bluetooth/hci_event.c 	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
rp               1383 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
rp               1384 net/bluetooth/hci_event.c 	       rp->num_of_sets);
rp               1386 net/bluetooth/hci_event.c 	if (rp->status)
rp               1389 net/bluetooth/hci_event.c 	hdev->le_num_of_adv_sets = rp->num_of_sets;
rp               1395 net/bluetooth/hci_event.c 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
rp               1397 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
rp               1399 net/bluetooth/hci_event.c 	if (rp->status)
rp               1402 net/bluetooth/hci_event.c 	hdev->le_white_list_size = rp->size;
rp               1459 net/bluetooth/hci_event.c 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
rp               1461 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1463 net/bluetooth/hci_event.c 	if (rp->status)
rp               1466 net/bluetooth/hci_event.c 	memcpy(hdev->le_states, rp->le_states, 8);
rp               1472 net/bluetooth/hci_event.c 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
rp               1474 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1476 net/bluetooth/hci_event.c 	if (rp->status)
rp               1479 net/bluetooth/hci_event.c 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
rp               1480 net/bluetooth/hci_event.c 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
rp               1557 net/bluetooth/hci_event.c 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
rp               1559 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
rp               1561 net/bluetooth/hci_event.c 	if (rp->status)
rp               1564 net/bluetooth/hci_event.c 	hdev->le_resolv_list_size = rp->size;
rp               1594 net/bluetooth/hci_event.c 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
rp               1596 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1598 net/bluetooth/hci_event.c 	if (rp->status)
rp               1601 net/bluetooth/hci_event.c 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
rp               1602 net/bluetooth/hci_event.c 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
rp               1603 net/bluetooth/hci_event.c 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
rp               1604 net/bluetooth/hci_event.c 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
rp               1662 net/bluetooth/hci_event.c 	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
rp               1666 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1668 net/bluetooth/hci_event.c 	if (rp->status)
rp               1679 net/bluetooth/hci_event.c 		hdev->adv_tx_power = rp->tx_power;
rp               1684 net/bluetooth/hci_event.c 			adv_instance->tx_power = rp->tx_power;
rp               1693 net/bluetooth/hci_event.c 	struct hci_rp_read_rssi *rp = (void *) skb->data;
rp               1696 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1698 net/bluetooth/hci_event.c 	if (rp->status)
rp               1703 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp               1705 net/bluetooth/hci_event.c 		conn->rssi = rp->rssi;
rp               1713 net/bluetooth/hci_event.c 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
rp               1716 net/bluetooth/hci_event.c 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
rp               1718 net/bluetooth/hci_event.c 	if (rp->status)
rp               1727 net/bluetooth/hci_event.c 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
rp               1733 net/bluetooth/hci_event.c 		conn->tx_power = rp->tx_power;
rp               1736 net/bluetooth/hci_event.c 		conn->max_tx_power = rp->tx_power;
rp               2878 net/bluetooth/hci_event.c 	const struct hci_rp_read_enc_key_size *rp;
rp               2884 net/bluetooth/hci_event.c 	if (!skb || skb->len < sizeof(*rp)) {
rp               2889 net/bluetooth/hci_event.c 	rp = (void *)skb->data;
rp               2890 net/bluetooth/hci_event.c 	handle = le16_to_cpu(rp->handle);
rp               2902 net/bluetooth/hci_event.c 	if (rp->status) {
rp               2907 net/bluetooth/hci_event.c 		conn->enc_key_size = rp->key_size;
rp                281 net/bluetooth/mgmt.c 	struct mgmt_rp_read_version *rp = ver;
rp                283 net/bluetooth/mgmt.c 	rp->version = MGMT_VERSION;
rp                284 net/bluetooth/mgmt.c 	rp->revision = cpu_to_le16(MGMT_REVISION);
rp                290 net/bluetooth/mgmt.c 	struct mgmt_rp_read_version rp;
rp                294 net/bluetooth/mgmt.c 	mgmt_fill_version_info(&rp);
rp                297 net/bluetooth/mgmt.c 				 &rp, sizeof(rp));
rp                303 net/bluetooth/mgmt.c 	struct mgmt_rp_read_commands *rp;
rp                318 net/bluetooth/mgmt.c 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
rp                320 net/bluetooth/mgmt.c 	rp = kmalloc(rp_size, GFP_KERNEL);
rp                321 net/bluetooth/mgmt.c 	if (!rp)
rp                324 net/bluetooth/mgmt.c 	rp->num_commands = cpu_to_le16(num_commands);
rp                325 net/bluetooth/mgmt.c 	rp->num_events = cpu_to_le16(num_events);
rp                328 net/bluetooth/mgmt.c 		__le16 *opcode = rp->opcodes;
rp                336 net/bluetooth/mgmt.c 		__le16 *opcode = rp->opcodes;
rp                346 net/bluetooth/mgmt.c 				rp, rp_size);
rp                347 net/bluetooth/mgmt.c 	kfree(rp);
rp                355 net/bluetooth/mgmt.c 	struct mgmt_rp_read_index_list *rp;
rp                372 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + (2 * count);
rp                373 net/bluetooth/mgmt.c 	rp = kmalloc(rp_len, GFP_ATOMIC);
rp                374 net/bluetooth/mgmt.c 	if (!rp) {
rp                394 net/bluetooth/mgmt.c 			rp->index[count++] = cpu_to_le16(d->id);
rp                399 net/bluetooth/mgmt.c 	rp->num_controllers = cpu_to_le16(count);
rp                400 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + (2 * count);
rp                405 net/bluetooth/mgmt.c 				0, rp, rp_len);
rp                407 net/bluetooth/mgmt.c 	kfree(rp);
rp                415 net/bluetooth/mgmt.c 	struct mgmt_rp_read_unconf_index_list *rp;
rp                432 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + (2 * count);
rp                433 net/bluetooth/mgmt.c 	rp = kmalloc(rp_len, GFP_ATOMIC);
rp                434 net/bluetooth/mgmt.c 	if (!rp) {
rp                454 net/bluetooth/mgmt.c 			rp->index[count++] = cpu_to_le16(d->id);
rp                459 net/bluetooth/mgmt.c 	rp->num_controllers = cpu_to_le16(count);
rp                460 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + (2 * count);
rp                465 net/bluetooth/mgmt.c 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
rp                467 net/bluetooth/mgmt.c 	kfree(rp);
rp                475 net/bluetooth/mgmt.c 	struct mgmt_rp_read_ext_index_list *rp;
rp                490 net/bluetooth/mgmt.c 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
rp                491 net/bluetooth/mgmt.c 	if (!rp) {
rp                511 net/bluetooth/mgmt.c 				rp->entry[count].type = 0x01;
rp                513 net/bluetooth/mgmt.c 				rp->entry[count].type = 0x00;
rp                515 net/bluetooth/mgmt.c 			rp->entry[count].type = 0x02;
rp                520 net/bluetooth/mgmt.c 		rp->entry[count].bus = d->bus;
rp                521 net/bluetooth/mgmt.c 		rp->entry[count++].index = cpu_to_le16(d->id);
rp                525 net/bluetooth/mgmt.c 	rp->num_controllers = cpu_to_le16(count);
rp                538 net/bluetooth/mgmt.c 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
rp                539 net/bluetooth/mgmt.c 				struct_size(rp, entry, count));
rp                541 net/bluetooth/mgmt.c 	kfree(rp);
rp                595 net/bluetooth/mgmt.c 	struct mgmt_rp_read_config_info rp;
rp                602 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp                603 net/bluetooth/mgmt.c 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp                611 net/bluetooth/mgmt.c 	rp.supported_options = cpu_to_le32(options);
rp                612 net/bluetooth/mgmt.c 	rp.missing_options = get_missing_options(hdev);
rp                617 net/bluetooth/mgmt.c 				 &rp, sizeof(rp));
rp                969 net/bluetooth/mgmt.c 	struct mgmt_rp_read_info rp;
rp                975 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp                977 net/bluetooth/mgmt.c 	bacpy(&rp.bdaddr, &hdev->bdaddr);
rp                979 net/bluetooth/mgmt.c 	rp.version = hdev->hci_ver;
rp                980 net/bluetooth/mgmt.c 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp                982 net/bluetooth/mgmt.c 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp                983 net/bluetooth/mgmt.c 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
rp                985 net/bluetooth/mgmt.c 	memcpy(rp.dev_class, hdev->dev_class, 3);
rp                987 net/bluetooth/mgmt.c 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
rp                988 net/bluetooth/mgmt.c 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
rp                992 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
rp                993 net/bluetooth/mgmt.c 				 sizeof(rp));
rp               1024 net/bluetooth/mgmt.c 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
rp               1033 net/bluetooth/mgmt.c 	bacpy(&rp->bdaddr, &hdev->bdaddr);
rp               1035 net/bluetooth/mgmt.c 	rp->version = hdev->hci_ver;
rp               1036 net/bluetooth/mgmt.c 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
rp               1038 net/bluetooth/mgmt.c 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp               1039 net/bluetooth/mgmt.c 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
rp               1042 net/bluetooth/mgmt.c 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
rp               1043 net/bluetooth/mgmt.c 	rp->eir_len = cpu_to_le16(eir_len);
rp               1056 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
rp               1057 net/bluetooth/mgmt.c 				 sizeof(*rp) + eir_len);
rp               2377 net/bluetooth/mgmt.c 	struct mgmt_rp_unpair_device rp;
rp               2384 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               2385 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp               2386 net/bluetooth/mgmt.c 	rp.addr.type = cp->addr.type;
rp               2391 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               2396 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               2402 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_POWERED, &rp,
rp               2403 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2425 net/bluetooth/mgmt.c 						MGMT_STATUS_NOT_PAIRED, &rp,
rp               2426 net/bluetooth/mgmt.c 						sizeof(rp));
rp               2440 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_PAIRED, &rp,
rp               2441 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2478 net/bluetooth/mgmt.c 					&rp, sizeof(rp));
rp               2505 net/bluetooth/mgmt.c 	struct mgmt_rp_disconnect rp;
rp               2512 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               2513 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp               2514 net/bluetooth/mgmt.c 	rp.addr.type = cp->addr.type;
rp               2519 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               2525 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_POWERED, &rp,
rp               2526 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2532 net/bluetooth/mgmt.c 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
rp               2545 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_CONNECTED, &rp,
rp               2546 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2589 net/bluetooth/mgmt.c 	struct mgmt_rp_get_connections *rp;
rp               2610 net/bluetooth/mgmt.c 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
rp               2611 net/bluetooth/mgmt.c 	if (!rp) {
rp               2620 net/bluetooth/mgmt.c 		bacpy(&rp->addr[i].bdaddr, &c->dst);
rp               2621 net/bluetooth/mgmt.c 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
rp               2627 net/bluetooth/mgmt.c 	rp->conn_count = cpu_to_le16(i);
rp               2630 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
rp               2631 net/bluetooth/mgmt.c 				struct_size(rp, addr, i));
rp               2633 net/bluetooth/mgmt.c 	kfree(rp);
rp               2767 net/bluetooth/mgmt.c 	struct mgmt_rp_pair_device rp;
rp               2771 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &conn->dst);
rp               2772 net/bluetooth/mgmt.c 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
rp               2775 net/bluetooth/mgmt.c 				status, &rp, sizeof(rp));
rp               2845 net/bluetooth/mgmt.c 	struct mgmt_rp_pair_device rp;
rp               2853 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               2854 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp               2855 net/bluetooth/mgmt.c 	rp.addr.type = cp->addr.type;
rp               2860 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               2865 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               2871 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_POWERED, &rp,
rp               2872 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2878 net/bluetooth/mgmt.c 					MGMT_STATUS_ALREADY_PAIRED, &rp,
rp               2879 net/bluetooth/mgmt.c 					sizeof(rp));
rp               2925 net/bluetooth/mgmt.c 					status, &rp, sizeof(rp));
rp               2932 net/bluetooth/mgmt.c 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
rp               3318 net/bluetooth/mgmt.c 	struct mgmt_rp_get_phy_confguration rp;
rp               3324 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               3326 net/bluetooth/mgmt.c 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
rp               3327 net/bluetooth/mgmt.c 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
rp               3328 net/bluetooth/mgmt.c 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
rp               3333 net/bluetooth/mgmt.c 				 &rp, sizeof(rp));
rp               3556 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
rp               3558 net/bluetooth/mgmt.c 		if (skb->len < sizeof(*rp)) {
rp               3565 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
rp               3566 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
rp               3570 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
rp               3572 net/bluetooth/mgmt.c 		if (skb->len < sizeof(*rp)) {
rp               3579 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
rp               3580 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
rp               3582 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
rp               3583 net/bluetooth/mgmt.c 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
rp               5178 net/bluetooth/mgmt.c 	struct mgmt_rp_get_conn_info rp;
rp               5181 net/bluetooth/mgmt.c 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
rp               5184 net/bluetooth/mgmt.c 		rp.rssi = conn->rssi;
rp               5185 net/bluetooth/mgmt.c 		rp.tx_power = conn->tx_power;
rp               5186 net/bluetooth/mgmt.c 		rp.max_tx_power = conn->max_tx_power;
rp               5188 net/bluetooth/mgmt.c 		rp.rssi = HCI_RSSI_INVALID;
rp               5189 net/bluetooth/mgmt.c 		rp.tx_power = HCI_TX_POWER_INVALID;
rp               5190 net/bluetooth/mgmt.c 		rp.max_tx_power = HCI_TX_POWER_INVALID;
rp               5194 net/bluetooth/mgmt.c 				status, &rp, sizeof(rp));
rp               5260 net/bluetooth/mgmt.c 	struct mgmt_rp_get_conn_info rp;
rp               5267 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               5268 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp               5269 net/bluetooth/mgmt.c 	rp.addr.type = cp->addr.type;
rp               5274 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               5280 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_POWERED, &rp,
rp               5281 net/bluetooth/mgmt.c 					sizeof(rp));
rp               5293 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_CONNECTED, &rp,
rp               5294 net/bluetooth/mgmt.c 					sizeof(rp));
rp               5300 net/bluetooth/mgmt.c 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
rp               5364 net/bluetooth/mgmt.c 		rp.rssi = conn->rssi;
rp               5365 net/bluetooth/mgmt.c 		rp.tx_power = conn->tx_power;
rp               5366 net/bluetooth/mgmt.c 		rp.max_tx_power = conn->max_tx_power;
rp               5369 net/bluetooth/mgmt.c 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
rp               5380 net/bluetooth/mgmt.c 	struct mgmt_rp_get_clock_info rp;
rp               5384 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               5385 net/bluetooth/mgmt.c 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
rp               5392 net/bluetooth/mgmt.c 		rp.local_clock = cpu_to_le32(hdev->clock);
rp               5397 net/bluetooth/mgmt.c 		rp.piconet_clock = cpu_to_le32(conn->clock);
rp               5398 net/bluetooth/mgmt.c 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
rp               5402 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
rp               5403 net/bluetooth/mgmt.c 				sizeof(rp));
rp               5449 net/bluetooth/mgmt.c 	struct mgmt_rp_get_clock_info rp;
rp               5458 net/bluetooth/mgmt.c 	memset(&rp, 0, sizeof(rp));
rp               5459 net/bluetooth/mgmt.c 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp               5460 net/bluetooth/mgmt.c 	rp.addr.type = cp->addr.type;
rp               5465 net/bluetooth/mgmt.c 					 &rp, sizeof(rp));
rp               5471 net/bluetooth/mgmt.c 					MGMT_STATUS_NOT_POWERED, &rp,
rp               5472 net/bluetooth/mgmt.c 					sizeof(rp));
rp               5483 net/bluetooth/mgmt.c 						&rp, sizeof(rp));
rp               6043 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_data *rp;
rp               6045 net/bluetooth/mgmt.c 		if (skb->len != sizeof(*rp)) {
rp               6050 net/bluetooth/mgmt.c 			rp = (void *)skb->data;
rp               6053 net/bluetooth/mgmt.c 			h192 = rp->hash;
rp               6054 net/bluetooth/mgmt.c 			r192 = rp->rand;
rp               6059 net/bluetooth/mgmt.c 		struct hci_rp_read_local_oob_ext_data *rp;
rp               6061 net/bluetooth/mgmt.c 		if (skb->len != sizeof(*rp)) {
rp               6066 net/bluetooth/mgmt.c 			rp = (void *)skb->data;
rp               6074 net/bluetooth/mgmt.c 				h192 = rp->hash192;
rp               6075 net/bluetooth/mgmt.c 				r192 = rp->rand192;
rp               6078 net/bluetooth/mgmt.c 			h256 = rp->hash256;
rp               6079 net/bluetooth/mgmt.c 			r256 = rp->rand256;
rp               6159 net/bluetooth/mgmt.c 	struct mgmt_rp_read_local_oob_ext_data *rp;
rp               6193 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + eir_len;
rp               6194 net/bluetooth/mgmt.c 	rp = kmalloc(rp_len, GFP_ATOMIC);
rp               6195 net/bluetooth/mgmt.c 	if (!rp)
rp               6215 net/bluetooth/mgmt.c 			eir_len = eir_append_data(rp->eir, eir_len,
rp               6255 net/bluetooth/mgmt.c 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
rp               6263 net/bluetooth/mgmt.c 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
rp               6267 net/bluetooth/mgmt.c 			eir_len = eir_append_data(rp->eir, eir_len,
rp               6271 net/bluetooth/mgmt.c 			eir_len = eir_append_data(rp->eir, eir_len,
rp               6281 net/bluetooth/mgmt.c 		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
rp               6293 net/bluetooth/mgmt.c 	rp->type = cp->type;
rp               6294 net/bluetooth/mgmt.c 	rp->eir_len = cpu_to_le16(eir_len);
rp               6297 net/bluetooth/mgmt.c 				status, rp, sizeof(*rp) + eir_len);
rp               6302 net/bluetooth/mgmt.c 				 rp, sizeof(*rp) + eir_len,
rp               6306 net/bluetooth/mgmt.c 	kfree(rp);
rp               6345 net/bluetooth/mgmt.c 	struct mgmt_rp_read_adv_features *rp;
rp               6360 net/bluetooth/mgmt.c 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
rp               6361 net/bluetooth/mgmt.c 	rp = kmalloc(rp_len, GFP_ATOMIC);
rp               6362 net/bluetooth/mgmt.c 	if (!rp) {
rp               6369 net/bluetooth/mgmt.c 	rp->supported_flags = cpu_to_le32(supported_flags);
rp               6370 net/bluetooth/mgmt.c 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
rp               6371 net/bluetooth/mgmt.c 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
rp               6372 net/bluetooth/mgmt.c 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
rp               6373 net/bluetooth/mgmt.c 	rp->num_instances = hdev->adv_instance_cnt;
rp               6375 net/bluetooth/mgmt.c 	instance = rp->instance;
rp               6384 net/bluetooth/mgmt.c 				MGMT_STATUS_SUCCESS, rp, rp_len);
rp               6386 net/bluetooth/mgmt.c 	kfree(rp);
rp               6491 net/bluetooth/mgmt.c 	struct mgmt_rp_add_advertising rp;
rp               6523 net/bluetooth/mgmt.c 	rp.instance = cp->instance;
rp               6530 net/bluetooth/mgmt.c 				  mgmt_status(status), &rp, sizeof(rp));
rp               6542 net/bluetooth/mgmt.c 	struct mgmt_rp_add_advertising rp;
rp               6649 net/bluetooth/mgmt.c 		rp.instance = cp->instance;
rp               6651 net/bluetooth/mgmt.c 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
rp               6686 net/bluetooth/mgmt.c 	struct mgmt_rp_remove_advertising rp;
rp               6701 net/bluetooth/mgmt.c 	rp.instance = cp->instance;
rp               6704 net/bluetooth/mgmt.c 			  &rp, sizeof(rp));
rp               6715 net/bluetooth/mgmt.c 	struct mgmt_rp_remove_advertising rp;
rp               6760 net/bluetooth/mgmt.c 		rp.instance = cp->instance;
rp               6763 net/bluetooth/mgmt.c 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
rp               6788 net/bluetooth/mgmt.c 	struct mgmt_rp_get_adv_size_info rp;
rp               6812 net/bluetooth/mgmt.c 	rp.instance = cp->instance;
rp               6813 net/bluetooth/mgmt.c 	rp.flags = cp->flags;
rp               6814 net/bluetooth/mgmt.c 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
rp               6815 net/bluetooth/mgmt.c 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
rp               6818 net/bluetooth/mgmt.c 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
rp                137 net/bluetooth/mgmt_util.c 		      void *rp, size_t rp_len)
rp                160 net/bluetooth/mgmt_util.c 	if (rp)
rp                161 net/bluetooth/mgmt_util.c 		memcpy(ev->data, rp, rp_len);
rp                 38 net/bluetooth/mgmt_util.h 		      void *rp, size_t rp_len);
rp               2256 net/bluetooth/smp.c 	struct smp_cmd_security_req *rp = (void *) skb->data;
rp               2265 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
rp               2271 net/bluetooth/smp.c 	auth = rp->auth_req & AUTH_REQ_MASK(hdev);
rp               2304 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
rp               2453 net/bluetooth/smp.c 	struct smp_cmd_encrypt_info *rp = (void *) skb->data;
rp               2459 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
rp               2464 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
rp               2466 net/bluetooth/smp.c 	memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
rp               2473 net/bluetooth/smp.c 	struct smp_cmd_master_ident *rp = (void *) skb->data;
rp               2483 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
rp               2494 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
rp               2499 net/bluetooth/smp.c 			  rp->ediv, rp->rand);
rp               2598 net/bluetooth/smp.c 	struct smp_cmd_sign_info *rp = (void *) skb->data;
rp               2605 net/bluetooth/smp.c 	if (skb->len < sizeof(*rp))
rp               2611 net/bluetooth/smp.c 	skb_pull(skb, sizeof(*rp));
rp               2619 net/bluetooth/smp.c 		memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
rp                277 net/bridge/br_forward.c 	struct hlist_node *rp;
rp                279 net/bridge/br_forward.c 	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
rp                281 net/bridge/br_forward.c 	while (p || rp) {
rp                285 net/bridge/br_forward.c 		rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);
rp                306 net/bridge/br_forward.c 			rp = rcu_dereference(hlist_next_rcu(rp));
rp                 68 net/bridge/br_stp.c 	struct net_bridge_port *rp;
rp                 82 net/bridge/br_stp.c 	rp = br_get_port(br, root_port);
rp                 84 net/bridge/br_stp.c 	t = memcmp(&p->designated_root, &rp->designated_root, 8);
rp                 91 net/bridge/br_stp.c 	    rp->designated_cost + rp->path_cost)
rp                 94 net/bridge/br_stp.c 		 rp->designated_cost + rp->path_cost)
rp                 97 net/bridge/br_stp.c 	t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
rp                103 net/bridge/br_stp.c 	if (p->designated_port < rp->designated_port)
rp                105 net/bridge/br_stp.c 	else if (p->designated_port > rp->designated_port)
rp                108 net/bridge/br_stp.c 	if (p->port_id < rp->port_id)
rp                489 net/can/af_can.c static void can_rx_delete_receiver(struct rcu_head *rp)
rp                491 net/can/af_can.c 	struct receiver *rcv = container_of(rp, struct receiver, rcu);
rp                141 net/ceph/debugfs.c 	struct rb_node *rp;
rp                158 net/ceph/debugfs.c 	for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
rp                160 net/ceph/debugfs.c 		req = rb_entry(rp, struct ceph_mon_generic_request, node);
rp                317 net/decnet/dn_route.c static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
rp                339 net/decnet/dn_route.c 			*rp = rth;
rp                350 net/decnet/dn_route.c 	*rp = rt;
rp                805 net/ipv4/raw.c 	struct raw_sock *rp = raw_sk(sk);
rp                808 net/ipv4/raw.c 		memset(&rp->filter, 0, sizeof(rp->filter));
rp                104 net/ipv6/netfilter/ip6t_rt.c 		const u_int32_t *rp;
rp                106 net/ipv6/netfilter/ip6t_rt.c 		rp = skb_header_pointer(skb,
rp                112 net/ipv6/netfilter/ip6t_rt.c 		ret = (*rp == 0);
rp                419 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
rp                427 net/ipv6/raw.c 	if (!rp->checksum)
rp                540 net/ipv6/raw.c 				     struct raw6_sock *rp)
rp                550 net/ipv6/raw.c 	if (!rp->checksum)
rp                557 net/ipv6/raw.c 	offset = rp->offset;
rp                774 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
rp                956 net/ipv6/raw.c 			err = rawv6_push_pending_frames(sk, &fl6, rp);
rp               1020 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
rp               1051 net/ipv6/raw.c 			rp->checksum = 0;
rp               1053 net/ipv6/raw.c 			rp->checksum = 1;
rp               1054 net/ipv6/raw.c 			rp->offset = val;
rp               1114 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
rp               1130 net/ipv6/raw.c 		if (rp->checksum == 0)
rp               1133 net/ipv6/raw.c 			val = rp->offset;
rp               1261 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
rp               1265 net/ipv6/raw.c 		rp->checksum = 1;
rp               1266 net/ipv6/raw.c 		rp->offset   = 2;
rp               1269 net/ipv6/raw.c 		rp->checksum = 1;
rp               1270 net/ipv6/raw.c 		rp->offset   = 4;
rp                 17 net/mac80211/ethtool.c 				   struct ethtool_ringparam *rp)
rp                 21 net/mac80211/ethtool.c 	if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
rp                 24 net/mac80211/ethtool.c 	return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
rp                 28 net/mac80211/ethtool.c 				    struct ethtool_ringparam *rp)
rp                 32 net/mac80211/ethtool.c 	memset(rp, 0, sizeof(*rp));
rp                 34 net/mac80211/ethtool.c 	drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
rp                 35 net/mac80211/ethtool.c 			  &rp->rx_pending, &rp->rx_max_pending);
rp                136 net/packet/diag.c 	struct packet_diag_msg *rp;
rp                139 net/packet/diag.c 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
rp                143 net/packet/diag.c 	rp = nlmsg_data(nlh);
rp                144 net/packet/diag.c 	rp->pdiag_family = AF_PACKET;
rp                145 net/packet/diag.c 	rp->pdiag_type = sk->sk_type;
rp                146 net/packet/diag.c 	rp->pdiag_num = ntohs(po->num);
rp                147 net/packet/diag.c 	rp->pdiag_ino = sk_ino;
rp                148 net/packet/diag.c 	sock_diag_save_cookie(sk, rp->pdiag_cookie);
rp                799 net/sunrpc/cache.c 	struct cache_reader *rp = filp->private_data;
rp                812 net/sunrpc/cache.c 	while (rp->q.list.next != &cd->queue &&
rp                813 net/sunrpc/cache.c 	       list_entry(rp->q.list.next, struct cache_queue, list)
rp                815 net/sunrpc/cache.c 		struct list_head *next = rp->q.list.next;
rp                816 net/sunrpc/cache.c 		list_move(&rp->q.list, next);
rp                818 net/sunrpc/cache.c 	if (rp->q.list.next == &cd->queue) {
rp                821 net/sunrpc/cache.c 		WARN_ON_ONCE(rp->offset);
rp                824 net/sunrpc/cache.c 	rq = container_of(rp->q.list.next, struct cache_request, q.list);
rp                826 net/sunrpc/cache.c 	if (rp->offset == 0)
rp                837 net/sunrpc/cache.c 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
rp                840 net/sunrpc/cache.c 		list_move(&rp->q.list, &rq->q.list);
rp                843 net/sunrpc/cache.c 		if (rp->offset + count > rq->len)
rp                844 net/sunrpc/cache.c 			count = rq->len - rp->offset;
rp                846 net/sunrpc/cache.c 		if (copy_to_user(buf, rq->buf + rp->offset, count))
rp                848 net/sunrpc/cache.c 		rp->offset += count;
rp                849 net/sunrpc/cache.c 		if (rp->offset >= rq->len) {
rp                850 net/sunrpc/cache.c 			rp->offset = 0;
rp                852 net/sunrpc/cache.c 			list_move(&rp->q.list, &rq->q.list);
rp                858 net/sunrpc/cache.c 	if (rp->offset == 0) {
rp                958 net/sunrpc/cache.c 	struct cache_reader *rp = filp->private_data;
rp                966 net/sunrpc/cache.c 	if (!rp)
rp                971 net/sunrpc/cache.c 	for (cq= &rp->q; &cq->list != &cd->queue;
rp                986 net/sunrpc/cache.c 	struct cache_reader *rp = filp->private_data;
rp                989 net/sunrpc/cache.c 	if (cmd != FIONREAD || !rp)
rp                997 net/sunrpc/cache.c 	for (cq= &rp->q; &cq->list != &cd->queue;
rp               1002 net/sunrpc/cache.c 			len = cr->len - rp->offset;
rp               1013 net/sunrpc/cache.c 	struct cache_reader *rp = NULL;
rp               1019 net/sunrpc/cache.c 		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
rp               1020 net/sunrpc/cache.c 		if (!rp) {
rp               1024 net/sunrpc/cache.c 		rp->offset = 0;
rp               1025 net/sunrpc/cache.c 		rp->q.reader = 1;
rp               1028 net/sunrpc/cache.c 		list_add(&rp->q.list, &cd->queue);
rp               1033 net/sunrpc/cache.c 	filp->private_data = rp;
rp               1040 net/sunrpc/cache.c 	struct cache_reader *rp = filp->private_data;
rp               1042 net/sunrpc/cache.c 	if (rp) {
rp               1044 net/sunrpc/cache.c 		if (rp->offset) {
rp               1046 net/sunrpc/cache.c 			for (cq= &rp->q; &cq->list != &cd->queue;
rp               1053 net/sunrpc/cache.c 			rp->offset = 0;
rp               1055 net/sunrpc/cache.c 		list_del(&rp->q.list);
rp               1059 net/sunrpc/cache.c 		kfree(rp);
rp                426 net/xfrm/xfrm_user.c 					 struct nlattr *rp)
rp                431 net/xfrm/xfrm_user.c 	if (!replay_esn || !rp)
rp                434 net/xfrm/xfrm_user.c 	up = nla_data(rp);
rp                439 net/xfrm/xfrm_user.c 	if (nla_len(rp) < (int)ulen ||
rp                519 net/xfrm/xfrm_user.c 	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
rp                534 net/xfrm/xfrm_user.c 	if (rp) {
rp                536 net/xfrm/xfrm_user.c 		replay = nla_data(rp);
rp               2090 net/xfrm/xfrm_user.c 	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
rp               2096 net/xfrm/xfrm_user.c 	if (!lt && !rp && !re && !et && !rt)
rp                425 scripts/recordmcount.c static int arm_is_fake_mcount(Elf32_Rel const *rp)
rp                427 scripts/recordmcount.c 	switch (ELF32_R_TYPE(w(rp->r_info))) {
rp                458 scripts/recordmcount.c static uint64_t MIPS64_r_sym(Elf64_Rel const *rp)
rp                460 scripts/recordmcount.c 	return w(((union mips_r_info){ .r_info = rp->r_info }).r_mips.r_sym);
rp                463 scripts/recordmcount.c static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type)
rp                465 scripts/recordmcount.c 	rp->r_info = ((union mips_r_info){
rp                122 scripts/recordmcount.h static int fn_is_fake_mcount(Elf_Rel const *rp)
rp                126 scripts/recordmcount.h static int (*is_fake_mcount)(Elf_Rel const *rp) = fn_is_fake_mcount;
rp                128 scripts/recordmcount.h static uint_t fn_ELF_R_SYM(Elf_Rel const *rp)
rp                130 scripts/recordmcount.h 	return ELF_R_SYM(_w(rp->r_info));
rp                132 scripts/recordmcount.h static uint_t (*Elf_r_sym)(Elf_Rel const *rp) = fn_ELF_R_SYM;
rp                134 scripts/recordmcount.h static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type)
rp                136 scripts/recordmcount.h 	rp->r_info = _w(ELF_R_INFO(sym, type));
rp                138 scripts/recordmcount.h static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO;
rp                163 scripts/recordmcount.h static int MIPS_is_fake_mcount(Elf_Rel const *rp)
rp                166 scripts/recordmcount.h 	Elf_Addr current_r_offset = _w(rp->r_offset);
rp               1922 security/smack/smack_lsm.c 	struct smack_rule *rp;
rp               1929 security/smack/smack_lsm.c 		rp = list_entry(l, struct smack_rule, list);
rp               1930 security/smack/smack_lsm.c 		list_del(&rp->list);
rp               1931 security/smack/smack_lsm.c 		kmem_cache_free(smack_rule_cache, rp);
rp                159 sound/hda/hdac_bus.c 	unsigned int rp, caddr, res;
rp                162 sound/hda/hdac_bus.c 		rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
rp                163 sound/hda/hdac_bus.c 		bus->unsol_rp = rp;
rp                164 sound/hda/hdac_bus.c 		rp <<= 1;
rp                165 sound/hda/hdac_bus.c 		res = bus->unsol_queue[rp];
rp                166 sound/hda/hdac_bus.c 		caddr = bus->unsol_queue[rp + 1];
rp                 69 sound/hda/hdac_controller.c 	bus->rirb.wp = bus->rirb.rp = 0;
rp                144 sound/hda/hdac_controller.c 	unsigned int wp, rp;
rp                160 sound/hda/hdac_controller.c 	rp = snd_hdac_chip_readw(bus, CORBRP);
rp                161 sound/hda/hdac_controller.c 	if (wp == rp) {
rp                187 sound/hda/hdac_controller.c 	unsigned int rp, wp;
rp                201 sound/hda/hdac_controller.c 	while (bus->rirb.rp != wp) {
rp                202 sound/hda/hdac_controller.c 		bus->rirb.rp++;
rp                203 sound/hda/hdac_controller.c 		bus->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
rp                205 sound/hda/hdac_controller.c 		rp = bus->rirb.rp << 1; /* an RIRB entry is 8-bytes */
rp                206 sound/hda/hdac_controller.c 		res_ex = le32_to_cpu(bus->rirb.buf[rp + 1]);
rp                207 sound/hda/hdac_controller.c 		res = le32_to_cpu(bus->rirb.buf[rp]);
rp                212 sound/hda/hdac_controller.c 				res, res_ex, bus->rirb.rp, wp);
rp                112 sound/pci/lola/lola.c 	unsigned int rp, wp;
rp                120 sound/pci/lola/lola.c 	while (chip->rirb.rp != wp) {
rp                121 sound/pci/lola/lola.c 		chip->rirb.rp++;
rp                122 sound/pci/lola/lola.c 		chip->rirb.rp %= LOLA_CORB_ENTRIES;
rp                124 sound/pci/lola/lola.c 		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
rp                125 sound/pci/lola/lola.c 		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
rp                126 sound/pci/lola/lola.c 		res = le32_to_cpu(chip->rirb.buf[rp]);
rp                407 sound/pci/lola/lola.c 	chip->rirb.rp = chip->rirb.cmds = 0;
rp                212 sound/pci/lola/lola.h 	unsigned short rp, wp;	/* read/write pointers */
rp               1009 sound/sparc/amd7930.c 	struct resource *rp = &op->resource[0];
rp               1033 sound/sparc/amd7930.c 		rp->flags & 0xffL,
rp               1034 sound/sparc/amd7930.c 		(unsigned long long)rp->start,
rp               1858 sound/sparc/cs4231.c 	struct resource *rp = &op->resource[0];
rp               1868 sound/sparc/cs4231.c 		rp->flags & 0xffL,
rp               1869 sound/sparc/cs4231.c 		(unsigned long long)rp->start,
rp               2600 sound/sparc/dbri.c 	struct resource *rp;
rp               2626 sound/sparc/dbri.c 	rp = &op->resource[0];
rp               2629 sound/sparc/dbri.c 		rp->flags & 0xffL, (unsigned long long)rp->start, irq);
rp                 26 sound/xen/xen_snd_front_evtchnl.c 	RING_IDX i, rp;
rp                 34 sound/xen/xen_snd_front_evtchnl.c 	rp = channel->u.req.ring.sring->rsp_prod;
rp                 43 sound/xen/xen_snd_front_evtchnl.c 	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
rp                263 tools/usb/usbip/src/usbip_network.c 	struct addrinfo hints, *res, *rp;
rp                280 tools/usb/usbip/src/usbip_network.c 	for (rp = res; rp; rp = rp->ai_next) {
rp                281 tools/usb/usbip/src/usbip_network.c 		sockfd = socket(rp->ai_family, rp->ai_socktype,
rp                282 tools/usb/usbip/src/usbip_network.c 				rp->ai_protocol);
rp                291 tools/usb/usbip/src/usbip_network.c 		if (connect(sockfd, rp->ai_addr, rp->ai_addrlen) == 0)
rp                299 tools/usb/usbip/src/usbip_network.c 	if (!rp)