iop               187 arch/alpha/kernel/core_wildfire.c 	wildfire_iop *iop;
iop               286 arch/alpha/kernel/core_wildfire.c 		    iop = WILDFIRE_iop(soft_qbb);
iop               290 arch/alpha/kernel/core_wildfire.c 		    if ((iop->iop_hose[i].init.csr & 1) == 1 &&
iop               579 arch/alpha/kernel/core_wildfire.c 	wildfire_iop *iop = WILDFIRE_iop(qbbno);
iop               582 arch/alpha/kernel/core_wildfire.c 	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);
iop               584 arch/alpha/kernel/core_wildfire.c 	printk(KERN_ERR " IOA_CONFIG:          0x%16lx\n", iop->ioa_config.csr);
iop               585 arch/alpha/kernel/core_wildfire.c 	printk(KERN_ERR " IOD_CONFIG:          0x%16lx\n", iop->iod_config.csr);
iop               587 arch/alpha/kernel/core_wildfire.c 	       iop->iop_switch_credits.csr);
iop               589 arch/alpha/kernel/core_wildfire.c 	       iop->iop_hose_credits.csr);
iop               593 arch/alpha/kernel/core_wildfire.c 		       i, iop->iop_hose[i].init.csr);
iop               596 arch/alpha/kernel/core_wildfire.c 		       i, iop->iop_dev_int[i].target.csr);
iop               164 arch/m68k/mac/iop.c static __inline__ void iop_loadaddr(volatile struct mac_iop *iop, __u16 addr)
iop               166 arch/m68k/mac/iop.c 	iop->ram_addr_lo = addr;
iop               167 arch/m68k/mac/iop.c 	iop->ram_addr_hi = addr >> 8;
iop               170 arch/m68k/mac/iop.c static __inline__ __u8 iop_readb(volatile struct mac_iop *iop, __u16 addr)
iop               172 arch/m68k/mac/iop.c 	iop->ram_addr_lo = addr;
iop               173 arch/m68k/mac/iop.c 	iop->ram_addr_hi = addr >> 8;
iop               174 arch/m68k/mac/iop.c 	return iop->ram_data;
iop               177 arch/m68k/mac/iop.c static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 data)
iop               179 arch/m68k/mac/iop.c 	iop->ram_addr_lo = addr;
iop               180 arch/m68k/mac/iop.c 	iop->ram_addr_hi = addr >> 8;
iop               181 arch/m68k/mac/iop.c 	iop->ram_data = data;
iop               184 arch/m68k/mac/iop.c static __inline__ void iop_stop(volatile struct mac_iop *iop)
iop               186 arch/m68k/mac/iop.c 	iop->status_ctrl &= ~IOP_RUN;
iop               189 arch/m68k/mac/iop.c static __inline__ void iop_start(volatile struct mac_iop *iop)
iop               191 arch/m68k/mac/iop.c 	iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
iop               194 arch/m68k/mac/iop.c static __inline__ void iop_bypass(volatile struct mac_iop *iop)
iop               196 arch/m68k/mac/iop.c 	iop->status_ctrl |= IOP_BYPASS;
iop               199 arch/m68k/mac/iop.c static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
iop               201 arch/m68k/mac/iop.c 	iop->status_ctrl |= IOP_IRQ;
iop               204 arch/m68k/mac/iop.c static int iop_alive(volatile struct mac_iop *iop)
iop               208 arch/m68k/mac/iop.c 	retval = (iop_readb(iop, IOP_ADDR_ALIVE) == 0xFF);
iop               209 arch/m68k/mac/iop.c 	iop_writeb(iop, IOP_ADDR_ALIVE, 0);
iop               379 arch/m68k/mac/iop.c 	volatile struct mac_iop *iop = iop_base[msg->iop_num];
iop               385 arch/m68k/mac/iop.c 		iop_writeb(iop, offset, msg->message[i]);
iop               388 arch/m68k/mac/iop.c 	iop_writeb(iop, IOP_ADDR_SEND_STATE + msg->channel, IOP_MSG_NEW);
iop               390 arch/m68k/mac/iop.c 	iop_interrupt(iop);
iop               400 arch/m68k/mac/iop.c 	volatile struct mac_iop *iop = iop_base[iop_num];
iop               406 arch/m68k/mac/iop.c 	iop_writeb(iop, IOP_ADDR_SEND_STATE + chan, IOP_MSG_IDLE);
iop               413 arch/m68k/mac/iop.c 		msg->reply[i] = iop_readb(iop, offset);
iop               429 arch/m68k/mac/iop.c 	volatile struct mac_iop *iop = iop_base[iop_num];
iop               444 arch/m68k/mac/iop.c 		msg->message[i] = iop_readb(iop, offset);
iop               447 arch/m68k/mac/iop.c 	iop_writeb(iop, IOP_ADDR_RECV_STATE + chan, IOP_MSG_RCVD);
iop               567 arch/m68k/mac/iop.c 	volatile struct mac_iop *iop = iop_base[iop_num];
iop               570 arch/m68k/mac/iop.c 	iop_pr_debug("status %02X\n", iop->status_ctrl);
iop               574 arch/m68k/mac/iop.c 	if (iop->status_ctrl & IOP_INT0) {
iop               575 arch/m68k/mac/iop.c 		iop->status_ctrl = IOP_INT0 | IOP_RUN | IOP_AUTOINC;
iop               576 arch/m68k/mac/iop.c 		iop_pr_debug("new status %02X, send states", iop->status_ctrl);
iop               578 arch/m68k/mac/iop.c 			state = iop_readb(iop, IOP_ADDR_SEND_STATE + i);
iop               587 arch/m68k/mac/iop.c 	if (iop->status_ctrl & IOP_INT1) {	/* INT1 for incoming msgs */
iop               588 arch/m68k/mac/iop.c 		iop->status_ctrl = IOP_INT1 | IOP_RUN | IOP_AUTOINC;
iop               589 arch/m68k/mac/iop.c 		iop_pr_debug("new status %02X, recv states", iop->status_ctrl);
iop               591 arch/m68k/mac/iop.c 			state = iop_readb(iop, IOP_ADDR_RECV_STATE + i);
iop               320 arch/mips/include/asm/octeon/cvmx-pow-defs.h 		uint64_t iop:13;
iop               340 arch/mips/include/asm/octeon/cvmx-pow-defs.h 		uint64_t iop:13;
iop               311 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32e __iomem *iop;
iop               315 arch/powerpc/platforms/8xx/cpm1.c 		iop = (struct cpm_ioport32e __iomem *)
iop               318 arch/powerpc/platforms/8xx/cpm1.c 		iop = (struct cpm_ioport32e __iomem *)
iop               322 arch/powerpc/platforms/8xx/cpm1.c 		setbits32(&iop->dir, pin);
iop               324 arch/powerpc/platforms/8xx/cpm1.c 		clrbits32(&iop->dir, pin);
iop               327 arch/powerpc/platforms/8xx/cpm1.c 		setbits32(&iop->par, pin);
iop               329 arch/powerpc/platforms/8xx/cpm1.c 		clrbits32(&iop->par, pin);
iop               340 arch/powerpc/platforms/8xx/cpm1.c 			setbits32(&iop->sor, pin);
iop               342 arch/powerpc/platforms/8xx/cpm1.c 			clrbits32(&iop->sor, pin);
iop               353 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop =
iop               359 arch/powerpc/platforms/8xx/cpm1.c 		iop += port - 1;
iop               362 arch/powerpc/platforms/8xx/cpm1.c 		setbits16(&iop->dir, pin);
iop               364 arch/powerpc/platforms/8xx/cpm1.c 		clrbits16(&iop->dir, pin);
iop               367 arch/powerpc/platforms/8xx/cpm1.c 		setbits16(&iop->par, pin);
iop               369 arch/powerpc/platforms/8xx/cpm1.c 		clrbits16(&iop->par, pin);
iop               373 arch/powerpc/platforms/8xx/cpm1.c 			setbits16(&iop->odr_sor, pin);
iop               375 arch/powerpc/platforms/8xx/cpm1.c 			clrbits16(&iop->odr_sor, pin);
iop               379 arch/powerpc/platforms/8xx/cpm1.c 			setbits16(&iop->odr_sor, pin);
iop               381 arch/powerpc/platforms/8xx/cpm1.c 			clrbits16(&iop->odr_sor, pin);
iop               383 arch/powerpc/platforms/8xx/cpm1.c 			setbits16(&iop->intr, pin);
iop               385 arch/powerpc/platforms/8xx/cpm1.c 			clrbits16(&iop->intr, pin);
iop               546 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
iop               548 arch/powerpc/platforms/8xx/cpm1.c 	cpm1_gc->cpdata = in_be16(&iop->dat);
iop               554 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
iop               559 arch/powerpc/platforms/8xx/cpm1.c 	return !!(in_be16(&iop->dat) & pin_mask);
iop               566 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
iop               573 arch/powerpc/platforms/8xx/cpm1.c 	out_be16(&iop->dat, cpm1_gc->cpdata);
iop               602 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
iop               608 arch/powerpc/platforms/8xx/cpm1.c 	setbits16(&iop->dir, pin_mask);
iop               620 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
iop               626 arch/powerpc/platforms/8xx/cpm1.c 	clrbits16(&iop->dir, pin_mask);
iop               683 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
iop               685 arch/powerpc/platforms/8xx/cpm1.c 	cpm1_gc->cpdata = in_be32(&iop->dat);
iop               691 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
iop               696 arch/powerpc/platforms/8xx/cpm1.c 	return !!(in_be32(&iop->dat) & pin_mask);
iop               703 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
iop               710 arch/powerpc/platforms/8xx/cpm1.c 	out_be32(&iop->dat, cpm1_gc->cpdata);
iop               731 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
iop               737 arch/powerpc/platforms/8xx/cpm1.c 	setbits32(&iop->dir, pin_mask);
iop               749 arch/powerpc/platforms/8xx/cpm1.c 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
iop               755 arch/powerpc/platforms/8xx/cpm1.c 	clrbits32(&iop->dir, pin_mask);
iop               332 arch/powerpc/sysdev/cpm2.c 	struct cpm2_ioports __iomem *iop =
iop               338 arch/powerpc/sysdev/cpm2.c 		setbits32(&iop[port].dir, pin);
iop               340 arch/powerpc/sysdev/cpm2.c 		clrbits32(&iop[port].dir, pin);
iop               343 arch/powerpc/sysdev/cpm2.c 		setbits32(&iop[port].par, pin);
iop               345 arch/powerpc/sysdev/cpm2.c 		clrbits32(&iop[port].par, pin);
iop               348 arch/powerpc/sysdev/cpm2.c 		setbits32(&iop[port].sor, pin);
iop               350 arch/powerpc/sysdev/cpm2.c 		clrbits32(&iop[port].sor, pin);
iop               353 arch/powerpc/sysdev/cpm2.c 		setbits32(&iop[port].odr, pin);
iop               355 arch/powerpc/sysdev/cpm2.c 		clrbits32(&iop[port].odr, pin);
iop               111 arch/powerpc/sysdev/cpm_common.c 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
iop               113 arch/powerpc/sysdev/cpm_common.c 	cpm2_gc->cpdata = in_be32(&iop->dat);
iop               119 arch/powerpc/sysdev/cpm_common.c 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
iop               124 arch/powerpc/sysdev/cpm_common.c 	return !!(in_be32(&iop->dat) & pin_mask);
iop               131 arch/powerpc/sysdev/cpm_common.c 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
iop               138 arch/powerpc/sysdev/cpm_common.c 	out_be32(&iop->dat, cpm2_gc->cpdata);
iop               159 arch/powerpc/sysdev/cpm_common.c 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
iop               165 arch/powerpc/sysdev/cpm_common.c 	setbits32(&iop->dir, pin_mask);
iop               177 arch/powerpc/sysdev/cpm_common.c 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
iop               183 arch/powerpc/sysdev/cpm_common.c 	clrbits32(&iop->dir, pin_mask);
iop               125 drivers/infiniband/core/cq.c static int ib_poll_handler(struct irq_poll *iop, int budget)
iop               127 drivers/infiniband/core/cq.c 	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
iop               133 drivers/infiniband/core/cq.c 		irq_poll_complete(&cq->iop);
iop               135 drivers/infiniband/core/cq.c 			irq_poll_sched(&cq->iop);
iop               146 drivers/infiniband/core/cq.c 	irq_poll_sched(&cq->iop);
iop               226 drivers/infiniband/core/cq.c 		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
iop               297 drivers/infiniband/core/cq.c 		irq_poll_disable(&cq->iop);
iop              4457 drivers/infiniband/hw/mlx4/qp.c 				irq_poll_disable(&cq->iop);
iop              4458 drivers/infiniband/hw/mlx4/qp.c 				irq_poll_enable(&cq->iop);
iop              6419 drivers/infiniband/hw/mlx5/qp.c 				irq_poll_disable(&cq->iop);
iop              6420 drivers/infiniband/hw/mlx5/qp.c 				irq_poll_enable(&cq->iop);
iop                40 drivers/iommu/io-pgtable-arm-v7s.c 	container_of((x), struct arm_v7s_io_pgtable, iop)
iop               167 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable	iop;
iop               229 drivers/iommu/io-pgtable-arm-v7s.c 	return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
iop               235 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               283 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               416 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               477 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               524 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable *iop = &data->iop;
iop               531 drivers/iommu/io-pgtable-arm-v7s.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
iop               532 drivers/iommu/io-pgtable-arm-v7s.c 		    paddr >= (1ULL << data->iop.cfg.oas)))
iop               540 drivers/iommu/io-pgtable-arm-v7s.c 	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
iop               541 drivers/iommu/io-pgtable-arm-v7s.c 		io_pgtable_tlb_flush_walk(iop, iova, size,
iop               550 drivers/iommu/io-pgtable-arm-v7s.c static void arm_v7s_free_pgtable(struct io_pgtable *iop)
iop               552 drivers/iommu/io-pgtable-arm-v7s.c 	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
iop               571 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable *iop = &data->iop;
iop               586 drivers/iommu/io-pgtable-arm-v7s.c 	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
iop               589 drivers/iommu/io-pgtable-arm-v7s.c 	io_pgtable_tlb_flush_leaf(iop, iova, size, size);
iop               599 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               634 drivers/iommu/io-pgtable-arm-v7s.c 	io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
iop               644 drivers/iommu/io-pgtable-arm-v7s.c 	struct io_pgtable *iop = &data->iop;
iop               681 drivers/iommu/io-pgtable-arm-v7s.c 		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
iop               686 drivers/iommu/io-pgtable-arm-v7s.c 				io_pgtable_tlb_flush_walk(iop, iova, blk_size,
iop               690 drivers/iommu/io-pgtable-arm-v7s.c 			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
iop               698 drivers/iommu/io-pgtable-arm-v7s.c 				io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
iop               748 drivers/iommu/io-pgtable-arm-v7s.c 	return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
iop               786 drivers/iommu/io-pgtable-arm-v7s.c 	data->iop.ops = (struct io_pgtable_ops) {
iop               793 drivers/iommu/io-pgtable-arm-v7s.c 	data->iop.cfg = *cfg;
iop               834 drivers/iommu/io-pgtable-arm-v7s.c 	return &data->iop;
iop                29 drivers/iommu/io-pgtable-arm.c 	container_of((x), struct arm_lpae_io_pgtable, iop)
iop               181 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable	iop;
iop               305 drivers/iommu/io-pgtable-arm.c 	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
iop               308 drivers/iommu/io-pgtable-arm.c 	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
iop               313 drivers/iommu/io-pgtable-arm.c 	if (data->iop.fmt != ARM_MALI_LPAE)
iop               318 drivers/iommu/io-pgtable-arm.c 	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
iop               328 drivers/iommu/io-pgtable-arm.c 	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
iop               389 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               416 drivers/iommu/io-pgtable-arm.c 	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
iop               433 drivers/iommu/io-pgtable-arm.c 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
iop               434 drivers/iommu/io-pgtable-arm.c 	    data->iop.fmt == ARM_32_LPAE_S1) {
iop               452 drivers/iommu/io-pgtable-arm.c 	if (data->iop.fmt == ARM_64_LPAE_S2 ||
iop               453 drivers/iommu/io-pgtable-arm.c 	    data->iop.fmt == ARM_32_LPAE_S2) {
iop               490 drivers/iommu/io-pgtable-arm.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
iop               491 drivers/iommu/io-pgtable-arm.c 		    paddr >= (1ULL << data->iop.cfg.oas)))
iop               527 drivers/iommu/io-pgtable-arm.c 		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
iop               533 drivers/iommu/io-pgtable-arm.c 	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
iop               536 drivers/iommu/io-pgtable-arm.c static void arm_lpae_free_pgtable(struct io_pgtable *iop)
iop               538 drivers/iommu/io-pgtable-arm.c 	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
iop               550 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop               591 drivers/iommu/io-pgtable-arm.c 		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
iop               604 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable *iop = &data->iop;
iop               617 drivers/iommu/io-pgtable-arm.c 		__arm_lpae_set_pte(ptep, 0, &iop->cfg);
iop               619 drivers/iommu/io-pgtable-arm.c 		if (!iopte_leaf(pte, lvl, iop->fmt)) {
iop               621 drivers/iommu/io-pgtable-arm.c 			io_pgtable_tlb_flush_walk(iop, iova, size,
iop               625 drivers/iommu/io-pgtable-arm.c 		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
iop               633 drivers/iommu/io-pgtable-arm.c 			io_pgtable_tlb_add_page(iop, gather, iova, size);
iop               637 drivers/iommu/io-pgtable-arm.c 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
iop               658 drivers/iommu/io-pgtable-arm.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
iop               685 drivers/iommu/io-pgtable-arm.c 		if (iopte_leaf(pte, lvl, data->iop.fmt))
iop               779 drivers/iommu/io-pgtable-arm.c 	data->iop.ops = (struct io_pgtable_ops) {
iop               881 drivers/iommu/io-pgtable-arm.c 	return &data->iop;
iop               977 drivers/iommu/io-pgtable-arm.c 	return &data->iop;
iop               987 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable *iop;
iop               993 drivers/iommu/io-pgtable-arm.c 	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
iop               994 drivers/iommu/io-pgtable-arm.c 	if (iop) {
iop               999 drivers/iommu/io-pgtable-arm.c 	return iop;
iop              1005 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable *iop;
iop              1011 drivers/iommu/io-pgtable-arm.c 	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
iop              1012 drivers/iommu/io-pgtable-arm.c 	if (iop)
iop              1015 drivers/iommu/io-pgtable-arm.c 	return iop;
iop              1066 drivers/iommu/io-pgtable-arm.c 	return &data->iop;
iop              1130 drivers/iommu/io-pgtable-arm.c 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
iop                33 drivers/iommu/io-pgtable.c 	struct io_pgtable *iop;
iop                43 drivers/iommu/io-pgtable.c 	iop = fns->alloc(cfg, cookie);
iop                44 drivers/iommu/io-pgtable.c 	if (!iop)
iop                47 drivers/iommu/io-pgtable.c 	iop->fmt	= fmt;
iop                48 drivers/iommu/io-pgtable.c 	iop->cookie	= cookie;
iop                49 drivers/iommu/io-pgtable.c 	iop->cfg	= *cfg;
iop                51 drivers/iommu/io-pgtable.c 	return &iop->ops;
iop                61 drivers/iommu/io-pgtable.c 	struct io_pgtable *iop;
iop                66 drivers/iommu/io-pgtable.c 	iop = container_of(ops, struct io_pgtable, ops);
iop                67 drivers/iommu/io-pgtable.c 	io_pgtable_tlb_flush_all(iop);
iop                68 drivers/iommu/io-pgtable.c 	io_pgtable_init_table[iop->fmt]->free(iop);
iop                76 drivers/iommu/ipmmu-vmsa.c 	struct io_pgtable_ops *iop;
iop               504 drivers/iommu/ipmmu-vmsa.c 	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
iop               506 drivers/iommu/ipmmu-vmsa.c 	if (!domain->iop) {
iop               658 drivers/iommu/ipmmu-vmsa.c 	free_io_pgtable_ops(domain->iop);
iop               734 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->map(domain->iop, iova, paddr, size, prot);
iop               742 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->unmap(domain->iop, iova, size, gather);
iop               766 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->iova_to_phys(domain->iop, iova);
iop                45 drivers/iommu/msm_iommu.c 	struct io_pgtable_ops	*iop;
iop               361 drivers/iommu/msm_iommu.c 	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
iop               362 drivers/iommu/msm_iommu.c 	if (!priv->iop) {
iop               488 drivers/iommu/msm_iommu.c 	free_io_pgtable_ops(priv->iop);
iop               514 drivers/iommu/msm_iommu.c 	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
iop               527 drivers/iommu/msm_iommu.c 	len = priv->iop->unmap(priv->iop, iova, len, gather);
iop               107 drivers/iommu/mtk_iommu.c 	struct io_pgtable_ops		*iop;
iop               348 drivers/iommu/mtk_iommu.c 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
iop               349 drivers/iommu/mtk_iommu.c 	if (!dom->iop) {
iop               393 drivers/iommu/mtk_iommu.c 	free_io_pgtable_ops(dom->iop);
iop               442 drivers/iommu/mtk_iommu.c 	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
iop               457 drivers/iommu/mtk_iommu.c 	unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
iop               488 drivers/iommu/mtk_iommu.c 	pa = dom->iop->iova_to_phys(dom->iop, iova);
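
The ipmmu-vmsa, msm_iommu and mtk_iommu entries above all follow the same io-pgtable pattern: allocate a page-table ops handle with alloc_io_pgtable_ops(), route map/unmap/iova_to_phys through it, and release it with free_io_pgtable_ops(). Below is a minimal sketch of that pattern, assuming a hypothetical demo_domain structure and an already-populated io_pgtable_cfg; only the calls visible in the listing (alloc_io_pgtable_ops, free_io_pgtable_ops, ->map, ->unmap, ->iova_to_phys, format ARM_V7S) are taken as given.

#include <linux/io-pgtable.h>

/* Hypothetical driver domain, mirroring the iommu drivers listed above. */
struct demo_domain {
	struct io_pgtable_cfg	cfg;	/* filled elsewhere: pgsize_bitmap, ias/oas, tlb ops */
	struct io_pgtable_ops	*iop;	/* handle returned by alloc_io_pgtable_ops() */
};

static int demo_domain_init(struct demo_domain *dom, void *cookie)
{
	/* Pick a format and allocate its ops, as msm_iommu.c:361 and mtk_iommu.c:348 do. */
	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, cookie);
	if (!dom->iop)
		return -EINVAL;
	return 0;
}

static int demo_map(struct demo_domain *dom, unsigned long iova,
		    phys_addr_t paddr, size_t size, int prot)
{
	/* Same call shape as ipmmu-vmsa.c:734 and mtk_iommu.c:442 above. */
	return dom->iop->map(dom->iop, iova, paddr, size, prot);
}

static size_t demo_unmap(struct demo_domain *dom, unsigned long iova, size_t size,
			 struct iommu_iotlb_gather *gather)
{
	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static phys_addr_t demo_iova_to_phys(struct demo_domain *dom, unsigned long iova)
{
	return dom->iop->iova_to_phys(dom->iop, iova);
}

static void demo_domain_destroy(struct demo_domain *dom)
{
	/* Calls back into the format's ->free(), see io-pgtable.c:68 above. */
	free_io_pgtable_ops(dom->iop);
}
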
iop               493 drivers/md/bcache/request.c 	struct data_insert_op	iop;
iop               510 drivers/md/bcache/request.c 		s->iop.status = bio->bi_status;
iop               512 drivers/md/bcache/request.c 		 ptr_stale(s->iop.c, &b->key, 0)) {
iop               513 drivers/md/bcache/request.c 		atomic_long_inc(&s->iop.c->cache_read_races);
iop               514 drivers/md/bcache/request.c 		s->iop.status = BLK_STS_IOERR;
iop               517 drivers/md/bcache/request.c 	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
iop               531 drivers/md/bcache/request.c 	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
iop               534 drivers/md/bcache/request.c 	if (KEY_INODE(k) != s->iop.inode ||
iop               537 drivers/md/bcache/request.c 		unsigned int sectors = KEY_INODE(k) == s->iop.inode
iop               568 drivers/md/bcache/request.c 	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
iop               569 drivers/md/bcache/request.c 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
iop               591 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, iop.cl);
iop               598 drivers/md/bcache/request.c 	ret = bch_btree_map_keys(&s->op, s->iop.c,
iop               599 drivers/md/bcache/request.c 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
iop               623 drivers/md/bcache/request.c 		if (!s->iop.status)
iop               624 drivers/md/bcache/request.c 			s->iop.status = BLK_STS_IOERR;
iop               639 drivers/md/bcache/request.c 		s->iop.status = bio->bi_status;
iop               663 drivers/md/bcache/request.c 		if (unlikely(s->iop.writeback &&
iop               669 drivers/md/bcache/request.c 			s->iop.status = bio->bi_status;
iop               687 drivers/md/bcache/request.c 		s->orig_bio->bi_status = s->iop.status;
iop               717 drivers/md/bcache/request.c 	atomic_dec(&s->iop.c->search_inflight);
iop               719 drivers/md/bcache/request.c 	if (s->iop.bio)
iop               720 drivers/md/bcache/request.c 		bio_put(s->iop.bio);
iop               724 drivers/md/bcache/request.c 	mempool_free(s, &s->iop.c->search);
iop               747 drivers/md/bcache/request.c 	s->iop.c		= d->c;
iop               748 drivers/md/bcache/request.c 	s->iop.bio		= NULL;
iop               749 drivers/md/bcache/request.c 	s->iop.inode		= d->id;
iop               750 drivers/md/bcache/request.c 	s->iop.write_point	= hash_long((unsigned long) current, 16);
iop               751 drivers/md/bcache/request.c 	s->iop.write_prio	= 0;
iop               752 drivers/md/bcache/request.c 	s->iop.status		= 0;
iop               753 drivers/md/bcache/request.c 	s->iop.flags		= 0;
iop               754 drivers/md/bcache/request.c 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
iop               755 drivers/md/bcache/request.c 	s->iop.wq		= bcache_wq;
iop               777 drivers/md/bcache/request.c 	if (s->iop.replace_collision)
iop               778 drivers/md/bcache/request.c 		bch_mark_cache_miss_collision(s->iop.c, s->d);
iop               780 drivers/md/bcache/request.c 	if (s->iop.bio)
iop               781 drivers/md/bcache/request.c 		bio_free_pages(s->iop.bio);
iop               802 drivers/md/bcache/request.c 		s->iop.status = 0;
iop               808 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
iop               819 drivers/md/bcache/request.c 	if (s->iop.replace_collision)
iop               820 drivers/md/bcache/request.c 		bch_mark_cache_miss_collision(s->iop.c, s->d);
iop               822 drivers/md/bcache/request.c 	if (s->iop.bio)
iop               823 drivers/md/bcache/request.c 		bio_free_pages(s->iop.bio);
iop               842 drivers/md/bcache/request.c 	if (s->iop.bio) {
iop               843 drivers/md/bcache/request.c 		bio_reset(s->iop.bio);
iop               844 drivers/md/bcache/request.c 		s->iop.bio->bi_iter.bi_sector =
iop               846 drivers/md/bcache/request.c 		bio_copy_dev(s->iop.bio, s->cache_miss);
iop               847 drivers/md/bcache/request.c 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
iop               848 drivers/md/bcache/request.c 		bch_bio_map(s->iop.bio, NULL);
iop               850 drivers/md/bcache/request.c 		bio_copy_data(s->cache_miss, s->iop.bio);
iop               862 drivers/md/bcache/request.c 	if (s->iop.bio &&
iop               863 drivers/md/bcache/request.c 	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
iop               864 drivers/md/bcache/request.c 		BUG_ON(!s->iop.replace);
iop               865 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
iop               876 drivers/md/bcache/request.c 	bch_mark_cache_accounting(s->iop.c, s->d,
iop               877 drivers/md/bcache/request.c 				  !s->cache_missed, s->iop.bypass);
iop               878 drivers/md/bcache/request.c 	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
iop               880 drivers/md/bcache/request.c 	if (s->iop.status)
iop               882 drivers/md/bcache/request.c 	else if (s->iop.bio || verify(dc))
iop               898 drivers/md/bcache/request.c 	if (s->cache_miss || s->iop.bypass) {
iop               906 drivers/md/bcache/request.c 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
iop               912 drivers/md/bcache/request.c 	s->iop.replace_key = KEY(s->iop.inode,
iop               916 drivers/md/bcache/request.c 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
iop               920 drivers/md/bcache/request.c 	s->iop.replace = true;
iop               945 drivers/md/bcache/request.c 		bch_mark_cache_readahead(s->iop.c, s->d);
iop               948 drivers/md/bcache/request.c 	s->iop.bio	= cache_bio;
iop               951 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
iop               960 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, miss, &s->cl);
iop               968 drivers/md/bcache/request.c 	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
iop               990 drivers/md/bcache/request.c 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
iop               998 drivers/md/bcache/request.c 		s->iop.bypass = false;
iop               999 drivers/md/bcache/request.c 		s->iop.writeback = true;
iop              1010 drivers/md/bcache/request.c 		s->iop.bypass = true;
iop              1014 drivers/md/bcache/request.c 			     s->iop.bypass)) {
iop              1015 drivers/md/bcache/request.c 		s->iop.bypass = false;
iop              1016 drivers/md/bcache/request.c 		s->iop.writeback = true;
iop              1019 drivers/md/bcache/request.c 	if (s->iop.bypass) {
iop              1020 drivers/md/bcache/request.c 		s->iop.bio = s->orig_bio;
iop              1021 drivers/md/bcache/request.c 		bio_get(s->iop.bio);
iop              1029 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
iop              1031 drivers/md/bcache/request.c 	} else if (s->iop.writeback) {
iop              1033 drivers/md/bcache/request.c 		s->iop.bio = bio;
iop              1045 drivers/md/bcache/request.c 				s->iop.status = BLK_STS_RESOURCE;
iop              1053 drivers/md/bcache/request.c 			closure_bio_submit(s->iop.c, flush, cl);
iop              1056 drivers/md/bcache/request.c 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
iop              1059 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
iop              1063 drivers/md/bcache/request.c 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
iop              1072 drivers/md/bcache/request.c 	if (s->iop.flush_journal)
iop              1073 drivers/md/bcache/request.c 		bch_journal_meta(s->iop.c, cl);
iop              1077 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, bio, cl);
iop              1227 drivers/md/bcache/request.c 			s->iop.bypass = check_should_bypass(dc, bio);
iop              1310 drivers/md/bcache/request.c 	if (s->iop.flush_journal)
iop              1311 drivers/md/bcache/request.c 		bch_journal_meta(s->iop.c, cl);
iop              1347 drivers/md/bcache/request.c 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
iop              1351 drivers/md/bcache/request.c 		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
iop              1352 drivers/md/bcache/request.c 		s->iop.writeback	= true;
iop              1353 drivers/md/bcache/request.c 		s->iop.bio		= bio;
iop              1355 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
iop              1357 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
iop               702 drivers/net/fddi/skfp/h/skfbi.h #define	ADDR(a)		(char far *) smc->hw.iop+(a)
iop               703 drivers/net/fddi/skfp/h/skfbi.h #define	ADDRS(smc,a)	(char far *) (smc)->hw.iop+(a)
iop               705 drivers/net/fddi/skfp/h/skfbi.h #define	ADDR(a)	(((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), \
iop               706 drivers/net/fddi/skfp/h/skfbi.h 	(smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
iop               707 drivers/net/fddi/skfp/h/skfbi.h 	(smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
iop               708 drivers/net/fddi/skfp/h/skfbi.h #define	ADDRS(smc,a) (((a)>>7) ? (outp((smc)->hw.iop+B0_RAP,(a)>>7), \
iop               709 drivers/net/fddi/skfp/h/skfbi.h 	((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
iop               710 drivers/net/fddi/skfp/h/skfbi.h 	((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
iop               747 drivers/net/fddi/skfp/h/skfbi.h #define GET_ISR_SMP(iop)	inpd((iop)+B0_ISRC)
iop               749 drivers/net/fddi/skfp/h/skfbi.h #define CHECK_ISR_SMP(iop)	(inpd((iop)+B0_ISRC) & inpd((iop)+B0_IMSK))
iop               769 drivers/net/fddi/skfp/h/skfbi.h #define CLI_FBI_SMP(iop)	outpd((iop)+B0_IMSK,0)
iop               770 drivers/net/fddi/skfp/h/skfbi.h #define	STI_FBI_SMP(smc,iop)	outpd((iop)+B0_IMSK,(smc)->hw.is_imask)
iop                60 drivers/net/fddi/skfp/h/targethw.h 	HW_PTR	iop ;			/* IO base address */
iop                51 drivers/net/fddi/skfp/h/targetos.h #define	ADDR(a) (smc->hw.iop+(a))
iop                53 drivers/net/fddi/skfp/h/targetos.h #define	ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), (smc->hw.iop+( ((a)&0x7F) | ((a)>>7 ? 0x80:0)) )) : (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
iop               268 drivers/net/fddi/skfp/skfddi.c 	smc->hw.iop = mem;
iop               341 drivers/net/fddi/skfp/skfddi.c 	iounmap(lp->hw.iop);
iop               343 drivers/net/fddi/skfp/skfddi.c 	ioport_unmap(lp->hw.iop);
iop               790 drivers/scsi/advansys.c #define ASC_GET_EISA_SLOT(iop)  (PortAddr)((iop) & 0xF000)
iop              10906 drivers/scsi/advansys.c static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
iop              10923 drivers/scsi/advansys.c 		asc_dvc_varp->iop_base = iop;
iop              10958 drivers/scsi/advansys.c 		boardp->ioport = iop;
iop              10961 drivers/scsi/advansys.c 				(ushort)inp(iop + 1), (ushort)inpw(iop));
iop              11194 drivers/scsi/advansys.c 		shost->io_port = iop;
iop              2032 drivers/scsi/be2iscsi/be_main.c static int be_iopoll(struct irq_poll *iop, int budget)
iop              2040 drivers/scsi/be2iscsi/be_main.c 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
iop              2043 drivers/scsi/be2iscsi/be_main.c 		irq_poll_complete(iop);
iop              2062 drivers/scsi/be2iscsi/be_main.c 		irq_poll_complete(iop);
iop                52 drivers/scsi/hptiop.c 		req = readl(&hba->u.itl.iop->inbound_queue);
iop                59 drivers/scsi/hptiop.c 		writel(req, &hba->u.itl.iop->outbound_queue);
iop                60 drivers/scsi/hptiop.c 		readl(&hba->u.itl.iop->outbound_intstatus);
iop                90 drivers/scsi/hptiop.c 	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
iop                99 drivers/scsi/hptiop.c 				((char __iomem *)hba->u.itl.iop + req);
iop               115 drivers/scsi/hptiop.c 	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
iop               123 drivers/scsi/hptiop.c 	status = readl(&iop->outbound_intstatus);
iop               126 drivers/scsi/hptiop.c 		u32 msg = readl(&iop->outbound_msgaddr0);
iop               129 drivers/scsi/hptiop.c 		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
iop               306 drivers/scsi/hptiop.c 	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
iop               307 drivers/scsi/hptiop.c 			&hba->u.itl.iop->inbound_queue);
iop               308 drivers/scsi/hptiop.c 	readl(&hba->u.itl.iop->outbound_intstatus);
iop               362 drivers/scsi/hptiop.c 	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
iop               363 drivers/scsi/hptiop.c 	readl(&hba->u.itl.iop->outbound_intstatus);
iop               406 drivers/scsi/hptiop.c 	req32 = readl(&hba->u.itl.iop->inbound_queue);
iop               411 drivers/scsi/hptiop.c 			((unsigned long)hba->u.itl.iop + req32);
iop               424 drivers/scsi/hptiop.c 	writel(req32, &hba->u.itl.iop->outbound_queue);
iop               478 drivers/scsi/hptiop.c 	req32 = readl(&hba->u.itl.iop->inbound_queue);
iop               483 drivers/scsi/hptiop.c 			((unsigned long)hba->u.itl.iop + req32);
iop               500 drivers/scsi/hptiop.c 	writel(req32, &hba->u.itl.iop->outbound_queue);
iop               552 drivers/scsi/hptiop.c 		&hba->u.itl.iop->outbound_intmask);
iop               614 drivers/scsi/hptiop.c 	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
iop               615 drivers/scsi/hptiop.c 	if (hba->u.itl.iop == NULL)
iop               618 drivers/scsi/hptiop.c 		hba->u.itl.plx = hba->u.itl.iop;
iop               619 drivers/scsi/hptiop.c 		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
iop               620 drivers/scsi/hptiop.c 		if (hba->u.itl.iop == NULL) {
iop               632 drivers/scsi/hptiop.c 	iounmap(hba->u.itl.iop);
iop               802 drivers/scsi/hptiop.c 			((unsigned long)hba->u.itl.iop + tag);
iop               831 drivers/scsi/hptiop.c 	writel(tag, &hba->u.itl.iop->outbound_queue);
iop               895 drivers/scsi/hptiop.c 			&hba->u.itl.iop->inbound_queue);
iop               898 drivers/scsi/hptiop.c 					&hba->u.itl.iop->inbound_queue);
iop              1527 drivers/scsi/hptiop.c 	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
iop              1530 drivers/scsi/hptiop.c 		&hba->u.itl.iop->outbound_intmask);
iop              1531 drivers/scsi/hptiop.c 	readl(&hba->u.itl.iop->outbound_intmask);
iop               273 drivers/scsi/hptiop.h 			struct hpt_iopmu_itl __iomem *iop;
iop              3748 drivers/scsi/ipr.c static int ipr_iopoll(struct irq_poll *iop, int budget);
iop              5763 drivers/scsi/ipr.c static int ipr_iopoll(struct irq_poll *iop, int budget)
iop              5772 drivers/scsi/ipr.c 	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
iop              5779 drivers/scsi/ipr.c 		irq_poll_complete(iop);
iop               447 drivers/scsi/qla2xxx/qla_def.h 			u32 iop[2];
iop              2495 drivers/scsi/qla2xxx/qla_def.h 	u32			iop[2];
iop               285 drivers/scsi/qla2xxx/qla_init.c 		ea.iop[0] = lio->u.logio.iop[0];
iop               286 drivers/scsi/qla2xxx/qla_init.c 		ea.iop[1] = lio->u.logio.iop[1];
iop               557 drivers/scsi/qla2xxx/qla_init.c 	ea.iop[0] = lio->u.logio.iop[0];
iop               558 drivers/scsi/qla2xxx/qla_init.c 	ea.iop[1] = lio->u.logio.iop[1];
iop              1212 drivers/scsi/qla2xxx/qla_init.c 		ea.iop[0] = lio->u.logio.iop[0];
iop              1213 drivers/scsi/qla2xxx/qla_init.c 		ea.iop[1] = lio->u.logio.iop[1];
iop              1883 drivers/scsi/qla2xxx/qla_init.c 		ea->fcport->nvme_prli_service_param = ea->iop[0];
iop              1884 drivers/scsi/qla2xxx/qla_init.c 		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
iop              1886 drivers/scsi/qla2xxx/qla_init.c 			    (ea->iop[1] & 0xffff) * 512;
iop              1892 drivers/scsi/qla2xxx/qla_init.c 		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
iop              1893 drivers/scsi/qla2xxx/qla_init.c 		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
iop              1949 drivers/scsi/qla2xxx/qla_init.c 	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
iop              2026 drivers/scsi/qla2xxx/qla_init.c 		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
iop              2027 drivers/scsi/qla2xxx/qla_init.c 		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
iop              2028 drivers/scsi/qla2xxx/qla_init.c 		cid.b.al_pa  = ea->iop[1] & 0xff;
iop              2041 drivers/scsi/qla2xxx/qla_init.c 		lid = ea->iop[1] & 0xffff;
iop              1687 drivers/scsi/qla2xxx/qla_isr.c 	uint32_t iop[2];
iop              1726 drivers/scsi/qla2xxx/qla_isr.c 			lio->u.logio.iop[0] =
iop              1728 drivers/scsi/qla2xxx/qla_isr.c 			lio->u.logio.iop[1] =
iop              1736 drivers/scsi/qla2xxx/qla_isr.c 		iop[0] = le32_to_cpu(logio->io_parameter[0]);
iop              1737 drivers/scsi/qla2xxx/qla_isr.c 		if (iop[0] & BIT_4) {
iop              1739 drivers/scsi/qla2xxx/qla_isr.c 			if (iop[0] & BIT_8)
iop              1741 drivers/scsi/qla2xxx/qla_isr.c 		} else if (iop[0] & BIT_5)
iop              1744 drivers/scsi/qla2xxx/qla_isr.c 		if (iop[0] & BIT_7)
iop              1755 drivers/scsi/qla2xxx/qla_isr.c 	iop[0] = le32_to_cpu(logio->io_parameter[0]);
iop              1756 drivers/scsi/qla2xxx/qla_isr.c 	iop[1] = le32_to_cpu(logio->io_parameter[1]);
iop              1757 drivers/scsi/qla2xxx/qla_isr.c 	lio->u.logio.iop[0] = iop[0];
iop              1758 drivers/scsi/qla2xxx/qla_isr.c 	lio->u.logio.iop[1] = iop[1];
iop              1759 drivers/scsi/qla2xxx/qla_isr.c 	switch (iop[0]) {
iop              1762 drivers/scsi/qla2xxx/qla_isr.c 		data[1] = LSW(iop[1]);
iop              1768 drivers/scsi/qla2xxx/qla_isr.c 		if (iop[1] == 0x0606) {
iop              2366 drivers/scsi/qla2xxx/qla_mbx.c 	uint32_t	iop[2];
iop              2409 drivers/scsi/qla2xxx/qla_mbx.c 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
iop              2410 drivers/scsi/qla2xxx/qla_mbx.c 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
iop              2415 drivers/scsi/qla2xxx/qla_mbx.c 		    iop[0], iop[1]);
iop              2417 drivers/scsi/qla2xxx/qla_mbx.c 		switch (iop[0]) {
iop              2420 drivers/scsi/qla2xxx/qla_mbx.c 			mb[1] = LSW(iop[1]);
iop              2446 drivers/scsi/qla2xxx/qla_mbx.c 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
iop              2450 drivers/scsi/qla2xxx/qla_mbx.c 		if (iop[0] & BIT_4) {
iop              2451 drivers/scsi/qla2xxx/qla_mbx.c 			if (iop[0] & BIT_8)
iop              1662 drivers/video/fbdev/cyber2000fb.c 	unsigned char __iomem *iop;
iop              1664 drivers/video/fbdev/cyber2000fb.c 	iop = ioremap(0x3000000, 0x5000);
iop              1665 drivers/video/fbdev/cyber2000fb.c 	if (iop == NULL) {
iop              1670 drivers/video/fbdev/cyber2000fb.c 	writeb(0x18, iop + 0x46e8);
iop              1671 drivers/video/fbdev/cyber2000fb.c 	writeb(0x01, iop + 0x102);
iop              1672 drivers/video/fbdev/cyber2000fb.c 	writeb(0x08, iop + 0x46e8);
iop              1673 drivers/video/fbdev/cyber2000fb.c 	writeb(EXT_BIU_MISC, iop + 0x3ce);
iop              1674 drivers/video/fbdev/cyber2000fb.c 	writeb(EXT_BIU_MISC_LIN_ENABLE, iop + 0x3cf);
iop              1676 drivers/video/fbdev/cyber2000fb.c 	iounmap(iop);
iop               224 fs/fuse/control.c 					  const struct inode_operations *iop,
iop               247 fs/fuse/control.c 	if (iop)
iop               248 fs/fuse/control.c 		inode->i_op = iop;
iop                25 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
iop                27 fs/iomap/buffered-io.c 	if (iop || i_blocksize(inode) == PAGE_SIZE)
iop                28 fs/iomap/buffered-io.c 		return iop;
iop                30 fs/iomap/buffered-io.c 	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
iop                31 fs/iomap/buffered-io.c 	atomic_set(&iop->read_count, 0);
iop                32 fs/iomap/buffered-io.c 	atomic_set(&iop->write_count, 0);
iop                33 fs/iomap/buffered-io.c 	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
iop                40 fs/iomap/buffered-io.c 	set_page_private(page, (unsigned long)iop);
iop                42 fs/iomap/buffered-io.c 	return iop;
iop                48 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
iop                50 fs/iomap/buffered-io.c 	if (!iop)
iop                52 fs/iomap/buffered-io.c 	WARN_ON_ONCE(atomic_read(&iop->read_count));
iop                53 fs/iomap/buffered-io.c 	WARN_ON_ONCE(atomic_read(&iop->write_count));
iop                57 fs/iomap/buffered-io.c 	kfree(iop);
iop                64 fs/iomap/buffered-io.c iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
iop                81 fs/iomap/buffered-io.c 	if (iop) {
iop                86 fs/iomap/buffered-io.c 			if (!test_bit(i, iop->uptodate))
iop                96 fs/iomap/buffered-io.c 			if (test_bit(i, iop->uptodate)) {
iop               123 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
iop               130 fs/iomap/buffered-io.c 	if (iop) {
iop               133 fs/iomap/buffered-io.c 				set_bit(i, iop->uptodate);
iop               134 fs/iomap/buffered-io.c 			else if (!test_bit(i, iop->uptodate))
iop               144 fs/iomap/buffered-io.c iomap_read_finish(struct iomap_page *iop, struct page *page)
iop               146 fs/iomap/buffered-io.c 	if (!iop || atomic_dec_and_test(&iop->read_count))
iop               154 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
iop               163 fs/iomap/buffered-io.c 	iomap_read_finish(iop, page);
iop               212 fs/iomap/buffered-io.c 	struct iomap_page *iop = iomap_page_create(inode, page);
iop               225 fs/iomap/buffered-io.c 	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
iop               246 fs/iomap/buffered-io.c 		if (!same_page && iop)
iop               247 fs/iomap/buffered-io.c 			atomic_inc(&iop->read_count);
iop               256 fs/iomap/buffered-io.c 	if (iop)
iop               257 fs/iomap/buffered-io.c 		atomic_inc(&iop->read_count);
iop               432 fs/iomap/buffered-io.c 	struct iomap_page *iop = to_iomap_page(page);
iop               444 fs/iomap/buffered-io.c 	if (iop) {
iop               446 fs/iomap/buffered-io.c 			if (!test_bit(i, iop->uptodate))
iop               553 fs/iomap/buffered-io.c 	struct iomap_page *iop = iomap_page_create(inode, page);
iop               564 fs/iomap/buffered-io.c 		iomap_adjust_read_range(inode, iop, &block_start,
iop               120 fs/proc/base.c 	const struct inode_operations *iop;
iop               129 fs/proc/base.c 	.iop  = IOP,					\
iop              2466 fs/proc/base.c 	if (p->iop)
iop              2467 fs/proc/base.c 		inode->i_op = p->iop;
iop                64 fs/xfs/xfs_aops.c 	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);
iop                71 fs/xfs/xfs_aops.c 	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
iop                72 fs/xfs/xfs_aops.c 	ASSERT(!iop || atomic_read(&iop->write_count) > 0);
iop                74 fs/xfs/xfs_aops.c 	if (!iop || atomic_dec_and_test(&iop->write_count))
iop               759 fs/xfs/xfs_aops.c 	struct iomap_page	*iop,
iop               789 fs/xfs/xfs_aops.c 	if (iop && !same_page)
iop               790 fs/xfs/xfs_aops.c 		atomic_inc(&iop->write_count);
iop               874 fs/xfs/xfs_aops.c 	struct iomap_page	*iop = to_iomap_page(page);
iop               880 fs/xfs/xfs_aops.c 	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
iop               881 fs/xfs/xfs_aops.c 	ASSERT(!iop || atomic_read(&iop->write_count) == 0);
iop               891 fs/xfs/xfs_aops.c 		if (iop && !test_bit(i, iop->uptodate))
iop               899 fs/xfs/xfs_aops.c 		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
iop               192 include/linux/io-pgtable.h static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
iop               194 include/linux/io-pgtable.h 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
iop               198 include/linux/io-pgtable.h io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
iop               201 include/linux/io-pgtable.h 	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
iop               205 include/linux/io-pgtable.h io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
iop               208 include/linux/io-pgtable.h 	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
iop               212 include/linux/io-pgtable.h io_pgtable_tlb_add_page(struct io_pgtable *iop,
iop               216 include/linux/io-pgtable.h 	if (iop->cfg.tlb->tlb_add_page)
iop               217 include/linux/io-pgtable.h 		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
iop               229 include/linux/io-pgtable.h 	void (*free)(struct io_pgtable *iop);
iop              1553 include/rdma/ib_verbs.h 		struct irq_poll		iop;
iop                50 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                55 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                60 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                66 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                75 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                86 include/uapi/linux/i2o-dev.h 	unsigned int iop;	/* IOP unit number */
iop                98 include/uapi/linux/i2o-dev.h 	unsigned int iop;
iop                27 lib/irq_poll.c void irq_poll_sched(struct irq_poll *iop)
iop                31 lib/irq_poll.c 	if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
iop                33 lib/irq_poll.c 	if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
iop                37 lib/irq_poll.c 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
iop                51 lib/irq_poll.c static void __irq_poll_complete(struct irq_poll *iop)
iop                53 lib/irq_poll.c 	list_del(&iop->list);
iop                55 lib/irq_poll.c 	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
iop                68 lib/irq_poll.c void irq_poll_complete(struct irq_poll *iop)
iop                73 lib/irq_poll.c 	__irq_poll_complete(iop);
iop                87 lib/irq_poll.c 		struct irq_poll *iop;
iop               105 lib/irq_poll.c 		iop = list_entry(list->next, struct irq_poll, list);
iop               107 lib/irq_poll.c 		weight = iop->weight;
iop               109 lib/irq_poll.c 		if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
iop               110 lib/irq_poll.c 			work = iop->poll(iop, weight);
iop               125 lib/irq_poll.c 			if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
iop               126 lib/irq_poll.c 				__irq_poll_complete(iop);
iop               128 lib/irq_poll.c 				list_move_tail(&iop->list, list);
iop               145 lib/irq_poll.c void irq_poll_disable(struct irq_poll *iop)
iop               147 lib/irq_poll.c 	set_bit(IRQ_POLL_F_DISABLE, &iop->state);
iop               148 lib/irq_poll.c 	while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
iop               150 lib/irq_poll.c 	clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
iop               162 lib/irq_poll.c void irq_poll_enable(struct irq_poll *iop)
iop               164 lib/irq_poll.c 	BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
iop               166 lib/irq_poll.c 	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
iop               179 lib/irq_poll.c void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
iop               181 lib/irq_poll.c 	memset(iop, 0, sizeof(*iop));
iop               182 lib/irq_poll.c 	INIT_LIST_HEAD(&iop->list);
iop               183 lib/irq_poll.c 	iop->weight = weight;
iop               184 lib/irq_poll.c 	iop->poll = poll_fn;
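
Taken together, the lib/irq_poll.c lines and the callers above (ib_poll_handler, be_iopoll, ipr_iopoll) show the irq_poll contract: a driver registers a poll callback with irq_poll_init(), kicks it from its hard-IRQ handler with irq_poll_sched(), and the callback processes at most 'budget' items, calling irq_poll_complete() before returning when it finishes early. A hedged sketch of that usage follows; the demo_dev structure, demo_* names, DEMO_POLL_BUDGET and the atomic_t stand-in for completion-queue state are illustrative assumptions, and a real driver would also re-arm its device interrupt on completion (as ib_poll_handler does) — only the irq_poll_* calls shown in the listing are assumed.

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/irq_poll.h>

#define DEMO_POLL_BUDGET	64	/* illustrative weight, cf. IB_POLL_BUDGET_IRQ above */

struct demo_dev {
	struct irq_poll	iop;
	atomic_t	pending;	/* stand-in for real completion-queue state */
};

/* Poll callback: same shape as ib_poll_handler(), be_iopoll() and ipr_iopoll() above. */
static int demo_poll(struct irq_poll *iop, int budget)
{
	struct demo_dev *dev = container_of(iop, struct demo_dev, iop);
	int done = 0;

	/* Drain at most 'budget' completions per invocation. */
	while (done < budget && atomic_add_unless(&dev->pending, -1, 0))
		done++;

	/* Finished early: give the slot back so irq_poll_sched() can re-arm it. */
	if (done < budget)
		irq_poll_complete(iop);

	return done;
}

/* Hard-IRQ handler only defers the work into irq_poll's softirq context. */
static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_dev *dev = data;

	irq_poll_sched(&dev->iop);
	return IRQ_HANDLED;
}

static void demo_setup(struct demo_dev *dev)
{
	atomic_set(&dev->pending, 0);
	irq_poll_init(&dev->iop, DEMO_POLL_BUDGET, demo_poll);
}
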