ctlr 191 arch/powerpc/include/asm/mpc52xx_psc.h u8 ctlr; /* PSC + 0x1c */ ctlr 335 arch/powerpc/include/asm/mpc52xx_psc.h u8 ctlr; /* PSC + 0x2c */ ctlr 1463 drivers/dma/ti/edma.c int ctlr; ctlr 1468 drivers/dma/ti/edma.c ctlr = ecc->id; ctlr 1469 drivers/dma/ti/edma.c if (ctlr < 0) ctlr 1563 drivers/dma/ti/edma.c int ctlr; ctlr 1567 drivers/dma/ti/edma.c ctlr = ecc->id; ctlr 1568 drivers/dma/ti/edma.c if (ctlr < 0) ctlr 55 drivers/ipack/devices/scc2698.h u8 d7, ctlr; /* Counter timer lower register of block */ ctlr 3535 drivers/irqchip/irq-gic-v3-its.c u32 ctlr; ctlr 3550 drivers/irqchip/irq-gic-v3-its.c ctlr = readl_relaxed(its_base + GITS_CTLR); ctlr 3551 drivers/irqchip/irq-gic-v3-its.c ctlr &= ~GITS_CTLR_ITS_NUMBER; ctlr 3552 drivers/irqchip/irq-gic-v3-its.c ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; ctlr 3553 drivers/irqchip/irq-gic-v3-its.c writel_relaxed(ctlr, its_base + GITS_CTLR); ctlr 3554 drivers/irqchip/irq-gic-v3-its.c ctlr = readl_relaxed(its_base + GITS_CTLR); ctlr 3555 drivers/irqchip/irq-gic-v3-its.c if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { ctlr 3556 drivers/irqchip/irq-gic-v3-its.c its_number = ctlr & GITS_CTLR_ITS_NUMBER; ctlr 3574 drivers/irqchip/irq-gic-v3-its.c u32 val, ctlr; ctlr 3681 drivers/irqchip/irq-gic-v3-its.c ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr 3682 drivers/irqchip/irq-gic-v3-its.c ctlr |= GITS_CTLR_ENABLE; ctlr 3684 drivers/irqchip/irq-gic-v3-its.c ctlr |= GITS_CTLR_ImDe; ctlr 3685 drivers/irqchip/irq-gic-v3-its.c writel_relaxed(ctlr, its->base + GITS_CTLR); ctlr 21 drivers/mtd/hyperbus/hbmc-am654.c struct hyperbus_ctlr ctlr; ctlr 91 drivers/mtd/hyperbus/hbmc-am654.c priv->ctlr.dev = dev; ctlr 92 drivers/mtd/hyperbus/hbmc-am654.c priv->ctlr.ops = &am654_hbmc_ops; ctlr 93 drivers/mtd/hyperbus/hbmc-am654.c priv->hbdev.ctlr = &priv->ctlr; ctlr 24 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_ctlr *ctlr = hbdev->ctlr; ctlr 27 drivers/mtd/hyperbus/hyperbus-core.c read_data.x[0] = ctlr->ops->read16(hbdev, addr); ctlr 36 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_ctlr *ctlr = hbdev->ctlr; ctlr 38 drivers/mtd/hyperbus/hyperbus-core.c ctlr->ops->write16(hbdev, addr, d.x[0]); ctlr 45 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_ctlr *ctlr = hbdev->ctlr; ctlr 47 drivers/mtd/hyperbus/hyperbus-core.c ctlr->ops->copy_from(hbdev, to, from, len); ctlr 54 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_ctlr *ctlr = hbdev->ctlr; ctlr 56 drivers/mtd/hyperbus/hyperbus-core.c ctlr->ops->copy_to(hbdev, to, from, len); ctlr 62 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_ctlr *ctlr; ctlr 69 drivers/mtd/hyperbus/hyperbus-core.c if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) { ctlr 75 drivers/mtd/hyperbus/hyperbus-core.c ctlr = hbdev->ctlr; ctlr 85 drivers/mtd/hyperbus/hyperbus-core.c dev = ctlr->dev; ctlr 97 drivers/mtd/hyperbus/hyperbus-core.c ops = ctlr->ops; ctlr 108 drivers/mtd/hyperbus/hyperbus-core.c if (ops->calibrate && !ctlr->calibrated) { ctlr 114 drivers/mtd/hyperbus/hyperbus-core.c ctlr->calibrated = true; ctlr 63 drivers/net/can/rcar/rcar_can.c u16 ctlr; /* Control Register */ ctlr 446 drivers/net/can/rcar/rcar_can.c u16 ctlr; ctlr 455 drivers/net/can/rcar/rcar_can.c ctlr = readw(&priv->regs->ctlr); ctlr 456 drivers/net/can/rcar/rcar_can.c ctlr &= ~RCAR_CAN_CTLR_SLPM; ctlr 457 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 459 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; ctlr 460 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 466 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */ ctlr 467 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */ ctlr 469 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */ ctlr 470 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */ ctlr 471 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 498 drivers/net/can/rcar/rcar_can.c writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr); ctlr 556 drivers/net/can/rcar/rcar_can.c u16 ctlr; ctlr 560 drivers/net/can/rcar/rcar_can.c ctlr = readw(&priv->regs->ctlr); ctlr 561 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; ctlr 562 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 572 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_SLPM; ctlr 573 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 849 drivers/net/can/rcar/rcar_can.c u16 ctlr; ctlr 855 drivers/net/can/rcar/rcar_can.c ctlr = readw(&priv->regs->ctlr); ctlr 856 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_CANM_HALT; ctlr 857 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 858 drivers/net/can/rcar/rcar_can.c ctlr |= RCAR_CAN_CTLR_SLPM; ctlr 859 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 870 drivers/net/can/rcar/rcar_can.c u16 ctlr; ctlr 879 drivers/net/can/rcar/rcar_can.c ctlr = readw(&priv->regs->ctlr); ctlr 880 drivers/net/can/rcar/rcar_can.c ctlr &= ~RCAR_CAN_CTLR_SLPM; ctlr 881 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 882 drivers/net/can/rcar/rcar_can.c ctlr &= ~RCAR_CAN_CTLR_CANM; ctlr 883 drivers/net/can/rcar/rcar_can.c writew(ctlr, &priv->regs->ctlr); ctlr 112 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr; ctlr 173 drivers/net/ethernet/ti/davinci_cpdma.c #define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs)) ctlr 176 drivers/net/ethernet/ti/davinci_cpdma.c #define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs)) ctlr 190 drivers/net/ethernet/ti/davinci_cpdma.c static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) ctlr 192 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 202 drivers/net/ethernet/ti/davinci_cpdma.c dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, ctlr 212 drivers/net/ethernet/ti/davinci_cpdma.c static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) ctlr 214 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_params *cpdma_params = &ctlr->params; ctlr 218 drivers/net/ethernet/ti/davinci_cpdma.c pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL); ctlr 221 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->pool = pool; ctlr 240 drivers/net/ethernet/ti/davinci_cpdma.c pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), ctlr 244 drivers/net/ethernet/ti/davinci_cpdma.c dev_err(ctlr->dev, "pool create failed %d\n", ret); ctlr 250 drivers/net/ethernet/ti/davinci_cpdma.c pool->iomap = devm_ioremap(ctlr->dev, pool->phys, ctlr 254 drivers/net/ethernet/ti/davinci_cpdma.c pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, ctlr 266 drivers/net/ethernet/ti/davinci_cpdma.c dev_err(ctlr->dev, "pool add failed %d\n", ret); ctlr 273 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_desc_pool_destroy(ctlr); ctlr 275 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->pool = NULL; ctlr 306 drivers/net/ethernet/ti/davinci_cpdma.c static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) ctlr 311 drivers/net/ethernet/ti/davinci_cpdma.c if (!ctlr->params.has_ext_regs) ctlr 314 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_ACTIVE) ctlr 323 drivers/net/ethernet/ti/davinci_cpdma.c val = dma_reg_read(ctlr, info->reg); ctlr 326 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, info->reg, val); ctlr 331 drivers/net/ethernet/ti/davinci_cpdma.c static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control) ctlr 336 drivers/net/ethernet/ti/davinci_cpdma.c if (!ctlr->params.has_ext_regs) ctlr 339 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_ACTIVE) ctlr 348 drivers/net/ethernet/ti/davinci_cpdma.c ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; ctlr 357 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 366 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, rate_reg, chan->rate_factor); ctlr 368 drivers/net/ethernet/ti/davinci_cpdma.c rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM); ctlr 371 drivers/net/ethernet/ti/davinci_cpdma.c ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask); ctlr 377 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 378 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 386 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_ACTIVE) { ctlr 390 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, chan->int_set, chan->mask); ctlr 409 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = ch->ctlr; ctlr 417 drivers/net/ethernet/ti/davinci_cpdma.c chan = ctlr->channels[i]; ctlr 440 drivers/net/ethernet/ti/davinci_cpdma.c dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n", ctlr 445 drivers/net/ethernet/ti/davinci_cpdma.c static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr, ctlr 460 drivers/net/ethernet/ti/davinci_cpdma.c freq = ctlr->params.bus_freq_mhz * 1000 * 32; ctlr 462 drivers/net/ethernet/ti/davinci_cpdma.c dev_err(ctlr->dev, "The bus frequency is not set\n"); ctlr 506 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, rate_reg, ch->rate_factor); ctlr 512 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr; ctlr 514 drivers/net/ethernet/ti/davinci_cpdma.c ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL); ctlr 515 drivers/net/ethernet/ti/davinci_cpdma.c if (!ctlr) ctlr 518 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->state = CPDMA_STATE_IDLE; ctlr 519 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->params = *params; ctlr 520 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->dev = params->dev; ctlr 521 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->chan_num = 0; ctlr 522 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_init(&ctlr->lock); ctlr 524 drivers/net/ethernet/ti/davinci_cpdma.c if (cpdma_desc_pool_create(ctlr)) ctlr 527 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_tx_desc = ctlr->pool->num_desc / 2; ctlr 528 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; ctlr 530 drivers/net/ethernet/ti/davinci_cpdma.c if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr 531 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_chan = CPDMA_MAX_CHANNELS; ctlr 532 drivers/net/ethernet/ti/davinci_cpdma.c return ctlr; ctlr 535 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) ctlr 541 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 542 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_IDLE) { ctlr 543 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 547 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->params.has_soft_reset) { ctlr 550 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); ctlr 552 drivers/net/ethernet/ti/davinci_cpdma.c if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) ctlr 560 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ctlr->num_chan; i++) { ctlr 561 drivers/net/ethernet/ti/davinci_cpdma.c writel(0, ctlr->params.txhdp + 4 * i); ctlr 562 drivers/net/ethernet/ti/davinci_cpdma.c writel(0, ctlr->params.rxhdp + 4 * i); ctlr 563 drivers/net/ethernet/ti/davinci_cpdma.c writel(0, ctlr->params.txcp + 4 * i); ctlr 564 drivers/net/ethernet/ti/davinci_cpdma.c writel(0, ctlr->params.rxcp + 4 * i); ctlr 567 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); ctlr 568 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); ctlr 570 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); ctlr 571 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); ctlr 573 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->state = CPDMA_STATE_ACTIVE; ctlr 576 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { ctlr 577 drivers/net/ethernet/ti/davinci_cpdma.c chan = ctlr->channels[i]; ctlr 588 drivers/net/ethernet/ti/davinci_cpdma.c _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode); ctlr 589 drivers/net/ethernet/ti/davinci_cpdma.c _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0); ctlr 591 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 595 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) ctlr 600 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 601 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_ACTIVE) { ctlr 602 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 606 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->state = CPDMA_STATE_TEARDOWN; ctlr 607 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 609 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { ctlr 610 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->channels[i]) ctlr 611 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_stop(ctlr->channels[i]); ctlr 614 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 615 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); ctlr 616 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); ctlr 618 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); ctlr 619 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); ctlr 621 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->state = CPDMA_STATE_IDLE; ctlr 623 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 627 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) ctlr 631 drivers/net/ethernet/ti/davinci_cpdma.c if (!ctlr) ctlr 634 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_IDLE) ctlr 635 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_ctlr_stop(ctlr); ctlr 637 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) ctlr 638 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_destroy(ctlr->channels[i]); ctlr 640 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_desc_pool_destroy(ctlr); ctlr 644 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) ctlr 649 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 650 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->state != CPDMA_STATE_ACTIVE) { ctlr 651 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 655 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { ctlr 656 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->channels[i]) ctlr 657 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_int_ctrl(ctlr->channels[i], enable); ctlr 660 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 664 drivers/net/ethernet/ti/davinci_cpdma.c void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) ctlr 666 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); ctlr 669 drivers/net/ethernet/ti/davinci_cpdma.c u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr) ctlr 671 drivers/net/ethernet/ti/davinci_cpdma.c return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED); ctlr 674 drivers/net/ethernet/ti/davinci_cpdma.c u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr) ctlr 676 drivers/net/ethernet/ti/davinci_cpdma.c return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED); ctlr 679 drivers/net/ethernet/ti/davinci_cpdma.c static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, ctlr 700 drivers/net/ethernet/ti/davinci_cpdma.c chan = ctlr->channels[i]; ctlr 725 drivers/net/ethernet/ti/davinci_cpdma.c static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) ctlr 734 drivers/net/ethernet/ti/davinci_cpdma.c if (!ctlr->chan_num) ctlr 737 drivers/net/ethernet/ti/davinci_cpdma.c for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { ctlr 738 drivers/net/ethernet/ti/davinci_cpdma.c chan = ctlr->channels[i]; ctlr 756 drivers/net/ethernet/ti/davinci_cpdma.c tx_desc_num = ctlr->num_tx_desc; ctlr 757 drivers/net/ethernet/ti/davinci_cpdma.c rx_desc_num = ctlr->num_rx_desc; ctlr 768 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc); ctlr 769 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc); ctlr 786 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = ch->ctlr; ctlr 790 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 794 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 801 drivers/net/ethernet/ti/davinci_cpdma.c ret = cpdma_chan_split_pool(ctlr); ctlr 802 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 810 drivers/net/ethernet/ti/davinci_cpdma.c u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr) ctlr 814 drivers/net/ethernet/ti/davinci_cpdma.c divident = ctlr->params.bus_freq_mhz * 32 * 1000; ctlr 828 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr; ctlr 838 drivers/net/ethernet/ti/davinci_cpdma.c ctlr = ch->ctlr; ctlr 839 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 846 drivers/net/ethernet/ti/davinci_cpdma.c ret = cpdma_chan_set_factors(ctlr, ch); ctlr 853 drivers/net/ethernet/ti/davinci_cpdma.c _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask); ctlr 854 drivers/net/ethernet/ti/davinci_cpdma.c _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode); ctlr 855 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 860 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 876 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, ctlr 885 drivers/net/ethernet/ti/davinci_cpdma.c if (__chan_linear(chan_num) >= ctlr->num_chan) ctlr 888 drivers/net/ethernet/ti/davinci_cpdma.c chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); ctlr 892 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 893 drivers/net/ethernet/ti/davinci_cpdma.c if (ctlr->channels[chan_num]) { ctlr 894 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 895 drivers/net/ethernet/ti/davinci_cpdma.c devm_kfree(ctlr->dev, chan); ctlr 899 drivers/net/ethernet/ti/davinci_cpdma.c chan->ctlr = ctlr; ctlr 907 drivers/net/ethernet/ti/davinci_cpdma.c chan->hdp = ctlr->params.rxhdp + offset; ctlr 908 drivers/net/ethernet/ti/davinci_cpdma.c chan->cp = ctlr->params.rxcp + offset; ctlr 909 drivers/net/ethernet/ti/davinci_cpdma.c chan->rxfree = ctlr->params.rxfree + offset; ctlr 915 drivers/net/ethernet/ti/davinci_cpdma.c chan->hdp = ctlr->params.txhdp + offset; ctlr 916 drivers/net/ethernet/ti/davinci_cpdma.c chan->cp = ctlr->params.txcp + offset; ctlr 926 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->channels[chan_num] = chan; ctlr 927 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->chan_num++; ctlr 929 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_split_pool(ctlr); ctlr 931 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 949 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr; ctlr 954 drivers/net/ethernet/ti/davinci_cpdma.c ctlr = chan->ctlr; ctlr 956 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 959 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->channels[chan->chan_num] = NULL; ctlr 960 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->chan_num--; ctlr 961 drivers/net/ethernet/ti/davinci_cpdma.c devm_kfree(ctlr->dev, chan); ctlr 962 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_chan_split_pool(ctlr); ctlr 964 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 983 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 985 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 1019 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1031 drivers/net/ethernet/ti/davinci_cpdma.c desc = cpdma_desc_alloc(ctlr->pool); ctlr 1037 drivers/net/ethernet/ti/davinci_cpdma.c if (len < ctlr->params.min_packet_size) { ctlr 1038 drivers/net/ethernet/ti/davinci_cpdma.c len = ctlr->params.min_packet_size; ctlr 1047 drivers/net/ethernet/ti/davinci_cpdma.c dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir); ctlr 1049 drivers/net/ethernet/ti/davinci_cpdma.c buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir); ctlr 1050 drivers/net/ethernet/ti/davinci_cpdma.c ret = dma_mapping_error(ctlr->dev, buffer); ctlr 1052 drivers/net/ethernet/ti/davinci_cpdma.c cpdma_desc_free(ctlr->pool, desc, 1); ctlr 1181 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1182 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 1197 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1198 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 1209 drivers/net/ethernet/ti/davinci_cpdma.c dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen, ctlr 1212 drivers/net/ethernet/ti/davinci_cpdma.c dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); ctlr 1221 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1225 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 1295 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1299 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 1301 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 1314 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_ctlr *ctlr = chan->ctlr; ctlr 1315 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc_pool *pool = ctlr->pool; ctlr 1327 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, chan->int_clear, chan->mask); ctlr 1330 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(ctlr, chan->td, chan_linear(chan)); ctlr 1384 drivers/net/ethernet/ti/davinci_cpdma.c dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear, ctlr 1391 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) ctlr 1396 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 1397 drivers/net/ethernet/ti/davinci_cpdma.c ret = _cpdma_control_get(ctlr, control); ctlr 1398 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 1403 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) ctlr 1408 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 1409 drivers/net/ethernet/ti/davinci_cpdma.c ret = _cpdma_control_set(ctlr, control, value); ctlr 1410 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 1415 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) ctlr 1417 drivers/net/ethernet/ti/davinci_cpdma.c return ctlr->num_rx_desc; ctlr 1420 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) ctlr 1422 drivers/net/ethernet/ti/davinci_cpdma.c return ctlr->num_tx_desc; ctlr 1425 drivers/net/ethernet/ti/davinci_cpdma.c int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) ctlr 1430 drivers/net/ethernet/ti/davinci_cpdma.c spin_lock_irqsave(&ctlr->lock, flags); ctlr 1432 drivers/net/ethernet/ti/davinci_cpdma.c temp = ctlr->num_rx_desc; ctlr 1433 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_rx_desc = num_rx_desc; ctlr 1434 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; ctlr 1435 drivers/net/ethernet/ti/davinci_cpdma.c ret = cpdma_chan_split_pool(ctlr); ctlr 1437 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_rx_desc = temp; ctlr 1438 drivers/net/ethernet/ti/davinci_cpdma.c ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; ctlr 1441 drivers/net/ethernet/ti/davinci_cpdma.c spin_unlock_irqrestore(&ctlr->lock, flags); ctlr 67 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr); ctlr 68 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); ctlr 69 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); ctlr 71 drivers/net/ethernet/ti/davinci_cpdma.h struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, ctlr 90 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); ctlr 91 drivers/net/ethernet/ti/davinci_cpdma.h void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); ctlr 93 drivers/net/ethernet/ti/davinci_cpdma.h u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr); ctlr 94 drivers/net/ethernet/ti/davinci_cpdma.h u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr); ctlr 99 drivers/net/ethernet/ti/davinci_cpdma.h u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr); ctlr 116 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); ctlr 117 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); ctlr 118 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr); ctlr 119 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc); ctlr 120 drivers/net/ethernet/ti/davinci_cpdma.h int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr); ctlr 685 drivers/pci/hotplug/ibmphp_ebda.c u16 ctlr, slot, bus; ctlr 696 drivers/pci/hotplug/ibmphp_ebda.c for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { ctlr 721 drivers/pci/hotplug/ibmphp_ebda.c hpc_ptr->ctlr_relative_id = ctlr; ctlr 370 drivers/pci/hotplug/ibmphp_hpc.c static u8 ctrl_read(struct controller *ctlr, void __iomem *base, u8 offset) ctlr 373 drivers/pci/hotplug/ibmphp_hpc.c switch (ctlr->ctlr_type) { ctlr 375 drivers/pci/hotplug/ibmphp_hpc.c rc = isa_ctrl_read(ctlr, offset); ctlr 378 drivers/pci/hotplug/ibmphp_hpc.c rc = pci_ctrl_read(ctlr, offset); ctlr 382 drivers/pci/hotplug/ibmphp_hpc.c rc = i2c_ctrl_read(ctlr, base, offset); ctlr 390 drivers/pci/hotplug/ibmphp_hpc.c static u8 ctrl_write(struct controller *ctlr, void __iomem *base, u8 offset, u8 data) ctlr 393 drivers/pci/hotplug/ibmphp_hpc.c switch (ctlr->ctlr_type) { ctlr 395 drivers/pci/hotplug/ibmphp_hpc.c isa_ctrl_write(ctlr, offset, data); ctlr 398 drivers/pci/hotplug/ibmphp_hpc.c rc = pci_ctrl_write(ctlr, offset, data); ctlr 402 drivers/pci/hotplug/ibmphp_hpc.c rc = i2c_ctrl_write(ctlr, base, offset, data); ctlr 141 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 142 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr); ctlr 269 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 282 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 295 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!ctlr->sel_fcf) { ctlr 300 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (fcoe_ctlr_els_send(ctlr, lport, skb)) ctlr 373 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (ctlr->map_dest) ctlr 377 drivers/scsi/bnx2fc/bnx2fc_fcoe.c memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); ctlr 379 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) ctlr 380 drivers/scsi/bnx2fc/bnx2fc_fcoe.c memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); ctlr 430 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 437 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 438 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 522 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 560 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 580 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (ctlr->state) { ctlr 581 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!ether_addr_equal(mac, ctlr->dest_addr)) { ctlr 583 drivers/scsi/bnx2fc/bnx2fc_fcoe.c mac, ctlr->dest_addr); ctlr 798 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 804 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 823 drivers/scsi/bnx2fc/bnx2fc_fcoe.c wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, ctlr 829 drivers/scsi/bnx2fc/bnx2fc_fcoe.c wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, ctlr 867 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 920 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 921 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 927 drivers/scsi/bnx2fc/bnx2fc_fcoe.c cdev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 944 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 946 drivers/scsi/bnx2fc/bnx2fc_fcoe.c } else if (fcoe_ctlr_link_down(ctlr)) { ctlr 1060 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 1063 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 1064 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_recv(ctlr, skb); ctlr 1239 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 1254 drivers/scsi/bnx2fc/bnx2fc_fcoe.c memcpy(ctlr->ctl_src_addr, ha->addr, ctlr 1311 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 1317 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 1318 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 1323 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_destroy(ctlr); ctlr 1443 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 1454 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 1455 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr->cdev = ctlr_dev; ctlr 1456 drivers/scsi/bnx2fc/bnx2fc_fcoe.c interface = fcoe_ctlr_priv(ctlr); ctlr 1463 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_init(ctlr, fip_mode); ctlr 1464 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr->send = bnx2fc_fip_send; ctlr 1465 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr->update_mac = bnx2fc_update_src_mac; ctlr 1466 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr->get_src_addr = bnx2fc_get_src_mac; ctlr 1473 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_destroy(ctlr); ctlr 1493 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 1504 drivers/scsi/bnx2fc/bnx2fc_fcoe.c BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n"); ctlr 1604 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 1605 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fc_lport *lport = ctlr->lp; ctlr 1645 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 1646 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fc_lport *lport = ctlr->lp; ctlr 1670 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 1677 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 1678 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!interface || !ctlr->lp) { ctlr 1838 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 1850 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 1851 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 1871 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 1878 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 1887 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_down(ctlr); ctlr 1998 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 2009 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 2014 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 2020 drivers/scsi/bnx2fc/bnx2fc_fcoe.c while (!ctlr->sel_fcf) { ctlr 2084 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) ctlr 2086 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); ctlr 2089 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!ctlr->lp) { ctlr 2094 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_down(ctlr); ctlr 2095 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_clean_pending_queue(ctlr->lp); ctlr 2107 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 2114 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 2120 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = __bnx2fc_disable(ctlr); ctlr 2179 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) ctlr 2181 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); ctlr 2187 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (!ctlr->lp) { ctlr 2190 drivers/scsi/bnx2fc/bnx2fc_fcoe.c } else if (!bnx2fc_link_ok(ctlr->lp)) { ctlr 2191 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 2198 drivers/scsi/bnx2fc/bnx2fc_fcoe.c lport = ctlr->lp; ctlr 2235 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 2242 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 2247 drivers/scsi/bnx2fc/bnx2fc_fcoe.c rc = __bnx2fc_enable(ctlr); ctlr 2267 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); ctlr 2271 drivers/scsi/bnx2fc/bnx2fc_fcoe.c return __bnx2fc_enable(ctlr); ctlr 2273 drivers/scsi/bnx2fc/bnx2fc_fcoe.c return __bnx2fc_disable(ctlr); ctlr 2305 drivers/scsi/bnx2fc/bnx2fc_fcoe.c struct fcoe_ctlr *ctlr; ctlr 2373 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr = bnx2fc_to_ctlr(interface); ctlr 2374 drivers/scsi/bnx2fc/bnx2fc_fcoe.c cdev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 2400 drivers/scsi/bnx2fc/bnx2fc_fcoe.c ctlr->lp = lport; ctlr 2409 drivers/scsi/bnx2fc/bnx2fc_fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 176 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 324 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; ctlr 326 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; ctlr 327 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; ctlr 328 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; ctlr 329 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; ctlr 330 drivers/scsi/bnx2fc/bnx2fc_hwi.c ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; ctlr 361 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 385 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; ctlr 386 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; ctlr 387 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; ctlr 388 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; ctlr 389 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; ctlr 390 drivers/scsi/bnx2fc/bnx2fc_hwi.c enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; ctlr 430 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); ctlr 452 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; ctlr 453 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; ctlr 454 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; ctlr 455 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; ctlr 456 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; ctlr 457 drivers/scsi/bnx2fc/bnx2fc_hwi.c disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; ctlr 186 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_controller *ctlr; ctlr 264 drivers/scsi/device_handler/scsi_dh_rdac.c static unsigned int rdac_failover_get(struct rdac_controller *ctlr, ctlr 273 drivers/scsi/device_handler/scsi_dh_rdac.c if (ctlr->use_ms10) { ctlr 277 drivers/scsi/device_handler/scsi_dh_rdac.c rdac_pg = &ctlr->mode_select.expanded; ctlr 289 drivers/scsi/device_handler/scsi_dh_rdac.c rdac_pg = &ctlr->mode_select.legacy; ctlr 305 drivers/scsi/device_handler/scsi_dh_rdac.c if (ctlr->use_ms10) { ctlr 319 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_controller *ctlr; ctlr 320 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr = container_of(kref, struct rdac_controller, kref); ctlr 322 drivers/scsi/device_handler/scsi_dh_rdac.c list_del(&ctlr->node); ctlr 323 drivers/scsi/device_handler/scsi_dh_rdac.c kfree(ctlr); ctlr 329 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_controller *ctlr, *tmp; ctlr 339 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); ctlr 340 drivers/scsi/device_handler/scsi_dh_rdac.c if (!ctlr) ctlr 344 drivers/scsi/device_handler/scsi_dh_rdac.c memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN); ctlr 345 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->index = index; ctlr 346 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->host = sdev->host; ctlr 347 drivers/scsi/device_handler/scsi_dh_rdac.c memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); ctlr 349 drivers/scsi/device_handler/scsi_dh_rdac.c kref_init(&ctlr->kref); ctlr 350 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->use_ms10 = -1; ctlr 351 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_queued = 0; ctlr 352 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_sdev = NULL; ctlr 353 drivers/scsi/device_handler/scsi_dh_rdac.c spin_lock_init(&ctlr->ms_lock); ctlr 354 drivers/scsi/device_handler/scsi_dh_rdac.c INIT_WORK(&ctlr->ms_work, send_mode_select); ctlr 355 drivers/scsi/device_handler/scsi_dh_rdac.c INIT_LIST_HEAD(&ctlr->ms_head); ctlr 356 drivers/scsi/device_handler/scsi_dh_rdac.c list_add(&ctlr->node, &ctlr_list); ctlr 357 drivers/scsi/device_handler/scsi_dh_rdac.c INIT_LIST_HEAD(&ctlr->dh_list); ctlr 359 drivers/scsi/device_handler/scsi_dh_rdac.c return ctlr; ctlr 425 drivers/scsi/device_handler/scsi_dh_rdac.c list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) { ctlr 452 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr = get_controller(index, array_name, array_id, sdev); ctlr 453 drivers/scsi/device_handler/scsi_dh_rdac.c if (!h->ctlr) ctlr 456 drivers/scsi/device_handler/scsi_dh_rdac.c list_add_rcu(&h->node, &h->ctlr->dh_list); ctlr 477 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr->use_ms10 = 1; ctlr 479 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr->use_ms10 = 0; ctlr 520 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index, ctlr 529 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_controller *ctlr = ctlr 531 drivers/scsi/device_handler/scsi_dh_rdac.c struct scsi_device *sdev = ctlr->ms_sdev; ctlr 542 drivers/scsi/device_handler/scsi_dh_rdac.c spin_lock(&ctlr->ms_lock); ctlr 543 drivers/scsi/device_handler/scsi_dh_rdac.c list_splice_init(&ctlr->ms_head, &list); ctlr 544 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_queued = 0; ctlr 545 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_sdev = NULL; ctlr 546 drivers/scsi/device_handler/scsi_dh_rdac.c spin_unlock(&ctlr->ms_lock); ctlr 551 drivers/scsi/device_handler/scsi_dh_rdac.c data_size = rdac_failover_get(ctlr, &list, cdb); ctlr 555 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index, ctlr 558 drivers/scsi/device_handler/scsi_dh_rdac.c if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select, ctlr 571 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index); ctlr 589 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_controller *ctlr; ctlr 599 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr = qdata->h->ctlr; ctlr 600 drivers/scsi/device_handler/scsi_dh_rdac.c spin_lock(&ctlr->ms_lock); ctlr 601 drivers/scsi/device_handler/scsi_dh_rdac.c list_add_tail(&qdata->entry, &ctlr->ms_head); ctlr 602 drivers/scsi/device_handler/scsi_dh_rdac.c if (!ctlr->ms_queued) { ctlr 603 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_queued = 1; ctlr 604 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr->ms_sdev = sdev; ctlr 605 drivers/scsi/device_handler/scsi_dh_rdac.c queue_work(kmpath_rdacd, &ctlr->ms_work); ctlr 607 drivers/scsi/device_handler/scsi_dh_rdac.c spin_unlock(&ctlr->ms_lock); ctlr 666 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index, ctlr 763 drivers/scsi/device_handler/scsi_dh_rdac.c kref_put(&h->ctlr->kref, release_controller); ctlr 775 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->ctlr && h->ctlr->ms_queued) ctlr 779 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->ctlr) { ctlr 782 drivers/scsi/device_handler/scsi_dh_rdac.c kref_put(&h->ctlr->kref, release_controller); ctlr 383 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 404 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 405 drivers/scsi/fcoe/fcoe.c ctlr->cdev = ctlr_dev; ctlr 406 drivers/scsi/fcoe/fcoe.c fcoe = fcoe_ctlr_priv(ctlr); ctlr 413 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_init(ctlr, fip_mode); ctlr 414 drivers/scsi/fcoe/fcoe.c ctlr->send = fcoe_fip_send; ctlr 415 drivers/scsi/fcoe/fcoe.c ctlr->update_mac = fcoe_update_src_mac; ctlr 416 drivers/scsi/fcoe/fcoe.c ctlr->get_src_addr = fcoe_get_src_mac; ctlr 420 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_destroy(ctlr); ctlr 514 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 517 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 518 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_recv(ctlr, skb); ctlr 537 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 540 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 541 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_recv(ctlr, skb); ctlr 706 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 712 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 745 drivers/scsi/fcoe/fcoe.c wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0); ctlr 748 drivers/scsi/fcoe/fcoe.c wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, ctlr 1126 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); ctlr 1197 drivers/scsi/fcoe/fcoe.c rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1); ctlr 1340 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1348 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1349 drivers/scsi/fcoe/fcoe.c lport = ctlr->lp; ctlr 1371 drivers/scsi/fcoe/fcoe.c if (is_fip_mode(ctlr) && ctlr 1372 drivers/scsi/fcoe/fcoe.c !ether_addr_equal(eh->h_source, ctlr->dest_addr)) { ctlr 1485 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); ctlr 1501 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_els_send(ctlr, lport, skb)) ctlr 1565 drivers/scsi/fcoe/fcoe.c memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); ctlr 1566 drivers/scsi/fcoe/fcoe.c if (ctlr->map_dest) ctlr 1569 drivers/scsi/fcoe/fcoe.c if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) ctlr 1570 drivers/scsi/fcoe/fcoe.c memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); ctlr 1610 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1631 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1632 drivers/scsi/fcoe/fcoe.c if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && ctlr 1796 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1813 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1825 drivers/scsi/fcoe/fcoe.c ctlr->priority = prio; ctlr 1849 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1858 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1859 drivers/scsi/fcoe/fcoe.c lport = ctlr->lp; ctlr 1895 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr)); ctlr 1908 drivers/scsi/fcoe/fcoe.c cdev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 1917 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 1919 drivers/scsi/fcoe/fcoe.c } else if (fcoe_ctlr_link_down(ctlr)) { ctlr 1948 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1959 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1960 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_down(ctlr); ctlr 1961 drivers/scsi/fcoe/fcoe.c fcoe_clean_pending_queue(ctlr->lp); ctlr 1979 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 1993 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 1995 drivers/scsi/fcoe/fcoe.c if (!fcoe_link_ok(ctlr->lp)) ctlr 1996 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 2015 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); ctlr 2016 drivers/scsi/fcoe/fcoe.c struct fc_lport *lport = ctlr->lp; ctlr 2040 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 2041 drivers/scsi/fcoe/fcoe.c struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); ctlr 2044 drivers/scsi/fcoe/fcoe.c ctlr->mode != FIP_MODE_VN2VN) { ctlr 2048 drivers/scsi/fcoe/fcoe.c } else if (ctlr->mode != FIP_MODE_FABRIC) { ctlr 2066 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 2079 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 2080 drivers/scsi/fcoe/fcoe.c lport = ctlr->lp; ctlr 2097 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 2108 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 2109 drivers/scsi/fcoe/fcoe.c cdev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 2147 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); ctlr 2178 drivers/scsi/fcoe/fcoe.c ctlr->priority = ctlr_prio; ctlr 2204 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 2223 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 2224 drivers/scsi/fcoe/fcoe.c ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 2240 drivers/scsi/fcoe/fcoe.c ctlr->lp = lport; ctlr 2269 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 2361 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); ctlr 2362 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr); ctlr 2364 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_down(ctlr); ctlr 2365 drivers/scsi/fcoe/fcoe.c fcoe_clean_pending_queue(ctlr->lp); ctlr 2368 drivers/scsi/fcoe/fcoe.c !fcoe_link_ok(ctlr->lp)) ctlr 2369 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_link_up(ctlr); ctlr 2404 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 2408 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 2409 drivers/scsi/fcoe/fcoe.c return (fcoe) ? ctlr->lp : NULL; ctlr 2523 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr; ctlr 2534 drivers/scsi/fcoe/fcoe.c ctlr = fcoe_to_ctlr(fcoe); ctlr 2535 drivers/scsi/fcoe/fcoe.c port = lport_priv(ctlr->lp); ctlr 2811 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 2812 drivers/scsi/fcoe/fcoe.c struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); ctlr 2834 drivers/scsi/fcoe/fcoe.c struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); ctlr 2837 drivers/scsi/fcoe/fcoe.c fcoe_ctlr_recv_flogi(ctlr, lport, fp); ctlr 3244 drivers/scsi/fcoe/fcoe_ctlr.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 3245 drivers/scsi/fcoe/fcoe_ctlr.c struct fc_lport *lport = ctlr->lp; ctlr 3247 drivers/scsi/fcoe/fcoe_ctlr.c mutex_lock(&ctlr->ctlr_mutex); ctlr 3250 drivers/scsi/fcoe/fcoe_ctlr.c ctlr->mode = FIP_MODE_VN2VN; ctlr 3254 drivers/scsi/fcoe/fcoe_ctlr.c ctlr->mode = FIP_MODE_FABRIC; ctlr 3258 drivers/scsi/fcoe/fcoe_ctlr.c mutex_unlock(&ctlr->ctlr_mutex); ctlr 3260 drivers/scsi/fcoe/fcoe_ctlr.c fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); ctlr 141 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ ctlr 142 drivers/scsi/fcoe/fcoe_sysfs.c if (ctlr->f->get_fcoe_ctlr_##field) \ ctlr 143 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->f->get_fcoe_ctlr_##field(ctlr); \ ctlr 145 drivers/scsi/fcoe/fcoe_sysfs.c cast fcoe_ctlr_##field(ctlr)); \ ctlr 154 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \ ctlr 155 drivers/scsi/fcoe/fcoe_sysfs.c if (ctlr->f->get_fcoe_fcf_##field) \ ctlr 156 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->f->get_fcoe_fcf_##field(fcf); \ ctlr 166 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ ctlr 167 drivers/scsi/fcoe/fcoe_sysfs.c return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \ ctlr 181 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ ctlr 186 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ ctlr 201 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ ctlr 262 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 265 drivers/scsi/fcoe/fcoe_sysfs.c name = get_fcoe_ctlr_mode_name(ctlr->mode); ctlr 276 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 289 drivers/scsi/fcoe/fcoe_sysfs.c switch (ctlr->enabled) { ctlr 291 drivers/scsi/fcoe/fcoe_sysfs.c LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); ctlr 294 drivers/scsi/fcoe/fcoe_sysfs.c if (!ctlr->f->set_fcoe_ctlr_mode) { ctlr 295 drivers/scsi/fcoe/fcoe_sysfs.c LIBFCOE_SYSFS_DBG(ctlr, ctlr 300 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->mode = fcoe_parse_mode(mode); ctlr 301 drivers/scsi/fcoe/fcoe_sysfs.c if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { ctlr 302 drivers/scsi/fcoe/fcoe_sysfs.c LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", ctlr 307 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->f->set_fcoe_ctlr_mode(ctlr); ctlr 308 drivers/scsi/fcoe/fcoe_sysfs.c LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); ctlr 313 drivers/scsi/fcoe/fcoe_sysfs.c LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); ctlr 318 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR, ctlr 325 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 336 drivers/scsi/fcoe/fcoe_sysfs.c switch (ctlr->enabled) { ctlr 340 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->enabled = FCOE_CTLR_DISABLED; ctlr 345 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->enabled = FCOE_CTLR_ENABLED; ctlr 351 drivers/scsi/fcoe/fcoe_sysfs.c rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr); ctlr 370 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 373 drivers/scsi/fcoe/fcoe_sysfs.c name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled); ctlr 380 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, ctlr 388 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 389 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); ctlr 412 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 413 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); ctlr 418 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, ctlr 442 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 447 drivers/scsi/fcoe/fcoe_sysfs.c return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count); ctlr 456 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 458 drivers/scsi/fcoe/fcoe_sysfs.c return sprintf(buf, "%d\n", ctlr->lp->r_a_tov); ctlr 461 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR, ctlr 469 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 474 drivers/scsi/fcoe/fcoe_sysfs.c return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count); ctlr 483 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr 485 drivers/scsi/fcoe/fcoe_sysfs.c return sprintf(buf, "%d\n", ctlr->lp->e_d_tov); ctlr 488 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR, ctlr 496 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 505 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val; ctlr 506 drivers/scsi/fcoe/fcoe_sysfs.c mutex_lock(&ctlr->lock); ctlr 507 drivers/scsi/fcoe/fcoe_sysfs.c list_for_each_entry(fcf, &ctlr->fcfs, peers) ctlr 509 drivers/scsi/fcoe/fcoe_sysfs.c mutex_unlock(&ctlr->lock); ctlr 513 drivers/scsi/fcoe/fcoe_sysfs.c static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR, ctlr 634 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); ctlr 635 drivers/scsi/fcoe/fcoe_sysfs.c kfree(ctlr); ctlr 693 drivers/scsi/fcoe/fcoe_sysfs.c static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) ctlr 695 drivers/scsi/fcoe/fcoe_sysfs.c if (!fcoe_ctlr_work_q(ctlr)) { ctlr 698 drivers/scsi/fcoe/fcoe_sysfs.c "when no workqueue created.\n", ctlr->id); ctlr 703 drivers/scsi/fcoe/fcoe_sysfs.c flush_workqueue(fcoe_ctlr_work_q(ctlr)); ctlr 714 drivers/scsi/fcoe/fcoe_sysfs.c static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, ctlr 717 drivers/scsi/fcoe/fcoe_sysfs.c if (unlikely(!fcoe_ctlr_work_q(ctlr))) { ctlr 720 drivers/scsi/fcoe/fcoe_sysfs.c "when no workqueue created.\n", ctlr->id); ctlr 726 drivers/scsi/fcoe/fcoe_sysfs.c return queue_work(fcoe_ctlr_work_q(ctlr), work); ctlr 733 drivers/scsi/fcoe/fcoe_sysfs.c static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) ctlr 735 drivers/scsi/fcoe/fcoe_sysfs.c if (!fcoe_ctlr_devloss_work_q(ctlr)) { ctlr 738 drivers/scsi/fcoe/fcoe_sysfs.c "when no workqueue created.\n", ctlr->id); ctlr 743 drivers/scsi/fcoe/fcoe_sysfs.c flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr)); ctlr 755 drivers/scsi/fcoe/fcoe_sysfs.c static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, ctlr 759 drivers/scsi/fcoe/fcoe_sysfs.c if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { ctlr 762 drivers/scsi/fcoe/fcoe_sysfs.c "when no workqueue created.\n", ctlr->id); ctlr 768 drivers/scsi/fcoe/fcoe_sysfs.c return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); ctlr 797 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr; ctlr 800 drivers/scsi/fcoe/fcoe_sysfs.c ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size, ctlr 802 drivers/scsi/fcoe/fcoe_sysfs.c if (!ctlr) ctlr 805 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->id = atomic_inc_return(&ctlr_num) - 1; ctlr 806 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->f = f; ctlr 807 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->mode = FIP_CONN_TYPE_FABRIC; ctlr 808 drivers/scsi/fcoe/fcoe_sysfs.c INIT_LIST_HEAD(&ctlr->fcfs); ctlr 809 drivers/scsi/fcoe/fcoe_sysfs.c mutex_init(&ctlr->lock); ctlr 810 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->dev.parent = parent; ctlr 811 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->dev.bus = &fcoe_bus_type; ctlr 812 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->dev.type = &fcoe_ctlr_device_type; ctlr 814 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; ctlr 816 drivers/scsi/fcoe/fcoe_sysfs.c snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), ctlr 817 drivers/scsi/fcoe/fcoe_sysfs.c "ctlr_wq_%d", ctlr->id); ctlr 818 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->work_q = create_singlethread_workqueue( ctlr 819 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->work_q_name); ctlr 820 drivers/scsi/fcoe/fcoe_sysfs.c if (!ctlr->work_q) ctlr 823 drivers/scsi/fcoe/fcoe_sysfs.c snprintf(ctlr->devloss_work_q_name, ctlr 824 drivers/scsi/fcoe/fcoe_sysfs.c sizeof(ctlr->devloss_work_q_name), ctlr 825 drivers/scsi/fcoe/fcoe_sysfs.c "ctlr_dl_wq_%d", ctlr->id); ctlr 826 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->devloss_work_q = create_singlethread_workqueue( ctlr 827 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->devloss_work_q_name); ctlr 828 drivers/scsi/fcoe/fcoe_sysfs.c if (!ctlr->devloss_work_q) ctlr 831 drivers/scsi/fcoe/fcoe_sysfs.c dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); ctlr 832 drivers/scsi/fcoe/fcoe_sysfs.c error = device_register(&ctlr->dev); ctlr 836 drivers/scsi/fcoe/fcoe_sysfs.c return ctlr; ctlr 839 drivers/scsi/fcoe/fcoe_sysfs.c destroy_workqueue(ctlr->devloss_work_q); ctlr 840 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->devloss_work_q = NULL; ctlr 842 drivers/scsi/fcoe/fcoe_sysfs.c destroy_workqueue(ctlr->work_q); ctlr 843 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->work_q = NULL; ctlr 845 drivers/scsi/fcoe/fcoe_sysfs.c kfree(ctlr); ctlr 871 drivers/scsi/fcoe/fcoe_sysfs.c void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr) ctlr 875 drivers/scsi/fcoe/fcoe_sysfs.c mutex_lock(&ctlr->lock); ctlr 877 drivers/scsi/fcoe/fcoe_sysfs.c &ctlr->fcfs, peers) { ctlr 880 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); ctlr 882 drivers/scsi/fcoe/fcoe_sysfs.c mutex_unlock(&ctlr->lock); ctlr 884 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_flush_work(ctlr); ctlr 886 drivers/scsi/fcoe/fcoe_sysfs.c destroy_workqueue(ctlr->devloss_work_q); ctlr 887 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->devloss_work_q = NULL; ctlr 888 drivers/scsi/fcoe/fcoe_sysfs.c destroy_workqueue(ctlr->work_q); ctlr 889 drivers/scsi/fcoe/fcoe_sysfs.c ctlr->work_q = NULL; ctlr 891 drivers/scsi/fcoe/fcoe_sysfs.c device_unregister(&ctlr->dev); ctlr 906 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); ctlr 914 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_flush_devloss(ctlr); ctlr 930 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); ctlr 932 drivers/scsi/fcoe/fcoe_sysfs.c mutex_lock(&ctlr->lock); ctlr 948 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); ctlr 951 drivers/scsi/fcoe/fcoe_sysfs.c mutex_unlock(&ctlr->lock); ctlr 965 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); ctlr 980 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work, ctlr 992 drivers/scsi/fcoe/fcoe_sysfs.c struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, ctlr 998 drivers/scsi/fcoe/fcoe_sysfs.c list_for_each_entry(fcf, &ctlr->fcfs, peers) { ctlr 1006 drivers/scsi/fcoe/fcoe_sysfs.c fcoe_ctlr_device_flush_devloss(ctlr); ctlr 1019 drivers/scsi/fcoe/fcoe_sysfs.c fcf->dev.parent = &ctlr->dev; ctlr 1025 drivers/scsi/fcoe/fcoe_sysfs.c fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; ctlr 1043 drivers/scsi/fcoe/fcoe_sysfs.c list_add_tail(&fcf->peers, &ctlr->fcfs); ctlr 219 drivers/scsi/fnic/fnic.h struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ ctlr 319 drivers/scsi/fnic/fnic.h return container_of(fip, struct fnic, ctlr); ctlr 128 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_down(&fnic->ctlr); ctlr 143 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_up(&fnic->ctlr); ctlr 168 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_up(&fnic->ctlr); ctlr 183 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_down(&fnic->ctlr); ctlr 375 drivers/scsi/fnic/fnic_fcs.c struct fcoe_ctlr *fip = &fnic->ctlr; ctlr 430 drivers/scsi/fnic/fnic_fcs.c struct fcoe_ctlr *fip = &fnic->ctlr; ctlr 516 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_up(&fnic->ctlr); ctlr 646 drivers/scsi/fnic/fnic_fcs.c if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { ctlr 651 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_link_down(&fnic->ctlr); ctlr 657 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_recv(&fnic->ctlr, skb); ctlr 732 drivers/scsi/fnic/fnic_fcs.c u8 *ctl = fnic->ctlr.ctl_src_addr; ctlr 789 drivers/scsi/fnic/fnic_fcs.c fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); ctlr 798 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); ctlr 1099 drivers/scsi/fnic/fnic_fcs.c fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) ctlr 1117 drivers/scsi/fnic/fnic_fcs.c if (fnic->ctlr.map_dest) ctlr 1120 drivers/scsi/fnic/fnic_fcs.c memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); ctlr 1346 drivers/scsi/fnic/fnic_fcs.c if (fnic->ctlr.mode == FIP_MODE_NON_FIP) ctlr 582 drivers/scsi/fnic/fnic_main.c fnic->ctlr.lp = lp; ctlr 679 drivers/scsi/fnic/fnic_main.c err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); ctlr 686 drivers/scsi/fnic/fnic_main.c memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); ctlr 763 drivers/scsi/fnic/fnic_main.c fnic->ctlr.send = fnic_eth_send; ctlr 764 drivers/scsi/fnic/fnic_main.c fnic->ctlr.update_mac = fnic_update_mac; ctlr 765 drivers/scsi/fnic/fnic_main.c fnic->ctlr.get_src_addr = fnic_get_mac; ctlr 772 drivers/scsi/fnic/fnic_main.c vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); ctlr 774 drivers/scsi/fnic/fnic_main.c fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); ctlr 785 drivers/scsi/fnic/fnic_main.c fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); ctlr 786 drivers/scsi/fnic/fnic_main.c fnic->ctlr.state = FIP_ST_NON_FIP; ctlr 849 drivers/scsi/fnic/fnic_main.c fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); ctlr 988 drivers/scsi/fnic/fnic_main.c fcoe_ctlr_destroy(&fnic->ctlr); ctlr 149 drivers/scsi/fnic/fnic_res.c fnic->ctlr.ctl_src_addr, ctlr 277 drivers/scsi/fnic/fnic_scsi.c if (fnic->ctlr.map_dest) { ctlr 281 drivers/scsi/fnic/fnic_scsi.c memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); ctlr 285 drivers/scsi/fnic/fnic_scsi.c if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { ctlr 298 drivers/scsi/fnic/fnic_scsi.c fc_id, fnic->ctlr.map_dest, gw_mac); ctlr 2685 drivers/scsi/fnic/fnic_scsi.c fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); ctlr 2732 drivers/scsi/fnic/fnic_scsi.c fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); ctlr 888 drivers/scsi/hpsa.c return snprintf(buf, 20, "%d\n", h->ctlr); ctlr 3959 drivers/scsi/hpsa.c h->ctlr, __func__, ctlr 8573 drivers/scsi/hpsa.c wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); ctlr 8675 drivers/scsi/hpsa.c h->ctlr = number_of_controllers; ctlr 165 drivers/scsi/hpsa.h int ctlr; ctlr 1647 drivers/scsi/myrs.c mbox->SCSI_10.pdev.ctlr = 0; ctlr 1693 drivers/scsi/myrs.c mbox->SCSI_255.pdev.ctlr = 0; ctlr 406 drivers/scsi/myrs.h unsigned char ctlr; /* Byte 0 */ ctlr 620 drivers/scsi/myrs.h unsigned char ctlr:5; /* Byte 2 Bits 3-7 */ ctlr 629 drivers/scsi/myrs.h unsigned char ctlr:5; /* Byte 2 Bits 3-7 */ ctlr 311 drivers/scsi/qedf/qedf.h struct fcoe_ctlr ctlr; ctlr 56 drivers/scsi/qedf/qedf_attr.c if (qedf->ctlr.sel_fcf) ctlr 57 drivers/scsi/qedf/qedf_attr.c fka_period = qedf->ctlr.sel_fcf->fka_period; ctlr 334 drivers/scsi/qedf/qedf_debugfs.c seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]); ctlr 337 drivers/scsi/qedf/qedf_debugfs.c if (qedf->ctlr.sel_fcf) { ctlr 339 drivers/scsi/qedf/qedf_debugfs.c qedf->ctlr.sel_fcf->switch_name); ctlr 340 drivers/scsi/qedf/qedf_debugfs.c seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac); ctlr 123 drivers/scsi/qedf/qedf_fip.c struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); ctlr 219 drivers/scsi/qedf/qedf_fip.c if (qedf->ctlr.sel_fcf == NULL) { ctlr 241 drivers/scsi/qedf/qedf_fip.c qedf->ctlr.sel_fcf->fcf_mac)) ctlr 250 drivers/scsi/qedf/qedf_fip.c qedf->ctlr.sel_fcf->switch_name); ctlr 252 drivers/scsi/qedf/qedf_fip.c qedf->ctlr.sel_fcf->switch_name) ctlr 292 drivers/scsi/qedf/qedf_fip.c fcoe_ctlr_recv(&qedf->ctlr, skb); ctlr 144 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_up(&qedf->ctlr); ctlr 194 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_up(&qedf->ctlr); ctlr 204 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_down(&qedf->ctlr); ctlr 239 drivers/scsi/qedf/qedf_main.c } else if (qedf->ctlr.sel_fcf->fc_map != 0) { ctlr 240 drivers/scsi/qedf/qedf_main.c hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); ctlr 370 drivers/scsi/qedf/qedf_main.c qedf->ctlr.state = FIP_ST_LINK_WAIT; ctlr 371 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_down(&qedf->ctlr); ctlr 377 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_up(&qedf->ctlr); ctlr 391 drivers/scsi/qedf/qedf_main.c if (qedf->ctlr.sel_fcf) { ctlr 1049 drivers/scsi/qedf/qedf_main.c if (!qedf->ctlr.sel_fcf) { ctlr 1067 drivers/scsi/qedf/qedf_main.c if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) ctlr 1134 drivers/scsi/qedf/qedf_main.c if (qedf->ctlr.map_dest) ctlr 1138 drivers/scsi/qedf/qedf_main.c ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); ctlr 1275 drivers/scsi/qedf/qedf_main.c ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); ctlr 1569 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); ctlr 1571 drivers/scsi/qedf/qedf_main.c qedf->ctlr.send = qedf_fip_send; ctlr 1572 drivers/scsi/qedf/qedf_main.c qedf->ctlr.get_src_addr = qedf_get_src_mac; ctlr 1573 drivers/scsi/qedf/qedf_main.c ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); ctlr 1649 drivers/scsi/qedf/qedf_main.c if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { ctlr 2417 drivers/scsi/qedf/qedf_main.c if (qedf->ctlr.state) { ctlr 2418 drivers/scsi/qedf/qedf_main.c if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { ctlr 2421 drivers/scsi/qedf/qedf_main.c mac, qedf->ctlr.dest_addr); ctlr 3187 drivers/scsi/qedf/qedf_main.c qedf->ctlr.lp = lport; ctlr 3512 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_up(&qedf->ctlr); ctlr 3575 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_link_down(&qedf->ctlr); ctlr 3613 drivers/scsi/qedf/qedf_main.c fcoe_ctlr_destroy(&qedf->ctlr); ctlr 3714 drivers/scsi/qedf/qedf_main.c if (qedf->ctlr.sel_fcf) { ctlr 3716 drivers/scsi/qedf/qedf_main.c u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); ctlr 23 drivers/spi/internals.h int spi_map_buf(struct spi_controller *ctlr, struct device *dev, ctlr 26 drivers/spi/internals.h void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, ctlr 29 drivers/spi/internals.h static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev, ctlr 36 drivers/spi/internals.h static inline void spi_unmap_buf(struct spi_controller *ctlr, ctlr 106 drivers/spi/spi-at91-usart.c struct spi_controller *ctlr = data; ctlr 107 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 123 drivers/spi/spi-at91-usart.c static int at91_usart_spi_configure_dma(struct spi_controller *ctlr, ctlr 135 drivers/spi/spi-at91-usart.c ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx"); ctlr 136 drivers/spi/spi-at91-usart.c if (IS_ERR_OR_NULL(ctlr->dma_tx)) { ctlr 137 drivers/spi/spi-at91-usart.c if (IS_ERR(ctlr->dma_tx)) { ctlr 138 drivers/spi/spi-at91-usart.c err = PTR_ERR(ctlr->dma_tx); ctlr 148 drivers/spi/spi-at91-usart.c ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx"); ctlr 149 drivers/spi/spi-at91-usart.c if (IS_ERR_OR_NULL(ctlr->dma_rx)) { ctlr 150 drivers/spi/spi-at91-usart.c if (IS_ERR(ctlr->dma_rx)) { ctlr 151 drivers/spi/spi-at91-usart.c err = PTR_ERR(ctlr->dma_rx); ctlr 170 drivers/spi/spi-at91-usart.c if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) { ctlr 171 drivers/spi/spi-at91-usart.c dev_err(&ctlr->dev, ctlr 178 drivers/spi/spi-at91-usart.c if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) { ctlr 179 drivers/spi/spi-at91-usart.c dev_err(&ctlr->dev, ctlr 189 drivers/spi/spi-at91-usart.c if (!IS_ERR_OR_NULL(ctlr->dma_tx)) ctlr 190 drivers/spi/spi-at91-usart.c dma_release_channel(ctlr->dma_tx); ctlr 191 drivers/spi/spi-at91-usart.c if (!IS_ERR_OR_NULL(ctlr->dma_rx)) ctlr 192 drivers/spi/spi-at91-usart.c dma_release_channel(ctlr->dma_rx); ctlr 193 drivers/spi/spi-at91-usart.c ctlr->dma_tx = NULL; ctlr 194 drivers/spi/spi-at91-usart.c ctlr->dma_rx = NULL; ctlr 200 drivers/spi/spi-at91-usart.c static void at91_usart_spi_release_dma(struct spi_controller *ctlr) ctlr 202 drivers/spi/spi-at91-usart.c if (ctlr->dma_rx) ctlr 203 drivers/spi/spi-at91-usart.c dma_release_channel(ctlr->dma_rx); ctlr 204 drivers/spi/spi-at91-usart.c if (ctlr->dma_tx) ctlr 205 drivers/spi/spi-at91-usart.c dma_release_channel(ctlr->dma_tx); ctlr 208 drivers/spi/spi-at91-usart.c static void at91_usart_spi_stop_dma(struct spi_controller *ctlr) ctlr 210 drivers/spi/spi-at91-usart.c if (ctlr->dma_rx) ctlr 211 drivers/spi/spi-at91-usart.c dmaengine_terminate_all(ctlr->dma_rx); ctlr 212 drivers/spi/spi-at91-usart.c if (ctlr->dma_tx) ctlr 213 drivers/spi/spi-at91-usart.c dmaengine_terminate_all(ctlr->dma_tx); ctlr 216 drivers/spi/spi-at91-usart.c static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr, ctlr 219 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 220 drivers/spi/spi-at91-usart.c struct dma_chan *rxchan = ctlr->dma_rx; ctlr 221 drivers/spi/spi-at91-usart.c struct dma_chan *txchan = ctlr->dma_tx; ctlr 248 drivers/spi/spi-at91-usart.c rxdesc->callback_param = ctlr; ctlr 266 drivers/spi/spi-at91-usart.c at91_usart_spi_stop_dma(ctlr); ctlr 398 drivers/spi/spi-at91-usart.c static int at91_usart_spi_transfer_one(struct spi_controller *ctlr, ctlr 402 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 415 drivers/spi/spi-at91-usart.c if (at91_usart_spi_can_dma(ctlr, spi, xfer) && ctlr 417 drivers/spi/spi-at91-usart.c ret = at91_usart_spi_dma_transfer(ctlr, xfer); ctlr 444 drivers/spi/spi-at91-usart.c static int at91_usart_spi_prepare_message(struct spi_controller *ctlr, ctlr 447 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 458 drivers/spi/spi-at91-usart.c static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr, ctlr 461 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 616 drivers/spi/spi-at91-usart.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 617 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 669 drivers/spi/spi-at91-usart.c struct spi_controller *ctlr = platform_get_drvdata(pdev); ctlr 670 drivers/spi/spi-at91-usart.c struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); ctlr 672 drivers/spi/spi-at91-usart.c at91_usart_spi_release_dma(ctlr); ctlr 338 drivers/spi/spi-bcm2835.c static void bcm2835_spi_reset_hw(struct spi_controller *ctlr) ctlr 340 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 366 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr = dev_id; ctlr 367 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 389 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 391 drivers/spi/spi-bcm2835.c complete(&ctlr->xfer_completion); ctlr 397 drivers/spi/spi-bcm2835.c static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr, ctlr 402 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 472 drivers/spi/spi-bcm2835.c static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr, ctlr 517 drivers/spi/spi-bcm2835.c dma_sync_single_for_device(ctlr->dma_rx->device->dev, ctlr 596 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr = data; ctlr 597 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 604 drivers/spi/spi-bcm2835.c dmaengine_terminate_async(ctlr->dma_tx); ctlr 610 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 613 drivers/spi/spi-bcm2835.c complete(&ctlr->xfer_completion); ctlr 624 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr = data; ctlr 625 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 641 drivers/spi/spi-bcm2835.c dmaengine_terminate_async(ctlr->dma_rx); ctlr 644 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 645 drivers/spi/spi-bcm2835.c complete(&ctlr->xfer_completion); ctlr 659 drivers/spi/spi-bcm2835.c static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr, ctlr 676 drivers/spi/spi-bcm2835.c chan = ctlr->dma_tx; ctlr 682 drivers/spi/spi-bcm2835.c chan = ctlr->dma_rx; ctlr 698 drivers/spi/spi-bcm2835.c desc->callback_param = ctlr; ctlr 701 drivers/spi/spi-bcm2835.c desc->callback_param = ctlr; ctlr 758 drivers/spi/spi-bcm2835.c static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr, ctlr 763 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 774 drivers/spi/spi-bcm2835.c bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs); ctlr 778 drivers/spi/spi-bcm2835.c ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true); ctlr 797 drivers/spi/spi-bcm2835.c dma_async_issue_pending(ctlr->dma_tx); ctlr 804 drivers/spi/spi-bcm2835.c ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false); ctlr 811 drivers/spi/spi-bcm2835.c dmaengine_terminate_sync(ctlr->dma_tx); ctlr 817 drivers/spi/spi-bcm2835.c dma_async_issue_pending(ctlr->dma_rx); ctlr 827 drivers/spi/spi-bcm2835.c dmaengine_terminate_async(ctlr->dma_rx); ctlr 828 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 835 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 840 drivers/spi/spi-bcm2835.c static bool bcm2835_spi_can_dma(struct spi_controller *ctlr, ctlr 852 drivers/spi/spi-bcm2835.c static void bcm2835_dma_release(struct spi_controller *ctlr, ctlr 857 drivers/spi/spi-bcm2835.c if (ctlr->dma_tx) { ctlr 858 drivers/spi/spi-bcm2835.c dmaengine_terminate_sync(ctlr->dma_tx); ctlr 864 drivers/spi/spi-bcm2835.c dma_unmap_page_attrs(ctlr->dma_tx->device->dev, ctlr 869 drivers/spi/spi-bcm2835.c dma_release_channel(ctlr->dma_tx); ctlr 870 drivers/spi/spi-bcm2835.c ctlr->dma_tx = NULL; ctlr 873 drivers/spi/spi-bcm2835.c if (ctlr->dma_rx) { ctlr 874 drivers/spi/spi-bcm2835.c dmaengine_terminate_sync(ctlr->dma_rx); ctlr 881 drivers/spi/spi-bcm2835.c dma_unmap_single(ctlr->dma_rx->device->dev, ctlr 886 drivers/spi/spi-bcm2835.c dma_release_channel(ctlr->dma_rx); ctlr 887 drivers/spi/spi-bcm2835.c ctlr->dma_rx = NULL; ctlr 891 drivers/spi/spi-bcm2835.c static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, ctlr 900 drivers/spi/spi-bcm2835.c addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); ctlr 908 drivers/spi/spi-bcm2835.c ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); ctlr 909 drivers/spi/spi-bcm2835.c if (!ctlr->dma_tx) { ctlr 913 drivers/spi/spi-bcm2835.c ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); ctlr 914 drivers/spi/spi-bcm2835.c if (!ctlr->dma_rx) { ctlr 927 drivers/spi/spi-bcm2835.c ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config); ctlr 931 drivers/spi/spi-bcm2835.c bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev, ctlr 935 drivers/spi/spi-bcm2835.c if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) { ctlr 941 drivers/spi/spi-bcm2835.c bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx, ctlr 966 drivers/spi/spi-bcm2835.c ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config); ctlr 970 drivers/spi/spi-bcm2835.c bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev, ctlr 974 drivers/spi/spi-bcm2835.c if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) { ctlr 981 drivers/spi/spi-bcm2835.c bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx, ctlr 998 drivers/spi/spi-bcm2835.c ctlr->can_dma = bcm2835_spi_can_dma; ctlr 1006 drivers/spi/spi-bcm2835.c bcm2835_dma_release(ctlr, bs); ctlr 1011 drivers/spi/spi-bcm2835.c static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, ctlr 1016 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1055 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_irq(ctlr, spi, ctlr 1061 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 1066 drivers/spi/spi-bcm2835.c static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, ctlr 1070 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1115 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs); ctlr 1121 drivers/spi/spi-bcm2835.c if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr)) ctlr 1122 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs); ctlr 1125 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true); ctlr 1128 drivers/spi/spi-bcm2835.c static int bcm2835_spi_prepare_message(struct spi_controller *ctlr, ctlr 1132 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1135 drivers/spi/spi-bcm2835.c if (ctlr->can_dma) { ctlr 1141 drivers/spi/spi-bcm2835.c ret = spi_split_transfers_maxsize(ctlr, msg, 65532, ctlr 1156 drivers/spi/spi-bcm2835.c static void bcm2835_spi_handle_err(struct spi_controller *ctlr, ctlr 1159 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1162 drivers/spi/spi-bcm2835.c dmaengine_terminate_sync(ctlr->dma_tx); ctlr 1164 drivers/spi/spi-bcm2835.c dmaengine_terminate_sync(ctlr->dma_rx); ctlr 1169 drivers/spi/spi-bcm2835.c bcm2835_spi_reset_hw(ctlr); ctlr 1179 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr = spi->controller; ctlr 1180 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1202 drivers/spi/spi-bcm2835.c if (ctlr->dma_rx) { ctlr 1207 drivers/spi/spi-bcm2835.c dma_sync_single_for_device(ctlr->dma_rx->device->dev, ctlr 1276 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr; ctlr 1280 drivers/spi/spi-bcm2835.c ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs), ctlr 1282 drivers/spi/spi-bcm2835.c if (!ctlr) ctlr 1285 drivers/spi/spi-bcm2835.c platform_set_drvdata(pdev, ctlr); ctlr 1287 drivers/spi/spi-bcm2835.c ctlr->use_gpio_descriptors = true; ctlr 1288 drivers/spi/spi-bcm2835.c ctlr->mode_bits = BCM2835_SPI_MODE_BITS; ctlr 1289 drivers/spi/spi-bcm2835.c ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr 1290 drivers/spi/spi-bcm2835.c ctlr->num_chipselect = BCM2835_SPI_NUM_CS; ctlr 1291 drivers/spi/spi-bcm2835.c ctlr->setup = bcm2835_spi_setup; ctlr 1292 drivers/spi/spi-bcm2835.c ctlr->transfer_one = bcm2835_spi_transfer_one; ctlr 1293 drivers/spi/spi-bcm2835.c ctlr->handle_err = bcm2835_spi_handle_err; ctlr 1294 drivers/spi/spi-bcm2835.c ctlr->prepare_message = bcm2835_spi_prepare_message; ctlr 1295 drivers/spi/spi-bcm2835.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 1297 drivers/spi/spi-bcm2835.c bs = spi_controller_get_devdata(ctlr); ctlr 1320 drivers/spi/spi-bcm2835.c bcm2835_dma_init(ctlr, &pdev->dev, bs); ctlr 1327 drivers/spi/spi-bcm2835.c dev_name(&pdev->dev), ctlr); ctlr 1333 drivers/spi/spi-bcm2835.c err = spi_register_controller(ctlr); ctlr 1347 drivers/spi/spi-bcm2835.c spi_controller_put(ctlr); ctlr 1353 drivers/spi/spi-bcm2835.c struct spi_controller *ctlr = platform_get_drvdata(pdev); ctlr 1354 drivers/spi/spi-bcm2835.c struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); ctlr 1358 drivers/spi/spi-bcm2835.c spi_unregister_controller(ctlr); ctlr 1366 drivers/spi/spi-bcm2835.c bcm2835_dma_release(ctlr, bs); ctlr 174 drivers/spi/spi-fsl-dspi.c struct spi_controller *ctlr; ctlr 221 drivers/spi/spi-fsl-dspi.c if (spi_controller_is_slave(dspi->ctlr)) ctlr 320 drivers/spi/spi-fsl-dspi.c if (spi_controller_is_slave(dspi->ctlr)) { ctlr 711 drivers/spi/spi-fsl-dspi.c static int dspi_transfer_one_message(struct spi_controller *ctlr, ctlr 714 drivers/spi/spi-fsl-dspi.c struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); ctlr 810 drivers/spi/spi-fsl-dspi.c spi_finalize_current_message(ctlr); ctlr 863 drivers/spi/spi-fsl-dspi.c if (!spi_controller_is_slave(dspi->ctlr)) { ctlr 901 drivers/spi/spi-fsl-dspi.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 902 drivers/spi/spi-fsl-dspi.c struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); ctlr 904 drivers/spi/spi-fsl-dspi.c spi_controller_suspend(ctlr); ctlr 914 drivers/spi/spi-fsl-dspi.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 915 drivers/spi/spi-fsl-dspi.c struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); ctlr 923 drivers/spi/spi-fsl-dspi.c spi_controller_resume(ctlr); ctlr 985 drivers/spi/spi-fsl-dspi.c if (!spi_controller_is_slave(dspi->ctlr)) ctlr 1000 drivers/spi/spi-fsl-dspi.c struct spi_controller *ctlr; ctlr 1006 drivers/spi/spi-fsl-dspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); ctlr 1007 drivers/spi/spi-fsl-dspi.c if (!ctlr) ctlr 1010 drivers/spi/spi-fsl-dspi.c dspi = spi_controller_get_devdata(ctlr); ctlr 1012 drivers/spi/spi-fsl-dspi.c dspi->ctlr = ctlr; ctlr 1014 drivers/spi/spi-fsl-dspi.c ctlr->setup = dspi_setup; ctlr 1015 drivers/spi/spi-fsl-dspi.c ctlr->transfer_one_message = dspi_transfer_one_message; ctlr 1016 drivers/spi/spi-fsl-dspi.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 1018 drivers/spi/spi-fsl-dspi.c ctlr->cleanup = dspi_cleanup; ctlr 1019 drivers/spi/spi-fsl-dspi.c ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; ctlr 1023 drivers/spi/spi-fsl-dspi.c ctlr->num_chipselect = pdata->cs_num; ctlr 1024 drivers/spi/spi-fsl-dspi.c ctlr->bus_num = pdata->bus_num; ctlr 1034 drivers/spi/spi-fsl-dspi.c ctlr->num_chipselect = cs_num; ctlr 1041 drivers/spi/spi-fsl-dspi.c ctlr->bus_num = bus_num; ctlr 1044 drivers/spi/spi-fsl-dspi.c ctlr->slave = true; ctlr 1055 drivers/spi/spi-fsl-dspi.c ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); ctlr 1057 drivers/spi/spi-fsl-dspi.c ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); ctlr 1129 drivers/spi/spi-fsl-dspi.c ctlr->max_speed_hz = ctlr 1132 drivers/spi/spi-fsl-dspi.c platform_set_drvdata(pdev, ctlr); ctlr 1134 drivers/spi/spi-fsl-dspi.c ret = spi_register_controller(ctlr); ctlr 1145 drivers/spi/spi-fsl-dspi.c spi_controller_put(ctlr); ctlr 1152 drivers/spi/spi-fsl-dspi.c struct spi_controller *ctlr = platform_get_drvdata(pdev); ctlr 1153 drivers/spi/spi-fsl-dspi.c struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); ctlr 1158 drivers/spi/spi-fsl-dspi.c spi_unregister_controller(dspi->ctlr); ctlr 828 drivers/spi/spi-fsl-qspi.c struct spi_controller *ctlr; ctlr 835 drivers/spi/spi-fsl-qspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(*q)); ctlr 836 drivers/spi/spi-fsl-qspi.c if (!ctlr) ctlr 839 drivers/spi/spi-fsl-qspi.c ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | ctlr 842 drivers/spi/spi-fsl-qspi.c q = spi_controller_get_devdata(ctlr); ctlr 903 drivers/spi/spi-fsl-qspi.c ctlr->bus_num = -1; ctlr 904 drivers/spi/spi-fsl-qspi.c ctlr->num_chipselect = 4; ctlr 905 drivers/spi/spi-fsl-qspi.c ctlr->mem_ops = &fsl_qspi_mem_ops; ctlr 909 drivers/spi/spi-fsl-qspi.c ctlr->dev.of_node = np; ctlr 911 drivers/spi/spi-fsl-qspi.c ret = devm_spi_register_controller(dev, ctlr); ctlr 924 drivers/spi/spi-fsl-qspi.c spi_controller_put(ctlr); ctlr 34 drivers/spi/spi-mem.c int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, ctlr 43 drivers/spi/spi-mem.c if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx) ctlr 44 drivers/spi/spi-mem.c dmadev = ctlr->dma_tx->device->dev; ctlr 45 drivers/spi/spi-mem.c else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx) ctlr 46 drivers/spi/spi-mem.c dmadev = ctlr->dma_rx->device->dev; ctlr 48 drivers/spi/spi-mem.c dmadev = ctlr->dev.parent; ctlr 53 drivers/spi/spi-mem.c return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes, ctlr 80 drivers/spi/spi-mem.c void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, ctlr 89 drivers/spi/spi-mem.c if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx) ctlr 90 drivers/spi/spi-mem.c dmadev = ctlr->dma_tx->device->dev; ctlr 91 drivers/spi/spi-mem.c else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx) ctlr 92 drivers/spi/spi-mem.c dmadev = ctlr->dma_rx->device->dev; ctlr 94 drivers/spi/spi-mem.c dmadev = ctlr->dev.parent; ctlr 96 drivers/spi/spi-mem.c spi_unmap_buf(ctlr, dmadev, sgt, ctlr 191 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 193 drivers/spi/spi-mem.c if (ctlr->mem_ops && ctlr->mem_ops->supports_op) ctlr 194 drivers/spi/spi-mem.c return ctlr->mem_ops->supports_op(mem, op); ctlr 225 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 231 drivers/spi/spi-mem.c spi_flush_queue(ctlr); ctlr 233 drivers/spi/spi-mem.c if (ctlr->auto_runtime_pm) { ctlr 236 drivers/spi/spi-mem.c ret = pm_runtime_get_sync(ctlr->dev.parent); ctlr 238 drivers/spi/spi-mem.c dev_err(&ctlr->dev, "Failed to power device: %d\n", ctlr 244 drivers/spi/spi-mem.c mutex_lock(&ctlr->bus_lock_mutex); ctlr 245 drivers/spi/spi-mem.c mutex_lock(&ctlr->io_mutex); ctlr 252 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 254 drivers/spi/spi-mem.c mutex_unlock(&ctlr->io_mutex); ctlr 255 drivers/spi/spi-mem.c mutex_unlock(&ctlr->bus_lock_mutex); ctlr 257 drivers/spi/spi-mem.c if (ctlr->auto_runtime_pm) ctlr 258 drivers/spi/spi-mem.c pm_runtime_put(ctlr->dev.parent); ctlr 276 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 289 drivers/spi/spi-mem.c if (ctlr->mem_ops) { ctlr 294 drivers/spi/spi-mem.c ret = ctlr->mem_ops->exec_op(mem, op); ctlr 418 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 423 drivers/spi/spi-mem.c if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) ctlr 424 drivers/spi/spi-mem.c return ctlr->mem_ops->adjust_op_size(mem, op); ctlr 426 drivers/spi/spi-mem.c if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { ctlr 499 drivers/spi/spi-mem.c struct spi_controller *ctlr = mem->spi->controller; ctlr 517 drivers/spi/spi-mem.c if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create) ctlr 518 drivers/spi/spi-mem.c ret = ctlr->mem_ops->dirmap_create(desc); ctlr 546 drivers/spi/spi-mem.c struct spi_controller *ctlr = desc->mem->spi->controller; ctlr 548 drivers/spi/spi-mem.c if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy) ctlr 549 drivers/spi/spi-mem.c ctlr->mem_ops->dirmap_destroy(desc); ctlr 643 drivers/spi/spi-mem.c struct spi_controller *ctlr = desc->mem->spi->controller; ctlr 654 drivers/spi/spi-mem.c } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) { ctlr 659 drivers/spi/spi-mem.c ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf); ctlr 689 drivers/spi/spi-mem.c struct spi_controller *ctlr = desc->mem->spi->controller; ctlr 700 drivers/spi/spi-mem.c } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) { ctlr 705 drivers/spi/spi-mem.c ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf); ctlr 724 drivers/spi/spi-mem.c struct spi_controller *ctlr = spi->controller; ctlr 733 drivers/spi/spi-mem.c if (ctlr->mem_ops && ctlr->mem_ops->get_name) ctlr 734 drivers/spi/spi-mem.c mem->name = ctlr->mem_ops->get_name(mem); ctlr 447 drivers/spi/spi-mpc512x-psc.c out_8(psc_addr(mps, ctlr), 0x82); ctlr 339 drivers/spi/spi-mpc52xx-psc.c out_8(&psc->ctlr, 0x84); ctlr 945 drivers/spi/spi-nxp-fspi.c struct spi_controller *ctlr; ctlr 952 drivers/spi/spi-nxp-fspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(*f)); ctlr 953 drivers/spi/spi-nxp-fspi.c if (!ctlr) ctlr 956 drivers/spi/spi-nxp-fspi.c ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | ctlr 959 drivers/spi/spi-nxp-fspi.c f = spi_controller_get_devdata(ctlr); ctlr 1022 drivers/spi/spi-nxp-fspi.c ctlr->bus_num = -1; ctlr 1023 drivers/spi/spi-nxp-fspi.c ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; ctlr 1024 drivers/spi/spi-nxp-fspi.c ctlr->mem_ops = &nxp_fspi_mem_ops; ctlr 1028 drivers/spi/spi-nxp-fspi.c ctlr->dev.of_node = np; ctlr 1030 drivers/spi/spi-nxp-fspi.c ret = spi_register_controller(ctlr); ctlr 1043 drivers/spi/spi-nxp-fspi.c spi_controller_put(ctlr); ctlr 183 drivers/spi/spi-rspi.c struct spi_controller *ctlr; ctlr 240 drivers/spi/spi-rspi.c int (*transfer_one)(struct spi_controller *ctlr, ctlr 472 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "transmit timeout\n"); ctlr 486 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "receive timeout\n"); ctlr 532 drivers/spi/spi-rspi.c desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl, ctlr 552 drivers/spi/spi-rspi.c desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl, ctlr 590 drivers/spi/spi-rspi.c dma_async_issue_pending(rspi->ctlr->dma_rx); ctlr 592 drivers/spi/spi-rspi.c dma_async_issue_pending(rspi->ctlr->dma_tx); ctlr 600 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "DMA timeout\n"); ctlr 604 drivers/spi/spi-rspi.c dmaengine_terminate_all(rspi->ctlr->dma_tx); ctlr 606 drivers/spi/spi-rspi.c dmaengine_terminate_all(rspi->ctlr->dma_rx); ctlr 620 drivers/spi/spi-rspi.c dmaengine_terminate_all(rspi->ctlr->dma_rx); ctlr 624 drivers/spi/spi-rspi.c dev_driver_string(&rspi->ctlr->dev), ctlr 625 drivers/spi/spi-rspi.c dev_name(&rspi->ctlr->dev)); ctlr 666 drivers/spi/spi-rspi.c static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, ctlr 669 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 677 drivers/spi/spi-rspi.c if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer)) ctlr 704 drivers/spi/spi-rspi.c static int rspi_transfer_one(struct spi_controller *ctlr, ctlr 707 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 722 drivers/spi/spi-rspi.c static int rspi_rz_transfer_one(struct spi_controller *ctlr, ctlr 726 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 744 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "transmit timeout\n"); ctlr 752 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "receive timeout\n"); ctlr 786 drivers/spi/spi-rspi.c if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { ctlr 796 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "transmit timeout\n"); ctlr 818 drivers/spi/spi-rspi.c if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { ctlr 828 drivers/spi/spi-rspi.c dev_err(&rspi->ctlr->dev, "receive timeout\n"); ctlr 840 drivers/spi/spi-rspi.c static int qspi_transfer_one(struct spi_controller *ctlr, ctlr 843 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 924 drivers/spi/spi-rspi.c static int rspi_prepare_message(struct spi_controller *ctlr, ctlr 927 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 959 drivers/spi/spi-rspi.c static int rspi_unprepare_message(struct spi_controller *ctlr, ctlr 962 drivers/spi/spi-rspi.c struct rspi_data *rspi = spi_controller_get_devdata(ctlr); ctlr 1066 drivers/spi/spi-rspi.c static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, ctlr 1084 drivers/spi/spi-rspi.c ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, ctlr 1086 drivers/spi/spi-rspi.c if (!ctlr->dma_tx) ctlr 1089 drivers/spi/spi-rspi.c ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, ctlr 1091 drivers/spi/spi-rspi.c if (!ctlr->dma_rx) { ctlr 1092 drivers/spi/spi-rspi.c dma_release_channel(ctlr->dma_tx); ctlr 1093 drivers/spi/spi-rspi.c ctlr->dma_tx = NULL; ctlr 1097 drivers/spi/spi-rspi.c ctlr->can_dma = rspi_can_dma; ctlr 1102 drivers/spi/spi-rspi.c static void rspi_release_dma(struct spi_controller *ctlr) ctlr 1104 drivers/spi/spi-rspi.c if (ctlr->dma_tx) ctlr 1105 drivers/spi/spi-rspi.c dma_release_channel(ctlr->dma_tx); ctlr 1106 drivers/spi/spi-rspi.c if (ctlr->dma_rx) ctlr 1107 drivers/spi/spi-rspi.c dma_release_channel(ctlr->dma_rx); ctlr 1114 drivers/spi/spi-rspi.c rspi_release_dma(rspi->ctlr); ctlr 1159 drivers/spi/spi-rspi.c static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) ctlr 1171 drivers/spi/spi-rspi.c ctlr->num_chipselect = num_cs; ctlr 1176 drivers/spi/spi-rspi.c static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) ctlr 1197 drivers/spi/spi-rspi.c struct spi_controller *ctlr; ctlr 1203 drivers/spi/spi-rspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); ctlr 1204 drivers/spi/spi-rspi.c if (ctlr == NULL) ctlr 1209 drivers/spi/spi-rspi.c ret = rspi_parse_dt(&pdev->dev, ctlr); ctlr 1216 drivers/spi/spi-rspi.c ctlr->num_chipselect = rspi_pd->num_chipselect; ctlr 1218 drivers/spi/spi-rspi.c ctlr->num_chipselect = 2; /* default */ ctlr 1228 drivers/spi/spi-rspi.c rspi = spi_controller_get_devdata(ctlr); ctlr 1231 drivers/spi/spi-rspi.c rspi->ctlr = ctlr; ctlr 1251 drivers/spi/spi-rspi.c ctlr->bus_num = pdev->id; ctlr 1252 drivers/spi/spi-rspi.c ctlr->auto_runtime_pm = true; ctlr 1253 drivers/spi/spi-rspi.c ctlr->transfer_one = ops->transfer_one; ctlr 1254 drivers/spi/spi-rspi.c ctlr->prepare_message = rspi_prepare_message; ctlr 1255 drivers/spi/spi-rspi.c ctlr->unprepare_message = rspi_unprepare_message; ctlr 1256 drivers/spi/spi-rspi.c ctlr->mode_bits = ops->mode_bits; ctlr 1257 drivers/spi/spi-rspi.c ctlr->flags = ops->flags; ctlr 1258 drivers/spi/spi-rspi.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 1291 drivers/spi/spi-rspi.c ret = rspi_request_dma(&pdev->dev, ctlr, res); ctlr 1295 drivers/spi/spi-rspi.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 1306 drivers/spi/spi-rspi.c rspi_release_dma(ctlr); ctlr 1310 drivers/spi/spi-rspi.c spi_controller_put(ctlr); ctlr 1329 drivers/spi/spi-rspi.c return spi_controller_suspend(rspi->ctlr); ctlr 1336 drivers/spi/spi-rspi.c return spi_controller_resume(rspi->ctlr); ctlr 38 drivers/spi/spi-sh-hspi.c struct spi_controller *ctlr; ctlr 143 drivers/spi/spi-sh-hspi.c static int hspi_transfer_one_message(struct spi_controller *ctlr, ctlr 146 drivers/spi/spi-sh-hspi.c struct hspi_priv *hspi = spi_controller_get_devdata(ctlr); ctlr 208 drivers/spi/spi-sh-hspi.c spi_finalize_current_message(ctlr); ctlr 216 drivers/spi/spi-sh-hspi.c struct spi_controller *ctlr; ctlr 228 drivers/spi/spi-sh-hspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi)); ctlr 229 drivers/spi/spi-sh-hspi.c if (!ctlr) ctlr 239 drivers/spi/spi-sh-hspi.c hspi = spi_controller_get_devdata(ctlr); ctlr 243 drivers/spi/spi-sh-hspi.c hspi->ctlr = ctlr; ctlr 255 drivers/spi/spi-sh-hspi.c ctlr->bus_num = pdev->id; ctlr 256 drivers/spi/spi-sh-hspi.c ctlr->mode_bits = SPI_CPOL | SPI_CPHA; ctlr 257 drivers/spi/spi-sh-hspi.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 258 drivers/spi/spi-sh-hspi.c ctlr->auto_runtime_pm = true; ctlr 259 drivers/spi/spi-sh-hspi.c ctlr->transfer_one_message = hspi_transfer_one_message; ctlr 260 drivers/spi/spi-sh-hspi.c ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr 262 drivers/spi/spi-sh-hspi.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 275 drivers/spi/spi-sh-hspi.c spi_controller_put(ctlr); ctlr 44 drivers/spi/spi-sh-msiof.c struct spi_controller *ctlr; ctlr 300 drivers/spi/spi-sh-msiof.c if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) ctlr 364 drivers/spi/spi-sh-msiof.c if (spi_controller_is_slave(p->ctlr)) { ctlr 371 drivers/spi/spi-sh-msiof.c if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { ctlr 395 drivers/spi/spi-sh-msiof.c if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) ctlr 556 drivers/spi/spi-sh-msiof.c if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr)) ctlr 581 drivers/spi/spi-sh-msiof.c static int sh_msiof_prepare_message(struct spi_controller *ctlr, ctlr 584 drivers/spi/spi-sh-msiof.c struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); ctlr 605 drivers/spi/spi-sh-msiof.c bool slave = spi_controller_is_slave(p->ctlr); ctlr 625 drivers/spi/spi-sh-msiof.c bool slave = spi_controller_is_slave(p->ctlr); ctlr 641 drivers/spi/spi-sh-msiof.c static int sh_msiof_slave_abort(struct spi_controller *ctlr) ctlr 643 drivers/spi/spi-sh-msiof.c struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); ctlr 654 drivers/spi/spi-sh-msiof.c if (spi_controller_is_slave(p->ctlr)) { ctlr 754 drivers/spi/spi-sh-msiof.c desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, ctlr 769 drivers/spi/spi-sh-msiof.c dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, ctlr 771 drivers/spi/spi-sh-msiof.c desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, ctlr 803 drivers/spi/spi-sh-msiof.c dma_async_issue_pending(p->ctlr->dma_rx); ctlr 805 drivers/spi/spi-sh-msiof.c dma_async_issue_pending(p->ctlr->dma_tx); ctlr 845 drivers/spi/spi-sh-msiof.c dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev, ctlr 855 drivers/spi/spi-sh-msiof.c dmaengine_terminate_all(p->ctlr->dma_tx); ctlr 858 drivers/spi/spi-sh-msiof.c dmaengine_terminate_all(p->ctlr->dma_rx); ctlr 906 drivers/spi/spi-sh-msiof.c static int sh_msiof_transfer_one(struct spi_controller *ctlr, ctlr 910 drivers/spi/spi-sh-msiof.c struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); ctlr 928 drivers/spi/spi-sh-msiof.c if (!spi_controller_is_slave(p->ctlr)) ctlr 931 drivers/spi/spi-sh-msiof.c while (ctlr->dma_tx && len > 15) { ctlr 1139 drivers/spi/spi-sh-msiof.c num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); ctlr 1213 drivers/spi/spi-sh-msiof.c struct spi_controller *ctlr; ctlr 1233 drivers/spi/spi-sh-msiof.c ctlr = p->ctlr; ctlr 1234 drivers/spi/spi-sh-msiof.c ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, ctlr 1236 drivers/spi/spi-sh-msiof.c if (!ctlr->dma_tx) ctlr 1239 drivers/spi/spi-sh-msiof.c ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, ctlr 1241 drivers/spi/spi-sh-msiof.c if (!ctlr->dma_rx) ctlr 1252 drivers/spi/spi-sh-msiof.c tx_dev = ctlr->dma_tx->device->dev; ctlr 1258 drivers/spi/spi-sh-msiof.c rx_dev = ctlr->dma_rx->device->dev; ctlr 1274 drivers/spi/spi-sh-msiof.c dma_release_channel(ctlr->dma_rx); ctlr 1276 drivers/spi/spi-sh-msiof.c dma_release_channel(ctlr->dma_tx); ctlr 1277 drivers/spi/spi-sh-msiof.c ctlr->dma_tx = NULL; ctlr 1283 drivers/spi/spi-sh-msiof.c struct spi_controller *ctlr = p->ctlr; ctlr 1285 drivers/spi/spi-sh-msiof.c if (!ctlr->dma_tx) ctlr 1288 drivers/spi/spi-sh-msiof.c dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE, ctlr 1290 drivers/spi/spi-sh-msiof.c dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE, ctlr 1294 drivers/spi/spi-sh-msiof.c dma_release_channel(ctlr->dma_rx); ctlr 1295 drivers/spi/spi-sh-msiof.c dma_release_channel(ctlr->dma_tx); ctlr 1300 drivers/spi/spi-sh-msiof.c struct spi_controller *ctlr; ctlr 1321 drivers/spi/spi-sh-msiof.c ctlr = spi_alloc_slave(&pdev->dev, ctlr 1324 drivers/spi/spi-sh-msiof.c ctlr = spi_alloc_master(&pdev->dev, ctlr 1326 drivers/spi/spi-sh-msiof.c if (ctlr == NULL) ctlr 1329 drivers/spi/spi-sh-msiof.c p = spi_controller_get_devdata(ctlr); ctlr 1332 drivers/spi/spi-sh-msiof.c p->ctlr = ctlr; ctlr 1377 drivers/spi/spi-sh-msiof.c ctlr->num_chipselect = p->info->num_chipselect; ctlr 1383 drivers/spi/spi-sh-msiof.c ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; ctlr 1384 drivers/spi/spi-sh-msiof.c ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; ctlr 1385 drivers/spi/spi-sh-msiof.c ctlr->flags = chipdata->ctlr_flags; ctlr 1386 drivers/spi/spi-sh-msiof.c ctlr->bus_num = pdev->id; ctlr 1387 drivers/spi/spi-sh-msiof.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 1388 drivers/spi/spi-sh-msiof.c ctlr->setup = sh_msiof_spi_setup; ctlr 1389 drivers/spi/spi-sh-msiof.c ctlr->prepare_message = sh_msiof_prepare_message; ctlr 1390 drivers/spi/spi-sh-msiof.c ctlr->slave_abort = sh_msiof_slave_abort; ctlr 1391 drivers/spi/spi-sh-msiof.c ctlr->bits_per_word_mask = chipdata->bits_per_word_mask; ctlr 1392 drivers/spi/spi-sh-msiof.c ctlr->auto_runtime_pm = true; ctlr 1393 drivers/spi/spi-sh-msiof.c ctlr->transfer_one = sh_msiof_transfer_one; ctlr 1394 drivers/spi/spi-sh-msiof.c ctlr->use_gpio_descriptors = true; ctlr 1400 drivers/spi/spi-sh-msiof.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 1412 drivers/spi/spi-sh-msiof.c spi_controller_put(ctlr); ctlr 1436 drivers/spi/spi-sh-msiof.c return spi_controller_suspend(p->ctlr); ctlr 1443 drivers/spi/spi-sh-msiof.c return spi_controller_resume(p->ctlr); ctlr 112 drivers/spi/spi-slave-mt27xx.c static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr, ctlr 115 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 145 drivers/spi/spi-slave-mt27xx.c static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr, ctlr 149 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 182 drivers/spi/spi-slave-mt27xx.c static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr, ctlr 186 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 265 drivers/spi/spi-slave-mt27xx.c static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr, ctlr 269 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 276 drivers/spi/spi-slave-mt27xx.c return mtk_spi_slave_dma_transfer(ctlr, spi, xfer); ctlr 278 drivers/spi/spi-slave-mt27xx.c return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer); ctlr 300 drivers/spi/spi-slave-mt27xx.c static int mtk_slave_abort(struct spi_controller *ctlr) ctlr 302 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 312 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr = dev_id; ctlr 313 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 357 drivers/spi/spi-slave-mt27xx.c dev_warn(&ctlr->dev, "cmd invalid\n"); ctlr 369 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr; ctlr 374 drivers/spi/spi-slave-mt27xx.c ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata)); ctlr 375 drivers/spi/spi-slave-mt27xx.c if (!ctlr) { ctlr 380 drivers/spi/spi-slave-mt27xx.c ctlr->auto_runtime_pm = true; ctlr 381 drivers/spi/spi-slave-mt27xx.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 382 drivers/spi/spi-slave-mt27xx.c ctlr->mode_bits = SPI_CPOL | SPI_CPHA; ctlr 383 drivers/spi/spi-slave-mt27xx.c ctlr->mode_bits |= SPI_LSB_FIRST; ctlr 385 drivers/spi/spi-slave-mt27xx.c ctlr->prepare_message = mtk_spi_slave_prepare_message; ctlr 386 drivers/spi/spi-slave-mt27xx.c ctlr->transfer_one = mtk_spi_slave_transfer_one; ctlr 387 drivers/spi/spi-slave-mt27xx.c ctlr->setup = mtk_spi_slave_setup; ctlr 388 drivers/spi/spi-slave-mt27xx.c ctlr->slave_abort = mtk_slave_abort; ctlr 390 drivers/spi/spi-slave-mt27xx.c mdata = spi_controller_get_devdata(ctlr); ctlr 392 drivers/spi/spi-slave-mt27xx.c platform_set_drvdata(pdev, ctlr); ctlr 418 drivers/spi/spi-slave-mt27xx.c IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr); ctlr 439 drivers/spi/spi-slave-mt27xx.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 454 drivers/spi/spi-slave-mt27xx.c spi_controller_put(ctlr); ctlr 469 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 470 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 473 drivers/spi/spi-slave-mt27xx.c ret = spi_controller_suspend(ctlr); ctlr 485 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 486 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 497 drivers/spi/spi-slave-mt27xx.c ret = spi_controller_resume(ctlr); ctlr 508 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 509 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 518 drivers/spi/spi-slave-mt27xx.c struct spi_controller *ctlr = dev_get_drvdata(dev); ctlr 519 drivers/spi/spi-slave-mt27xx.c struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); ctlr 110 drivers/spi/spi-sprd-adi.c struct spi_controller *ctlr; ctlr 274 drivers/spi/spi-sprd-adi.c static int sprd_adi_transfer_one(struct spi_controller *ctlr, ctlr 278 drivers/spi/spi-sprd-adi.c struct sprd_adi *sadi = spi_controller_get_devdata(ctlr); ctlr 456 drivers/spi/spi-sprd-adi.c struct spi_controller *ctlr; ctlr 470 drivers/spi/spi-sprd-adi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi)); ctlr 471 drivers/spi/spi-sprd-adi.c if (!ctlr) ctlr 474 drivers/spi/spi-sprd-adi.c dev_set_drvdata(&pdev->dev, ctlr); ctlr 475 drivers/spi/spi-sprd-adi.c sadi = spi_controller_get_devdata(ctlr); ctlr 486 drivers/spi/spi-sprd-adi.c sadi->ctlr = ctlr; ctlr 513 drivers/spi/spi-sprd-adi.c ctlr->dev.of_node = pdev->dev.of_node; ctlr 514 drivers/spi/spi-sprd-adi.c ctlr->bus_num = pdev->id; ctlr 515 drivers/spi/spi-sprd-adi.c ctlr->num_chipselect = num_chipselect; ctlr 516 drivers/spi/spi-sprd-adi.c ctlr->flags = SPI_MASTER_HALF_DUPLEX; ctlr 517 drivers/spi/spi-sprd-adi.c ctlr->bits_per_word_mask = 0; ctlr 518 drivers/spi/spi-sprd-adi.c ctlr->transfer_one = sprd_adi_transfer_one; ctlr 520 drivers/spi/spi-sprd-adi.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 537 drivers/spi/spi-sprd-adi.c spi_controller_put(ctlr); ctlr 543 drivers/spi/spi-sprd-adi.c struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev); ctlr 544 drivers/spi/spi-sprd-adi.c struct sprd_adi *sadi = spi_controller_get_devdata(ctlr); ctlr 619 drivers/spi/spi-zynq-qspi.c struct spi_controller *ctlr; ctlr 625 drivers/spi/spi-zynq-qspi.c ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); ctlr 626 drivers/spi/spi-zynq-qspi.c if (!ctlr) ctlr 629 drivers/spi/spi-zynq-qspi.c xqspi = spi_controller_get_devdata(ctlr); ctlr 685 drivers/spi/spi-zynq-qspi.c ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS; ctlr 687 drivers/spi/spi-zynq-qspi.c ctlr->num_chipselect = num_cs; ctlr 689 drivers/spi/spi-zynq-qspi.c ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | ctlr 691 drivers/spi/spi-zynq-qspi.c ctlr->mem_ops = &zynq_qspi_mem_ops; ctlr 692 drivers/spi/spi-zynq-qspi.c ctlr->setup = zynq_qspi_setup_op; ctlr 693 drivers/spi/spi-zynq-qspi.c ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; ctlr 694 drivers/spi/spi-zynq-qspi.c ctlr->dev.of_node = np; ctlr 695 drivers/spi/spi-zynq-qspi.c ret = devm_spi_register_controller(&pdev->dev, ctlr); ctlr 708 drivers/spi/spi-zynq-qspi.c spi_controller_put(ctlr); ctlr 123 drivers/spi/spi.c struct spi_controller *ctlr = container_of(dev, \ ctlr 125 drivers/spi/spi.c return spi_statistics_##field##_show(&ctlr->statistics, buf); \ ctlr 294 drivers/spi/spi.c struct spi_controller *ctlr) ctlr 309 drivers/spi/spi.c (xfer->tx_buf != ctlr->dummy_tx)) ctlr 312 drivers/spi/spi.c (xfer->rx_buf != ctlr->dummy_rx)) ctlr 495 drivers/spi/spi.c struct spi_device *spi_alloc_device(struct spi_controller *ctlr) ctlr 499 drivers/spi/spi.c if (!spi_controller_get(ctlr)) ctlr 504 drivers/spi/spi.c spi_controller_put(ctlr); ctlr 508 drivers/spi/spi.c spi->master = spi->controller = ctlr; ctlr 509 drivers/spi/spi.c spi->dev.parent = &ctlr->dev; ctlr 557 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 558 drivers/spi/spi.c struct device *dev = ctlr->dev.parent; ctlr 562 drivers/spi/spi.c if (spi->chip_select >= ctlr->num_chipselect) { ctlr 564 drivers/spi/spi.c ctlr->num_chipselect); ctlr 585 drivers/spi/spi.c if (ctlr->cs_gpiods) ctlr 586 drivers/spi/spi.c spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; ctlr 587 drivers/spi/spi.c else if (ctlr->cs_gpios) ctlr 588 drivers/spi/spi.c spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; ctlr 629 drivers/spi/spi.c struct spi_device *spi_new_device(struct spi_controller *ctlr, ctlr 642 drivers/spi/spi.c proxy = spi_alloc_device(ctlr); ctlr 660 drivers/spi/spi.c dev_err(&ctlr->dev, ctlr 704 drivers/spi/spi.c static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, ctlr 709 drivers/spi/spi.c if (ctlr->bus_num != bi->bus_num) ctlr 712 drivers/spi/spi.c dev = spi_new_device(ctlr, bi); ctlr 714 drivers/spi/spi.c dev_err(ctlr->dev.parent, "can't create new device for %s\n", ctlr 753 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 765 drivers/spi/spi.c list_for_each_entry(ctlr, &spi_controller_list, list) ctlr 766 drivers/spi/spi.c spi_match_controller_to_boardinfo(ctlr, ctlr 806 drivers/spi/spi.c int spi_map_buf(struct spi_controller *ctlr, struct device *dev, ctlr 831 drivers/spi/spi.c desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); ctlr 887 drivers/spi/spi.c void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, ctlr 896 drivers/spi/spi.c static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) ctlr 902 drivers/spi/spi.c if (!ctlr->can_dma) ctlr 905 drivers/spi/spi.c if (ctlr->dma_tx) ctlr 906 drivers/spi/spi.c tx_dev = ctlr->dma_tx->device->dev; ctlr 908 drivers/spi/spi.c tx_dev = ctlr->dev.parent; ctlr 910 drivers/spi/spi.c if (ctlr->dma_rx) ctlr 911 drivers/spi/spi.c rx_dev = ctlr->dma_rx->device->dev; ctlr 913 drivers/spi/spi.c rx_dev = ctlr->dev.parent; ctlr 916 drivers/spi/spi.c if (!ctlr->can_dma(ctlr, msg->spi, xfer)) ctlr 920 drivers/spi/spi.c ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, ctlr 928 drivers/spi/spi.c ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, ctlr 932 drivers/spi/spi.c spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, ctlr 939 drivers/spi/spi.c ctlr->cur_msg_mapped = true; ctlr 944 drivers/spi/spi.c static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) ctlr 949 drivers/spi/spi.c if (!ctlr->cur_msg_mapped || !ctlr->can_dma) ctlr 952 drivers/spi/spi.c if (ctlr->dma_tx) ctlr 953 drivers/spi/spi.c tx_dev = ctlr->dma_tx->device->dev; ctlr 955 drivers/spi/spi.c tx_dev = ctlr->dev.parent; ctlr 957 drivers/spi/spi.c if (ctlr->dma_rx) ctlr 958 drivers/spi/spi.c rx_dev = ctlr->dma_rx->device->dev; ctlr 960 drivers/spi/spi.c rx_dev = ctlr->dev.parent; ctlr 963 drivers/spi/spi.c if (!ctlr->can_dma(ctlr, msg->spi, xfer)) ctlr 966 drivers/spi/spi.c spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); ctlr 967 drivers/spi/spi.c spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); ctlr 973 drivers/spi/spi.c static inline int __spi_map_msg(struct spi_controller *ctlr, ctlr 979 drivers/spi/spi.c static inline int __spi_unmap_msg(struct spi_controller *ctlr, ctlr 986 drivers/spi/spi.c static inline int spi_unmap_msg(struct spi_controller *ctlr, ctlr 996 drivers/spi/spi.c if (xfer->tx_buf == ctlr->dummy_tx) ctlr 998 drivers/spi/spi.c if (xfer->rx_buf == ctlr->dummy_rx) ctlr 1002 drivers/spi/spi.c return __spi_unmap_msg(ctlr, msg); ctlr 1005 drivers/spi/spi.c static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) ctlr 1011 drivers/spi/spi.c if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { ctlr 1016 drivers/spi/spi.c if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && ctlr 1019 drivers/spi/spi.c if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && ctlr 1025 drivers/spi/spi.c tmp = krealloc(ctlr->dummy_tx, max_tx, ctlr 1029 drivers/spi/spi.c ctlr->dummy_tx = tmp; ctlr 1034 drivers/spi/spi.c tmp = krealloc(ctlr->dummy_rx, max_rx, ctlr 1038 drivers/spi/spi.c ctlr->dummy_rx = tmp; ctlr 1047 drivers/spi/spi.c xfer->tx_buf = ctlr->dummy_tx; ctlr 1049 drivers/spi/spi.c xfer->rx_buf = ctlr->dummy_rx; ctlr 1054 drivers/spi/spi.c return __spi_map_msg(ctlr, msg); ctlr 1057 drivers/spi/spi.c static int spi_transfer_wait(struct spi_controller *ctlr, ctlr 1061 drivers/spi/spi.c struct spi_statistics *statm = &ctlr->statistics; ctlr 1065 drivers/spi/spi.c if (spi_controller_is_slave(ctlr)) { ctlr 1066 drivers/spi/spi.c if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { ctlr 1078 drivers/spi/spi.c ms = wait_for_completion_timeout(&ctlr->xfer_completion, ctlr 1154 drivers/spi/spi.c static int spi_transfer_one_message(struct spi_controller *ctlr, ctlr 1160 drivers/spi/spi.c struct spi_statistics *statm = &ctlr->statistics; ctlr 1171 drivers/spi/spi.c spi_statistics_add_transfer_stats(statm, xfer, ctlr); ctlr 1172 drivers/spi/spi.c spi_statistics_add_transfer_stats(stats, xfer, ctlr); ctlr 1175 drivers/spi/spi.c reinit_completion(&ctlr->xfer_completion); ctlr 1177 drivers/spi/spi.c ret = ctlr->transfer_one(ctlr, msg->spi, xfer); ctlr 1189 drivers/spi/spi.c ret = spi_transfer_wait(ctlr, msg, xfer); ctlr 1229 drivers/spi/spi.c if (msg->status && ctlr->handle_err) ctlr 1230 drivers/spi/spi.c ctlr->handle_err(ctlr, msg); ctlr 1232 drivers/spi/spi.c spi_res_release(ctlr, msg); ctlr 1234 drivers/spi/spi.c spi_finalize_current_message(ctlr); ctlr 1247 drivers/spi/spi.c void spi_finalize_current_transfer(struct spi_controller *ctlr) ctlr 1249 drivers/spi/spi.c complete(&ctlr->xfer_completion); ctlr 1266 drivers/spi/spi.c static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) ctlr 1274 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1277 drivers/spi/spi.c if (ctlr->cur_msg) { ctlr 1278 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1283 drivers/spi/spi.c if (ctlr->idling) { ctlr 1284 drivers/spi/spi.c kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); ctlr 1285 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1290 drivers/spi/spi.c if (list_empty(&ctlr->queue) || !ctlr->running) { ctlr 1291 drivers/spi/spi.c if (!ctlr->busy) { ctlr 1292 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1298 drivers/spi/spi.c kthread_queue_work(&ctlr->kworker, ctlr 1299 drivers/spi/spi.c &ctlr->pump_messages); ctlr 1300 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1304 drivers/spi/spi.c ctlr->busy = false; ctlr 1305 drivers/spi/spi.c ctlr->idling = true; ctlr 1306 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1308 drivers/spi/spi.c kfree(ctlr->dummy_rx); ctlr 1309 drivers/spi/spi.c ctlr->dummy_rx = NULL; ctlr 1310 drivers/spi/spi.c kfree(ctlr->dummy_tx); ctlr 1311 drivers/spi/spi.c ctlr->dummy_tx = NULL; ctlr 1312 drivers/spi/spi.c if (ctlr->unprepare_transfer_hardware && ctlr 1313 drivers/spi/spi.c ctlr->unprepare_transfer_hardware(ctlr)) ctlr 1314 drivers/spi/spi.c dev_err(&ctlr->dev, ctlr 1316 drivers/spi/spi.c if (ctlr->auto_runtime_pm) { ctlr 1317 drivers/spi/spi.c pm_runtime_mark_last_busy(ctlr->dev.parent); ctlr 1318 drivers/spi/spi.c pm_runtime_put_autosuspend(ctlr->dev.parent); ctlr 1320 drivers/spi/spi.c trace_spi_controller_idle(ctlr); ctlr 1322 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1323 drivers/spi/spi.c ctlr->idling = false; ctlr 1324 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1329 drivers/spi/spi.c msg = list_first_entry(&ctlr->queue, struct spi_message, queue); ctlr 1330 drivers/spi/spi.c ctlr->cur_msg = msg; ctlr 1333 drivers/spi/spi.c if (ctlr->busy) ctlr 1336 drivers/spi/spi.c ctlr->busy = true; ctlr 1337 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1339 drivers/spi/spi.c mutex_lock(&ctlr->io_mutex); ctlr 1341 drivers/spi/spi.c if (!was_busy && ctlr->auto_runtime_pm) { ctlr 1342 drivers/spi/spi.c ret = pm_runtime_get_sync(ctlr->dev.parent); ctlr 1344 drivers/spi/spi.c pm_runtime_put_noidle(ctlr->dev.parent); ctlr 1345 drivers/spi/spi.c dev_err(&ctlr->dev, "Failed to power device: %d\n", ctlr 1347 drivers/spi/spi.c mutex_unlock(&ctlr->io_mutex); ctlr 1353 drivers/spi/spi.c trace_spi_controller_busy(ctlr); ctlr 1355 drivers/spi/spi.c if (!was_busy && ctlr->prepare_transfer_hardware) { ctlr 1356 drivers/spi/spi.c ret = ctlr->prepare_transfer_hardware(ctlr); ctlr 1358 drivers/spi/spi.c dev_err(&ctlr->dev, ctlr 1362 drivers/spi/spi.c if (ctlr->auto_runtime_pm) ctlr 1363 drivers/spi/spi.c pm_runtime_put(ctlr->dev.parent); ctlr 1366 drivers/spi/spi.c spi_finalize_current_message(ctlr); ctlr 1368 drivers/spi/spi.c mutex_unlock(&ctlr->io_mutex); ctlr 1375 drivers/spi/spi.c if (ctlr->prepare_message) { ctlr 1376 drivers/spi/spi.c ret = ctlr->prepare_message(ctlr, msg); ctlr 1378 drivers/spi/spi.c dev_err(&ctlr->dev, "failed to prepare message: %d\n", ctlr 1381 drivers/spi/spi.c spi_finalize_current_message(ctlr); ctlr 1384 drivers/spi/spi.c ctlr->cur_msg_prepared = true; ctlr 1387 drivers/spi/spi.c ret = spi_map_msg(ctlr, msg); ctlr 1390 drivers/spi/spi.c spi_finalize_current_message(ctlr); ctlr 1394 drivers/spi/spi.c ret = ctlr->transfer_one_message(ctlr, msg); ctlr 1396 drivers/spi/spi.c dev_err(&ctlr->dev, ctlr 1402 drivers/spi/spi.c mutex_unlock(&ctlr->io_mutex); ctlr 1415 drivers/spi/spi.c struct spi_controller *ctlr = ctlr 1418 drivers/spi/spi.c __spi_pump_messages(ctlr, true); ctlr 1436 drivers/spi/spi.c static void spi_set_thread_rt(struct spi_controller *ctlr) ctlr 1440 drivers/spi/spi.c dev_info(&ctlr->dev, ctlr 1442 drivers/spi/spi.c sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); ctlr 1445 drivers/spi/spi.c static int spi_init_queue(struct spi_controller *ctlr) ctlr 1447 drivers/spi/spi.c ctlr->running = false; ctlr 1448 drivers/spi/spi.c ctlr->busy = false; ctlr 1450 drivers/spi/spi.c kthread_init_worker(&ctlr->kworker); ctlr 1451 drivers/spi/spi.c ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, ctlr 1452 drivers/spi/spi.c "%s", dev_name(&ctlr->dev)); ctlr 1453 drivers/spi/spi.c if (IS_ERR(ctlr->kworker_task)) { ctlr 1454 drivers/spi/spi.c dev_err(&ctlr->dev, "failed to create message pump task\n"); ctlr 1455 drivers/spi/spi.c return PTR_ERR(ctlr->kworker_task); ctlr 1457 drivers/spi/spi.c kthread_init_work(&ctlr->pump_messages, spi_pump_messages); ctlr 1466 drivers/spi/spi.c if (ctlr->rt) ctlr 1467 drivers/spi/spi.c spi_set_thread_rt(ctlr); ctlr 1482 drivers/spi/spi.c struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) ctlr 1488 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1489 drivers/spi/spi.c next = list_first_entry_or_null(&ctlr->queue, struct spi_message, ctlr 1491 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1504 drivers/spi/spi.c void spi_finalize_current_message(struct spi_controller *ctlr) ctlr 1510 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1511 drivers/spi/spi.c mesg = ctlr->cur_msg; ctlr 1512 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1514 drivers/spi/spi.c spi_unmap_msg(ctlr, mesg); ctlr 1516 drivers/spi/spi.c if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ctlr 1517 drivers/spi/spi.c ret = ctlr->unprepare_message(ctlr, mesg); ctlr 1519 drivers/spi/spi.c dev_err(&ctlr->dev, "failed to unprepare message: %d\n", ctlr 1524 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1525 drivers/spi/spi.c ctlr->cur_msg = NULL; ctlr 1526 drivers/spi/spi.c ctlr->cur_msg_prepared = false; ctlr 1527 drivers/spi/spi.c kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); ctlr 1528 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1538 drivers/spi/spi.c static int spi_start_queue(struct spi_controller *ctlr) ctlr 1542 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1544 drivers/spi/spi.c if (ctlr->running || ctlr->busy) { ctlr 1545 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1549 drivers/spi/spi.c ctlr->running = true; ctlr 1550 drivers/spi/spi.c ctlr->cur_msg = NULL; ctlr 1551 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1553 drivers/spi/spi.c kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); ctlr 1558 drivers/spi/spi.c static int spi_stop_queue(struct spi_controller *ctlr) ctlr 1564 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1572 drivers/spi/spi.c while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { ctlr 1573 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1575 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1578 drivers/spi/spi.c if (!list_empty(&ctlr->queue) || ctlr->busy) ctlr 1581 drivers/spi/spi.c ctlr->running = false; ctlr 1583 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1586 drivers/spi/spi.c dev_warn(&ctlr->dev, "could not stop message queue\n"); ctlr 1592 drivers/spi/spi.c static int spi_destroy_queue(struct spi_controller *ctlr) ctlr 1596 drivers/spi/spi.c ret = spi_stop_queue(ctlr); ctlr 1605 drivers/spi/spi.c dev_err(&ctlr->dev, "problem destroying queue\n"); ctlr 1609 drivers/spi/spi.c kthread_flush_worker(&ctlr->kworker); ctlr 1610 drivers/spi/spi.c kthread_stop(ctlr->kworker_task); ctlr 1619 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 1622 drivers/spi/spi.c spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr 1624 drivers/spi/spi.c if (!ctlr->running) { ctlr 1625 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1631 drivers/spi/spi.c list_add_tail(&msg->queue, &ctlr->queue); ctlr 1632 drivers/spi/spi.c if (!ctlr->busy && need_pump) ctlr 1633 drivers/spi/spi.c kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); ctlr 1635 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->queue_lock, flags); ctlr 1651 drivers/spi/spi.c static int spi_controller_initialize_queue(struct spi_controller *ctlr) ctlr 1655 drivers/spi/spi.c ctlr->transfer = spi_queued_transfer; ctlr 1656 drivers/spi/spi.c if (!ctlr->transfer_one_message) ctlr 1657 drivers/spi/spi.c ctlr->transfer_one_message = spi_transfer_one_message; ctlr 1660 drivers/spi/spi.c ret = spi_init_queue(ctlr); ctlr 1662 drivers/spi/spi.c dev_err(&ctlr->dev, "problem initializing queue\n"); ctlr 1665 drivers/spi/spi.c ctlr->queued = true; ctlr 1666 drivers/spi/spi.c ret = spi_start_queue(ctlr); ctlr 1668 drivers/spi/spi.c dev_err(&ctlr->dev, "problem starting queue\n"); ctlr 1675 drivers/spi/spi.c spi_destroy_queue(ctlr); ctlr 1690 drivers/spi/spi.c void spi_flush_queue(struct spi_controller *ctlr) ctlr 1692 drivers/spi/spi.c if (ctlr->transfer == spi_queued_transfer) ctlr 1693 drivers/spi/spi.c __spi_pump_messages(ctlr, false); ctlr 1699 drivers/spi/spi.c static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, ctlr 1732 drivers/spi/spi.c dev_warn(&ctlr->dev, ctlr 1753 drivers/spi/spi.c dev_warn(&ctlr->dev, ctlr 1760 drivers/spi/spi.c if (spi_controller_is_slave(ctlr)) { ctlr 1762 drivers/spi/spi.c dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", ctlr 1772 drivers/spi/spi.c dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", ctlr 1783 drivers/spi/spi.c if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods && ctlr 1784 drivers/spi/spi.c ctlr->cs_gpiods[spi->chip_select]) ctlr 1790 drivers/spi/spi.c dev_err(&ctlr->dev, ctlr 1800 drivers/spi/spi.c of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) ctlr 1806 drivers/spi/spi.c spi = spi_alloc_device(ctlr); ctlr 1808 drivers/spi/spi.c dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); ctlr 1817 drivers/spi/spi.c dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); ctlr 1821 drivers/spi/spi.c rc = of_spi_parse_dt(ctlr, spi, nc); ctlr 1832 drivers/spi/spi.c dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); ctlr 1852 drivers/spi/spi.c static void of_register_spi_devices(struct spi_controller *ctlr) ctlr 1857 drivers/spi/spi.c if (!ctlr->dev.of_node) ctlr 1860 drivers/spi/spi.c for_each_available_child_of_node(ctlr->dev.of_node, nc) { ctlr 1863 drivers/spi/spi.c spi = of_register_spi_device(ctlr, nc); ctlr 1865 drivers/spi/spi.c dev_warn(&ctlr->dev, ctlr 1872 drivers/spi/spi.c static void of_register_spi_devices(struct spi_controller *ctlr) { } ctlr 1877 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 1917 drivers/spi/spi.c struct spi_controller *ctlr = lookup->ctlr; ctlr 1932 drivers/spi/spi.c ACPI_HANDLE(ctlr->dev.parent) != parent_handle) ctlr 1942 drivers/spi/spi.c if (ctlr->fw_translate_cs) { ctlr 1943 drivers/spi/spi.c int cs = ctlr->fw_translate_cs(ctlr, ctlr 1972 drivers/spi/spi.c static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, ctlr 1985 drivers/spi/spi.c lookup.ctlr = ctlr; ctlr 1999 drivers/spi/spi.c ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { ctlr 2007 drivers/spi/spi.c spi = spi_alloc_device(ctlr); ctlr 2009 drivers/spi/spi.c dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", ctlr 2032 drivers/spi/spi.c dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", ctlr 2043 drivers/spi/spi.c struct spi_controller *ctlr = data; ctlr 2049 drivers/spi/spi.c return acpi_register_spi_device(ctlr, adev); ctlr 2054 drivers/spi/spi.c static void acpi_register_spi_devices(struct spi_controller *ctlr) ctlr 2059 drivers/spi/spi.c handle = ACPI_HANDLE(ctlr->dev.parent); ctlr 2065 drivers/spi/spi.c acpi_spi_add_device, NULL, ctlr, NULL); ctlr 2067 drivers/spi/spi.c dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); ctlr 2070 drivers/spi/spi.c static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} ctlr 2075 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 2077 drivers/spi/spi.c ctlr = container_of(dev, struct spi_controller, dev); ctlr 2078 drivers/spi/spi.c kfree(ctlr); ctlr 2096 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 2098 drivers/spi/spi.c if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) ctlr 2099 drivers/spi/spi.c return ctlr->slave_abort(ctlr); ctlr 2113 drivers/spi/spi.c struct spi_controller *ctlr = container_of(dev, struct spi_controller, ctlr 2117 drivers/spi/spi.c child = device_find_child(&ctlr->dev, NULL, match_true); ctlr 2125 drivers/spi/spi.c struct spi_controller *ctlr = container_of(dev, struct spi_controller, ctlr 2136 drivers/spi/spi.c child = device_find_child(&ctlr->dev, NULL, match_true); ctlr 2145 drivers/spi/spi.c spi = spi_alloc_device(ctlr); ctlr 2216 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 2217 drivers/spi/spi.c size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); ctlr 2222 drivers/spi/spi.c ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); ctlr 2223 drivers/spi/spi.c if (!ctlr) ctlr 2226 drivers/spi/spi.c device_initialize(&ctlr->dev); ctlr 2227 drivers/spi/spi.c ctlr->bus_num = -1; ctlr 2228 drivers/spi/spi.c ctlr->num_chipselect = 1; ctlr 2229 drivers/spi/spi.c ctlr->slave = slave; ctlr 2231 drivers/spi/spi.c ctlr->dev.class = &spi_slave_class; ctlr 2233 drivers/spi/spi.c ctlr->dev.class = &spi_master_class; ctlr 2234 drivers/spi/spi.c ctlr->dev.parent = dev; ctlr 2235 drivers/spi/spi.c pm_suspend_ignore_children(&ctlr->dev, true); ctlr 2236 drivers/spi/spi.c spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); ctlr 2238 drivers/spi/spi.c return ctlr; ctlr 2243 drivers/spi/spi.c static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) ctlr 2246 drivers/spi/spi.c struct device_node *np = ctlr->dev.of_node; ctlr 2252 drivers/spi/spi.c ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); ctlr 2260 drivers/spi/spi.c cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), ctlr 2262 drivers/spi/spi.c ctlr->cs_gpios = cs; ctlr 2264 drivers/spi/spi.c if (!ctlr->cs_gpios) ctlr 2267 drivers/spi/spi.c for (i = 0; i < ctlr->num_chipselect; i++) ctlr 2276 drivers/spi/spi.c static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) ctlr 2286 drivers/spi/spi.c static int spi_get_gpio_descs(struct spi_controller *ctlr) ctlr 2290 drivers/spi/spi.c struct device *dev = &ctlr->dev; ctlr 2293 drivers/spi/spi.c ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); ctlr 2301 drivers/spi/spi.c cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), ctlr 2305 drivers/spi/spi.c ctlr->cs_gpiods = cs; ctlr 2338 drivers/spi/spi.c static int spi_controller_check_ops(struct spi_controller *ctlr) ctlr 2347 drivers/spi/spi.c if (ctlr->mem_ops) { ctlr 2348 drivers/spi/spi.c if (!ctlr->mem_ops->exec_op) ctlr 2350 drivers/spi/spi.c } else if (!ctlr->transfer && !ctlr->transfer_one && ctlr 2351 drivers/spi/spi.c !ctlr->transfer_one_message) { ctlr 2381 drivers/spi/spi.c int spi_register_controller(struct spi_controller *ctlr) ctlr 2383 drivers/spi/spi.c struct device *dev = ctlr->dev.parent; ctlr 2395 drivers/spi/spi.c status = spi_controller_check_ops(ctlr); ctlr 2399 drivers/spi/spi.c if (ctlr->bus_num >= 0) { ctlr 2402 drivers/spi/spi.c id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr 2403 drivers/spi/spi.c ctlr->bus_num + 1, GFP_KERNEL); ctlr 2407 drivers/spi/spi.c ctlr->bus_num = id; ctlr 2408 drivers/spi/spi.c } else if (ctlr->dev.of_node) { ctlr 2410 drivers/spi/spi.c id = of_alias_get_id(ctlr->dev.of_node, "spi"); ctlr 2412 drivers/spi/spi.c ctlr->bus_num = id; ctlr 2414 drivers/spi/spi.c id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr 2415 drivers/spi/spi.c ctlr->bus_num + 1, GFP_KERNEL); ctlr 2421 drivers/spi/spi.c if (ctlr->bus_num < 0) { ctlr 2429 drivers/spi/spi.c id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, ctlr 2434 drivers/spi/spi.c ctlr->bus_num = id; ctlr 2436 drivers/spi/spi.c INIT_LIST_HEAD(&ctlr->queue); ctlr 2437 drivers/spi/spi.c spin_lock_init(&ctlr->queue_lock); ctlr 2438 drivers/spi/spi.c spin_lock_init(&ctlr->bus_lock_spinlock); ctlr 2439 drivers/spi/spi.c mutex_init(&ctlr->bus_lock_mutex); ctlr 2440 drivers/spi/spi.c mutex_init(&ctlr->io_mutex); ctlr 2441 drivers/spi/spi.c ctlr->bus_lock_flag = 0; ctlr 2442 drivers/spi/spi.c init_completion(&ctlr->xfer_completion); ctlr 2443 drivers/spi/spi.c if (!ctlr->max_dma_len) ctlr 2444 drivers/spi/spi.c ctlr->max_dma_len = INT_MAX; ctlr 2449 drivers/spi/spi.c dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); ctlr 2451 drivers/spi/spi.c if (!spi_controller_is_slave(ctlr)) { ctlr 2452 drivers/spi/spi.c if (ctlr->use_gpio_descriptors) { ctlr 2453 drivers/spi/spi.c status = spi_get_gpio_descs(ctlr); ctlr 2460 drivers/spi/spi.c ctlr->mode_bits |= SPI_CS_HIGH; ctlr 2463 drivers/spi/spi.c status = of_spi_get_gpio_numbers(ctlr); ctlr 2473 drivers/spi/spi.c if (!ctlr->num_chipselect) { ctlr 2478 drivers/spi/spi.c status = device_add(&ctlr->dev); ctlr 2482 drivers/spi/spi.c spi_controller_is_slave(ctlr) ? "slave" : "master", ctlr 2483 drivers/spi/spi.c dev_name(&ctlr->dev)); ctlr 2490 drivers/spi/spi.c if (ctlr->transfer) { ctlr 2492 drivers/spi/spi.c } else if (ctlr->transfer_one || ctlr->transfer_one_message) { ctlr 2493 drivers/spi/spi.c status = spi_controller_initialize_queue(ctlr); ctlr 2495 drivers/spi/spi.c device_del(&ctlr->dev); ctlr 2500 drivers/spi/spi.c spin_lock_init(&ctlr->statistics.lock); ctlr 2503 drivers/spi/spi.c list_add_tail(&ctlr->list, &spi_controller_list); ctlr 2505 drivers/spi/spi.c spi_match_controller_to_boardinfo(ctlr, &bi->board_info); ctlr 2509 drivers/spi/spi.c of_register_spi_devices(ctlr); ctlr 2510 drivers/spi/spi.c acpi_register_spi_devices(ctlr); ctlr 2515 drivers/spi/spi.c idr_remove(&spi_master_idr, ctlr->bus_num); ctlr 2540 drivers/spi/spi.c struct spi_controller *ctlr) ctlr 2549 drivers/spi/spi.c ret = spi_register_controller(ctlr); ctlr 2551 drivers/spi/spi.c *ptr = ctlr; ctlr 2579 drivers/spi/spi.c void spi_unregister_controller(struct spi_controller *ctlr) ctlr 2582 drivers/spi/spi.c int id = ctlr->bus_num; ctlr 2584 drivers/spi/spi.c device_for_each_child(&ctlr->dev, NULL, __unregister); ctlr 2590 drivers/spi/spi.c if (ctlr->queued) { ctlr 2591 drivers/spi/spi.c if (spi_destroy_queue(ctlr)) ctlr 2592 drivers/spi/spi.c dev_err(&ctlr->dev, "queue remove failed\n"); ctlr 2595 drivers/spi/spi.c list_del(&ctlr->list); ctlr 2598 drivers/spi/spi.c device_unregister(&ctlr->dev); ctlr 2601 drivers/spi/spi.c if (found == ctlr) ctlr 2607 drivers/spi/spi.c int spi_controller_suspend(struct spi_controller *ctlr) ctlr 2612 drivers/spi/spi.c if (!ctlr->queued) ctlr 2615 drivers/spi/spi.c ret = spi_stop_queue(ctlr); ctlr 2617 drivers/spi/spi.c dev_err(&ctlr->dev, "queue stop failed\n"); ctlr 2623 drivers/spi/spi.c int spi_controller_resume(struct spi_controller *ctlr) ctlr 2627 drivers/spi/spi.c if (!ctlr->queued) ctlr 2630 drivers/spi/spi.c ret = spi_start_queue(ctlr); ctlr 2632 drivers/spi/spi.c dev_err(&ctlr->dev, "queue restart failed\n"); ctlr 2640 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 2643 drivers/spi/spi.c ctlr = container_of(dev, struct spi_controller, dev); ctlr 2644 drivers/spi/spi.c return ctlr->bus_num == *bus_num; ctlr 2662 drivers/spi/spi.c struct spi_controller *ctlr = NULL; ctlr 2667 drivers/spi/spi.c ctlr = container_of(dev, struct spi_controller, dev); ctlr 2669 drivers/spi/spi.c return ctlr; ctlr 2744 drivers/spi/spi.c void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) ctlr 2750 drivers/spi/spi.c res->release(ctlr, message, res->data); ctlr 2763 drivers/spi/spi.c static void __spi_replace_transfers_release(struct spi_controller *ctlr, ctlr 2772 drivers/spi/spi.c rxfer->release(ctlr, msg, res); ctlr 2891 drivers/spi/spi.c static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, ctlr 2948 drivers/spi/spi.c SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, ctlr 2967 drivers/spi/spi.c int spi_split_transfers_maxsize(struct spi_controller *ctlr, ctlr 2983 drivers/spi/spi.c ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, ctlr 3000 drivers/spi/spi.c static int __spi_validate_bits_per_word(struct spi_controller *ctlr, ctlr 3003 drivers/spi/spi.c if (ctlr->bits_per_word_mask) { ctlr 3007 drivers/spi/spi.c if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) ctlr 3131 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 3144 drivers/spi/spi.c if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || ctlr 3155 drivers/spi/spi.c ret = spi_split_transfers_maxsize(ctlr, message, maxsize, ctlr 3173 drivers/spi/spi.c if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || ctlr 3175 drivers/spi/spi.c unsigned flags = ctlr->flags; ctlr 3205 drivers/spi/spi.c if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) ctlr 3206 drivers/spi/spi.c xfer->speed_hz = ctlr->max_speed_hz; ctlr 3208 drivers/spi/spi.c if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) ctlr 3226 drivers/spi/spi.c if (xfer->speed_hz && ctlr->min_speed_hz && ctlr 3227 drivers/spi/spi.c xfer->speed_hz < ctlr->min_speed_hz) ctlr 3275 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 3281 drivers/spi/spi.c if (!ctlr->transfer) ctlr 3286 drivers/spi/spi.c SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); ctlr 3291 drivers/spi/spi.c return ctlr->transfer(spi, message); ctlr 3327 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 3335 drivers/spi/spi.c spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr 3337 drivers/spi/spi.c if (ctlr->bus_lock_flag) ctlr 3342 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); ctlr 3381 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 3389 drivers/spi/spi.c spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr 3393 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); ctlr 3416 drivers/spi/spi.c struct spi_controller *ctlr = spi->controller; ctlr 3427 drivers/spi/spi.c SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); ctlr 3435 drivers/spi/spi.c if (ctlr->transfer == spi_queued_transfer) { ctlr 3436 drivers/spi/spi.c spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr 3442 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); ctlr 3451 drivers/spi/spi.c if (ctlr->transfer == spi_queued_transfer) { ctlr 3452 drivers/spi/spi.c SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, ctlr 3456 drivers/spi/spi.c __spi_pump_messages(ctlr, false); ctlr 3536 drivers/spi/spi.c int spi_bus_lock(struct spi_controller *ctlr) ctlr 3540 drivers/spi/spi.c mutex_lock(&ctlr->bus_lock_mutex); ctlr 3542 drivers/spi/spi.c spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr 3543 drivers/spi/spi.c ctlr->bus_lock_flag = 1; ctlr 3544 drivers/spi/spi.c spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); ctlr 3565 drivers/spi/spi.c int spi_bus_unlock(struct spi_controller *ctlr) ctlr 3567 drivers/spi/spi.c ctlr->bus_lock_flag = 0; ctlr 3569 drivers/spi/spi.c mutex_unlock(&ctlr->bus_lock_mutex); ctlr 3688 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 3693 drivers/spi/spi.c ctlr = of_find_spi_controller_by_node(rd->dn->parent); ctlr 3694 drivers/spi/spi.c if (ctlr == NULL) ctlr 3698 drivers/spi/spi.c put_device(&ctlr->dev); ctlr 3702 drivers/spi/spi.c spi = of_register_spi_device(ctlr, rd->dn); ctlr 3703 drivers/spi/spi.c put_device(&ctlr->dev); ctlr 3774 drivers/spi/spi.c struct spi_controller *ctlr; ctlr 3779 drivers/spi/spi.c ctlr = acpi_spi_find_controller_by_adev(adev->parent); ctlr 3780 drivers/spi/spi.c if (!ctlr) ctlr 3783 drivers/spi/spi.c acpi_register_spi_device(ctlr, adev); ctlr 3784 drivers/spi/spi.c put_device(&ctlr->dev); ctlr 397 drivers/spi/spidev.c struct spi_controller *ctlr = spi->controller; ctlr 405 drivers/spi/spidev.c if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && ctlr 406 drivers/spi/spidev.c ctlr->cs_gpiods[spi->chip_select]) ctlr 132 drivers/tty/serial/mpc52xx_uart.c out_8(&psc->ctlr, divisor & 0xff); ctlr 883 drivers/tty/serial/mpc52xx_uart.c out_8(&psc->ctlr, divisor & 0xff); ctlr 29 include/linux/mtd/hyperbus.h struct hyperbus_ctlr *ctlr; ctlr 48 include/linux/platform_data/edma.h #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) ctlr 52 include/linux/platform_data/edma.h #define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) ctlr 291 include/linux/spi/spi-mem.h int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, ctlr 295 include/linux/spi/spi-mem.h void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, ctlr 304 include/linux/spi/spi-mem.h spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, ctlr 312 include/linux/spi/spi-mem.h spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, ctlr 79 include/linux/spi/spi.h struct spi_controller *ctlr); ctlr 540 include/linux/spi/spi.h bool (*can_dma)(struct spi_controller *ctlr, ctlr 567 include/linux/spi/spi.h int (*prepare_transfer_hardware)(struct spi_controller *ctlr); ctlr 568 include/linux/spi/spi.h int (*transfer_one_message)(struct spi_controller *ctlr, ctlr 570 include/linux/spi/spi.h int (*unprepare_transfer_hardware)(struct spi_controller *ctlr); ctlr 571 include/linux/spi/spi.h int (*prepare_message)(struct spi_controller *ctlr, ctlr 573 include/linux/spi/spi.h int (*unprepare_message)(struct spi_controller *ctlr, ctlr 575 include/linux/spi/spi.h int (*slave_abort)(struct spi_controller *ctlr); ctlr 582 include/linux/spi/spi.h int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, ctlr 584 include/linux/spi/spi.h void (*handle_err)(struct spi_controller *ctlr, ctlr 606 include/linux/spi/spi.h int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); ctlr 609 include/linux/spi/spi.h static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) ctlr 611 include/linux/spi/spi.h return dev_get_drvdata(&ctlr->dev); ctlr 614 include/linux/spi/spi.h static inline void spi_controller_set_devdata(struct spi_controller *ctlr, ctlr 617 include/linux/spi/spi.h dev_set_drvdata(&ctlr->dev, data); ctlr 620 include/linux/spi/spi.h static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr) ctlr 622 include/linux/spi/spi.h if (!ctlr || !get_device(&ctlr->dev)) ctlr 624 include/linux/spi/spi.h return ctlr; ctlr 627 include/linux/spi/spi.h static inline void spi_controller_put(struct spi_controller *ctlr) ctlr 629 include/linux/spi/spi.h if (ctlr) ctlr 630 include/linux/spi/spi.h put_device(&ctlr->dev); ctlr 633 include/linux/spi/spi.h static inline bool spi_controller_is_slave(struct spi_controller *ctlr) ctlr 635 include/linux/spi/spi.h return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave; ctlr 639 include/linux/spi/spi.h extern int spi_controller_suspend(struct spi_controller *ctlr); ctlr 640 include/linux/spi/spi.h extern int spi_controller_resume(struct spi_controller *ctlr); ctlr 643 include/linux/spi/spi.h extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr); ctlr 644 include/linux/spi/spi.h extern void spi_finalize_current_message(struct spi_controller *ctlr); ctlr 645 include/linux/spi/spi.h extern void spi_finalize_current_transfer(struct spi_controller *ctlr); ctlr 666 include/linux/spi/spi.h extern int spi_register_controller(struct spi_controller *ctlr); ctlr 668 include/linux/spi/spi.h struct spi_controller *ctlr); ctlr 669 include/linux/spi/spi.h extern void spi_unregister_controller(struct spi_controller *ctlr); ctlr 677 include/linux/spi/spi.h typedef void (*spi_res_release_t)(struct spi_controller *ctlr, ctlr 702 include/linux/spi/spi.h extern void spi_res_release(struct spi_controller *ctlr, ctlr 996 include/linux/spi/spi.h struct spi_controller *ctlr = spi->controller; ctlr 998 include/linux/spi/spi.h if (!ctlr->max_message_size) ctlr 1000 include/linux/spi/spi.h return ctlr->max_message_size(spi); ctlr 1006 include/linux/spi/spi.h struct spi_controller *ctlr = spi->controller; ctlr 1010 include/linux/spi/spi.h if (ctlr->max_transfer_size) ctlr 1011 include/linux/spi/spi.h tr_max = ctlr->max_transfer_size(spi); ctlr 1042 include/linux/spi/spi.h typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, ctlr 1086 include/linux/spi/spi.h extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, ctlr 1100 include/linux/spi/spi.h extern int spi_bus_lock(struct spi_controller *ctlr); ctlr 1101 include/linux/spi/spi.h extern int spi_bus_unlock(struct spi_controller *ctlr); ctlr 1362 include/linux/spi/spi.h spi_alloc_device(struct spi_controller *ctlr); ctlr 1376 include/linux/spi/spi.h spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) ctlr 1378 include/linux/spi/spi.h return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); ctlr 68 include/scsi/fcoe_sysfs.h static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr) ctlr 70 include/scsi/fcoe_sysfs.h return (void *)(ctlr + 1); ctlr 161 include/scsi/libfcoe.h static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr) ctlr 163 include/scsi/libfcoe.h return (void *)(ctlr + 1);