Searched refs:pd (Results 1 - 200 of 477) sorted by relevance

/linux-4.1.27/arch/x86/kernel/
pcspeaker.c
7 struct platform_device *pd; add_pcspkr() local
9 pd = platform_device_register_simple("pcspkr", -1, NULL, 0); add_pcspkr()
11 return IS_ERR(pd) ? PTR_ERR(pd) : 0; add_pcspkr()
sysfb.c
45 struct platform_device *pd; sysfb_init() local
68 pd = platform_device_register_resndata(NULL, name, 0, sysfb_init()
70 return PTR_ERR_OR_ZERO(pd); sysfb_init()
sysfb_simplefb.c
67 struct platform_device *pd; create_simplefb() local
89 pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0, create_simplefb()
91 return PTR_ERR_OR_ZERO(pd); create_simplefb()
/linux-4.1.27/drivers/net/ethernet/smsc/
smsc9420.c
100 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) smsc9420_reg_read() argument
102 return ioread32(pd->ioaddr + offset); smsc9420_reg_read()
106 smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) smsc9420_reg_write() argument
108 iowrite32(value, pd->ioaddr + offset); smsc9420_reg_write()
111 static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) smsc9420_pci_flush_write() argument
114 smsc9420_reg_read(pd, ID_REV); smsc9420_pci_flush_write()
119 struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; smsc9420_mii_read() local
124 spin_lock_irqsave(&pd->phy_lock, flags); smsc9420_mii_read()
127 if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { smsc9420_mii_read()
128 netif_warn(pd, drv, pd->dev, "MII is busy???\n"); smsc9420_mii_read()
135 smsc9420_reg_write(pd, MII_ACCESS, addr); smsc9420_mii_read()
139 if (!(smsc9420_reg_read(pd, MII_ACCESS) & smsc9420_mii_read()
141 reg = (u16)smsc9420_reg_read(pd, MII_DATA); smsc9420_mii_read()
147 netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); smsc9420_mii_read()
150 spin_unlock_irqrestore(&pd->phy_lock, flags); smsc9420_mii_read()
157 struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; smsc9420_mii_write() local
162 spin_lock_irqsave(&pd->phy_lock, flags); smsc9420_mii_write()
165 if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { smsc9420_mii_write()
166 netif_warn(pd, drv, pd->dev, "MII is busy???\n"); smsc9420_mii_write()
171 smsc9420_reg_write(pd, MII_DATA, (u32)val); smsc9420_mii_write()
176 smsc9420_reg_write(pd, MII_ACCESS, addr); smsc9420_mii_write()
180 if (!(smsc9420_reg_read(pd, MII_ACCESS) & smsc9420_mii_write()
188 netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); smsc9420_mii_write()
191 spin_unlock_irqrestore(&pd->phy_lock, flags); smsc9420_mii_write()
203 static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) smsc9420_eeprom_reload() argument
207 BUG_ON(!pd); smsc9420_eeprom_reload()
209 if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { smsc9420_eeprom_reload()
210 netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__); smsc9420_eeprom_reload()
214 smsc9420_reg_write(pd, E2P_CMD, smsc9420_eeprom_reload()
219 if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_)) smsc9420_eeprom_reload()
223 netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__); smsc9420_eeprom_reload()
230 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_do_ioctl() local
232 if (!netif_running(dev) || !pd->phy_dev) smsc9420_do_ioctl()
235 return phy_mii_ioctl(pd->phy_dev, ifr, cmd); smsc9420_do_ioctl()
241 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_ethtool_get_settings() local
243 if (!pd->phy_dev) smsc9420_ethtool_get_settings()
248 return phy_ethtool_gset(pd->phy_dev, cmd); smsc9420_ethtool_get_settings()
254 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_ethtool_set_settings() local
256 if (!pd->phy_dev) smsc9420_ethtool_set_settings()
259 return phy_ethtool_sset(pd->phy_dev, cmd); smsc9420_ethtool_set_settings()
265 struct smsc9420_pdata *pd = netdev_priv(netdev); smsc9420_ethtool_get_drvinfo() local
268 strlcpy(drvinfo->bus_info, pci_name(pd->pdev), smsc9420_ethtool_get_drvinfo()
275 struct smsc9420_pdata *pd = netdev_priv(netdev); smsc9420_ethtool_get_msglevel() local
276 return pd->msg_enable; smsc9420_ethtool_get_msglevel()
281 struct smsc9420_pdata *pd = netdev_priv(netdev); smsc9420_ethtool_set_msglevel() local
282 pd->msg_enable = data; smsc9420_ethtool_set_msglevel()
287 struct smsc9420_pdata *pd = netdev_priv(netdev); smsc9420_ethtool_nway_reset() local
289 if (!pd->phy_dev) smsc9420_ethtool_nway_reset()
292 return phy_start_aneg(pd->phy_dev); smsc9420_ethtool_nway_reset()
305 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_ethtool_getregs() local
306 struct phy_device *phy_dev = pd->phy_dev; smsc9420_ethtool_getregs()
310 regs->version = smsc9420_reg_read(pd, ID_REV); smsc9420_ethtool_getregs()
312 data[j++] = smsc9420_reg_read(pd, i); smsc9420_ethtool_getregs()
322 static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) smsc9420_eeprom_enable_access() argument
324 unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG); smsc9420_eeprom_enable_access()
326 smsc9420_reg_write(pd, GPIO_CFG, temp); smsc9420_eeprom_enable_access()
330 static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) smsc9420_eeprom_send_cmd() argument
335 netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op); smsc9420_eeprom_send_cmd()
336 if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { smsc9420_eeprom_send_cmd()
337 netif_warn(pd, hw, pd->dev, "Busy at start\n"); smsc9420_eeprom_send_cmd()
342 smsc9420_reg_write(pd, E2P_CMD, e2cmd); smsc9420_eeprom_send_cmd()
346 e2cmd = smsc9420_reg_read(pd, E2P_CMD); smsc9420_eeprom_send_cmd()
350 netif_info(pd, hw, pd->dev, "TIMED OUT\n"); smsc9420_eeprom_send_cmd()
355 netif_info(pd, hw, pd->dev, smsc9420_eeprom_send_cmd()
363 static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, smsc9420_eeprom_read_location() argument
369 netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address); smsc9420_eeprom_read_location()
370 ret = smsc9420_eeprom_send_cmd(pd, op); smsc9420_eeprom_read_location()
373 data[address] = smsc9420_reg_read(pd, E2P_DATA); smsc9420_eeprom_read_location()
378 static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, smsc9420_eeprom_write_location() argument
384 netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data); smsc9420_eeprom_write_location()
385 ret = smsc9420_eeprom_send_cmd(pd, op); smsc9420_eeprom_write_location()
389 smsc9420_reg_write(pd, E2P_DATA, (u32)data); smsc9420_eeprom_write_location()
390 ret = smsc9420_eeprom_send_cmd(pd, op); smsc9420_eeprom_write_location()
404 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_ethtool_get_eeprom() local
408 smsc9420_eeprom_enable_access(pd); smsc9420_ethtool_get_eeprom()
412 int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data); smsc9420_ethtool_get_eeprom()
428 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_ethtool_set_eeprom() local
434 smsc9420_eeprom_enable_access(pd); smsc9420_ethtool_set_eeprom()
435 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); smsc9420_ethtool_set_eeprom()
436 ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); smsc9420_ethtool_set_eeprom()
437 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_); smsc9420_ethtool_set_eeprom()
464 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_set_mac_address() local
470 smsc9420_reg_write(pd, ADDRH, mac_high16); smsc9420_set_mac_address()
471 smsc9420_reg_write(pd, ADDRL, mac_low32); smsc9420_set_mac_address()
476 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_check_mac_address() local
481 netif_dbg(pd, probe, pd->dev, smsc9420_check_mac_address()
486 u32 mac_high16 = smsc9420_reg_read(pd, ADDRH); smsc9420_check_mac_address()
487 u32 mac_low32 = smsc9420_reg_read(pd, ADDRL); smsc9420_check_mac_address()
497 netif_dbg(pd, probe, pd->dev, smsc9420_check_mac_address()
503 netif_dbg(pd, probe, pd->dev, smsc9420_check_mac_address()
509 static void smsc9420_stop_tx(struct smsc9420_pdata *pd) smsc9420_stop_tx() argument
515 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); smsc9420_stop_tx()
517 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); smsc9420_stop_tx()
521 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_) smsc9420_stop_tx()
527 netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n"); smsc9420_stop_tx()
530 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_); smsc9420_stop_tx()
533 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); smsc9420_stop_tx()
535 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); smsc9420_stop_tx()
536 smsc9420_pci_flush_write(pd); smsc9420_stop_tx()
539 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_); smsc9420_stop_tx()
540 smsc9420_reg_write(pd, MAC_CR, mac_cr); smsc9420_stop_tx()
541 smsc9420_pci_flush_write(pd); smsc9420_stop_tx()
544 static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd) smsc9420_free_tx_ring() argument
548 BUG_ON(!pd->tx_ring); smsc9420_free_tx_ring()
550 if (!pd->tx_buffers) smsc9420_free_tx_ring()
554 struct sk_buff *skb = pd->tx_buffers[i].skb; smsc9420_free_tx_ring()
557 BUG_ON(!pd->tx_buffers[i].mapping); smsc9420_free_tx_ring()
558 pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, smsc9420_free_tx_ring()
563 pd->tx_ring[i].status = 0; smsc9420_free_tx_ring()
564 pd->tx_ring[i].length = 0; smsc9420_free_tx_ring()
565 pd->tx_ring[i].buffer1 = 0; smsc9420_free_tx_ring()
566 pd->tx_ring[i].buffer2 = 0; smsc9420_free_tx_ring()
570 kfree(pd->tx_buffers); smsc9420_free_tx_ring()
571 pd->tx_buffers = NULL; smsc9420_free_tx_ring()
573 pd->tx_ring_head = 0; smsc9420_free_tx_ring()
574 pd->tx_ring_tail = 0; smsc9420_free_tx_ring()
577 static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd) smsc9420_free_rx_ring() argument
581 BUG_ON(!pd->rx_ring); smsc9420_free_rx_ring()
583 if (!pd->rx_buffers) smsc9420_free_rx_ring()
587 if (pd->rx_buffers[i].skb) smsc9420_free_rx_ring()
588 dev_kfree_skb_any(pd->rx_buffers[i].skb); smsc9420_free_rx_ring()
590 if (pd->rx_buffers[i].mapping) smsc9420_free_rx_ring()
591 pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, smsc9420_free_rx_ring()
594 pd->rx_ring[i].status = 0; smsc9420_free_rx_ring()
595 pd->rx_ring[i].length = 0; smsc9420_free_rx_ring()
596 pd->rx_ring[i].buffer1 = 0; smsc9420_free_rx_ring()
597 pd->rx_ring[i].buffer2 = 0; smsc9420_free_rx_ring()
601 kfree(pd->rx_buffers); smsc9420_free_rx_ring()
602 pd->rx_buffers = NULL; smsc9420_free_rx_ring()
604 pd->rx_ring_head = 0; smsc9420_free_rx_ring()
605 pd->rx_ring_tail = 0; smsc9420_free_rx_ring()
608 static void smsc9420_stop_rx(struct smsc9420_pdata *pd) smsc9420_stop_rx() argument
614 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); smsc9420_stop_rx()
616 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); smsc9420_stop_rx()
617 smsc9420_pci_flush_write(pd); smsc9420_stop_rx()
620 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_); smsc9420_stop_rx()
621 smsc9420_reg_write(pd, MAC_CR, mac_cr); smsc9420_stop_rx()
622 smsc9420_pci_flush_write(pd); smsc9420_stop_rx()
625 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); smsc9420_stop_rx()
627 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); smsc9420_stop_rx()
628 smsc9420_pci_flush_write(pd); smsc9420_stop_rx()
632 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_) smsc9420_stop_rx()
638 netif_warn(pd, ifdown, pd->dev, smsc9420_stop_rx()
642 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); smsc9420_stop_rx()
647 struct smsc9420_pdata *pd = dev_id; smsc9420_isr() local
652 BUG_ON(!pd); smsc9420_isr()
653 BUG_ON(!pd->ioaddr); smsc9420_isr()
655 int_cfg = smsc9420_reg_read(pd, INT_CFG); smsc9420_isr()
662 int_sts = smsc9420_reg_read(pd, INT_STAT); smsc9420_isr()
665 u32 status = smsc9420_reg_read(pd, DMAC_STATUS); smsc9420_isr()
670 netif_wake_queue(pd->dev); smsc9420_isr()
675 u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); smsc9420_isr()
677 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); smsc9420_isr()
678 smsc9420_pci_flush_write(pd); smsc9420_isr()
681 napi_schedule(&pd->napi); smsc9420_isr()
685 smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear); smsc9420_isr()
692 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_isr()
693 int_ctl = smsc9420_reg_read(pd, INT_CTL); smsc9420_isr()
695 smsc9420_reg_write(pd, INT_CTL, int_ctl); smsc9420_isr()
696 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_isr()
698 smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_); smsc9420_isr()
699 pd->software_irq_signal = true; smsc9420_isr()
706 smsc9420_pci_flush_write(pd); smsc9420_isr()
714 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_poll_controller() local
715 const int irq = pd->pdev->irq; smsc9420_poll_controller()
723 static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) smsc9420_dmac_soft_reset() argument
725 smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_); smsc9420_dmac_soft_reset()
726 smsc9420_reg_read(pd, BUS_MODE); smsc9420_dmac_soft_reset()
728 if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) smsc9420_dmac_soft_reset()
729 netif_warn(pd, drv, pd->dev, "Software reset not cleared\n"); smsc9420_dmac_soft_reset()
734 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_stop() local
738 BUG_ON(!pd); smsc9420_stop()
739 BUG_ON(!pd->phy_dev); smsc9420_stop()
742 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_stop()
743 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); smsc9420_stop()
744 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_stop()
745 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_stop()
748 napi_disable(&pd->napi); smsc9420_stop()
750 smsc9420_stop_tx(pd); smsc9420_stop()
751 smsc9420_free_tx_ring(pd); smsc9420_stop()
753 smsc9420_stop_rx(pd); smsc9420_stop()
754 smsc9420_free_rx_ring(pd); smsc9420_stop()
756 free_irq(pd->pdev->irq, pd); smsc9420_stop()
758 smsc9420_dmac_soft_reset(pd); smsc9420_stop()
760 phy_stop(pd->phy_dev); smsc9420_stop()
762 phy_disconnect(pd->phy_dev); smsc9420_stop()
763 pd->phy_dev = NULL; smsc9420_stop()
764 mdiobus_unregister(pd->mii_bus); smsc9420_stop()
765 mdiobus_free(pd->mii_bus); smsc9420_stop()
794 static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index, smsc9420_rx_handoff() argument
797 struct net_device *dev = pd->dev; smsc9420_rx_handoff()
805 if (pd->rx_csum) smsc9420_rx_handoff()
811 pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, smsc9420_rx_handoff()
813 pd->rx_buffers[index].mapping = 0; smsc9420_rx_handoff()
815 skb = pd->rx_buffers[index].skb; smsc9420_rx_handoff()
816 pd->rx_buffers[index].skb = NULL; smsc9420_rx_handoff()
818 if (pd->rx_csum) { smsc9420_rx_handoff()
833 static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) smsc9420_alloc_rx_buffer() argument
835 struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ); smsc9420_alloc_rx_buffer()
838 BUG_ON(pd->rx_buffers[index].skb); smsc9420_alloc_rx_buffer()
839 BUG_ON(pd->rx_buffers[index].mapping); smsc9420_alloc_rx_buffer()
844 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), smsc9420_alloc_rx_buffer()
846 if (pci_dma_mapping_error(pd->pdev, mapping)) { smsc9420_alloc_rx_buffer()
848 netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); smsc9420_alloc_rx_buffer()
852 pd->rx_buffers[index].skb = skb; smsc9420_alloc_rx_buffer()
853 pd->rx_buffers[index].mapping = mapping; smsc9420_alloc_rx_buffer()
854 pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; smsc9420_alloc_rx_buffer()
855 pd->rx_ring[index].status = RDES0_OWN_; smsc9420_alloc_rx_buffer()
861 static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd) smsc9420_alloc_new_rx_buffers() argument
863 while (pd->rx_ring_tail != pd->rx_ring_head) { smsc9420_alloc_new_rx_buffers()
864 if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail)) smsc9420_alloc_new_rx_buffers()
867 pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; smsc9420_alloc_new_rx_buffers()
873 struct smsc9420_pdata *pd = smsc9420_rx_poll() local
875 struct net_device *dev = pd->dev; smsc9420_rx_poll()
881 status = pd->rx_ring[pd->rx_ring_head].status; smsc9420_rx_poll()
888 smsc9420_rx_handoff(pd, pd->rx_ring_head, status); smsc9420_rx_poll()
889 pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; smsc9420_rx_poll()
890 smsc9420_alloc_new_rx_buffers(pd); smsc9420_rx_poll()
893 drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR); smsc9420_rx_poll()
898 smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); smsc9420_rx_poll()
899 smsc9420_pci_flush_write(pd); smsc9420_rx_poll()
902 napi_complete(&pd->napi); smsc9420_rx_poll()
905 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); smsc9420_rx_poll()
907 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); smsc9420_rx_poll()
908 smsc9420_pci_flush_write(pd); smsc9420_rx_poll()
944 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_complete_tx() local
946 while (pd->tx_ring_tail != pd->tx_ring_head) { smsc9420_complete_tx()
947 int index = pd->tx_ring_tail; smsc9420_complete_tx()
951 status = pd->tx_ring[index].status; smsc9420_complete_tx()
952 length = pd->tx_ring[index].length; smsc9420_complete_tx()
960 BUG_ON(!pd->tx_buffers[index].skb); smsc9420_complete_tx()
961 BUG_ON(!pd->tx_buffers[index].mapping); smsc9420_complete_tx()
963 pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, smsc9420_complete_tx()
964 pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE); smsc9420_complete_tx()
965 pd->tx_buffers[index].mapping = 0; smsc9420_complete_tx()
967 dev_kfree_skb_any(pd->tx_buffers[index].skb); smsc9420_complete_tx()
968 pd->tx_buffers[index].skb = NULL; smsc9420_complete_tx()
970 pd->tx_ring[index].buffer1 = 0; smsc9420_complete_tx()
973 pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; smsc9420_complete_tx()
980 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_hard_start_xmit() local
982 int index = pd->tx_ring_head; smsc9420_hard_start_xmit()
985 (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); smsc9420_hard_start_xmit()
990 BUG_ON(pd->tx_ring[index].status & TDES0_OWN_); smsc9420_hard_start_xmit()
991 BUG_ON(pd->tx_buffers[index].skb); smsc9420_hard_start_xmit()
992 BUG_ON(pd->tx_buffers[index].mapping); smsc9420_hard_start_xmit()
994 mapping = pci_map_single(pd->pdev, skb->data, smsc9420_hard_start_xmit()
996 if (pci_dma_mapping_error(pd->pdev, mapping)) { smsc9420_hard_start_xmit()
997 netif_warn(pd, tx_err, pd->dev, smsc9420_hard_start_xmit()
1002 pd->tx_buffers[index].skb = skb; smsc9420_hard_start_xmit()
1003 pd->tx_buffers[index].mapping = mapping; smsc9420_hard_start_xmit()
1008 netif_stop_queue(pd->dev); smsc9420_hard_start_xmit()
1015 pd->tx_ring[index].buffer1 = mapping; smsc9420_hard_start_xmit()
1016 pd->tx_ring[index].length = tmp_desc1; smsc9420_hard_start_xmit()
1020 pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; smsc9420_hard_start_xmit()
1023 pd->tx_ring[index].status = TDES0_OWN_; smsc9420_hard_start_xmit()
1029 smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); smsc9420_hard_start_xmit()
1030 smsc9420_pci_flush_write(pd); smsc9420_hard_start_xmit()
1037 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_get_stats() local
1038 u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR); smsc9420_get_stats()
1046 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_set_multicast_list() local
1047 u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); smsc9420_set_multicast_list()
1050 netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n"); smsc9420_set_multicast_list()
1055 netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n"); smsc9420_set_multicast_list()
1063 netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n"); netdev_for_each_mc_addr()
1074 smsc9420_reg_write(pd, HASHH, hash_hi);
1075 smsc9420_reg_write(pd, HASHL, hash_lo);
1081 netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
1082 smsc9420_reg_write(pd, HASHH, 0);
1083 smsc9420_reg_write(pd, HASHL, 0);
1090 smsc9420_reg_write(pd, MAC_CR, mac_cr);
1091 smsc9420_pci_flush_write(pd);
1094 static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) smsc9420_phy_update_flowcontrol() argument
1096 struct phy_device *phy_dev = pd->phy_dev; smsc9420_phy_update_flowcontrol()
1109 netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n", smsc9420_phy_update_flowcontrol()
1113 netif_info(pd, link, pd->dev, "half duplex\n"); smsc9420_phy_update_flowcontrol()
1117 smsc9420_reg_write(pd, FLOW, flow); smsc9420_phy_update_flowcontrol()
1124 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_phy_adjust_link() local
1125 struct phy_device *phy_dev = pd->phy_dev; smsc9420_phy_adjust_link()
1128 if (phy_dev->duplex != pd->last_duplex) { smsc9420_phy_adjust_link()
1129 u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); smsc9420_phy_adjust_link()
1131 netif_dbg(pd, link, pd->dev, "full duplex mode\n"); smsc9420_phy_adjust_link()
1134 netif_dbg(pd, link, pd->dev, "half duplex mode\n"); smsc9420_phy_adjust_link()
1137 smsc9420_reg_write(pd, MAC_CR, mac_cr); smsc9420_phy_adjust_link()
1139 smsc9420_phy_update_flowcontrol(pd); smsc9420_phy_adjust_link()
1140 pd->last_duplex = phy_dev->duplex; smsc9420_phy_adjust_link()
1144 if (carrier != pd->last_carrier) { smsc9420_phy_adjust_link()
1146 netif_dbg(pd, link, pd->dev, "carrier OK\n"); smsc9420_phy_adjust_link()
1148 netif_dbg(pd, link, pd->dev, "no carrier\n"); smsc9420_phy_adjust_link()
1149 pd->last_carrier = carrier; smsc9420_phy_adjust_link()
1155 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_mii_probe() local
1158 BUG_ON(pd->phy_dev); smsc9420_mii_probe()
1161 if (!pd->mii_bus->phy_map[1]) { smsc9420_mii_probe()
1166 phydev = pd->mii_bus->phy_map[1]; smsc9420_mii_probe()
1167 netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", smsc9420_mii_probe()
1186 pd->phy_dev = phydev; smsc9420_mii_probe()
1187 pd->last_duplex = -1; smsc9420_mii_probe()
1188 pd->last_carrier = -1; smsc9420_mii_probe()
1195 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_mii_init() local
1198 pd->mii_bus = mdiobus_alloc(); smsc9420_mii_init()
1199 if (!pd->mii_bus) { smsc9420_mii_init()
1203 pd->mii_bus->name = DRV_MDIONAME; smsc9420_mii_init()
1204 snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", smsc9420_mii_init()
1205 (pd->pdev->bus->number << 8) | pd->pdev->devfn); smsc9420_mii_init()
1206 pd->mii_bus->priv = pd; smsc9420_mii_init()
1207 pd->mii_bus->read = smsc9420_mii_read; smsc9420_mii_init()
1208 pd->mii_bus->write = smsc9420_mii_write; smsc9420_mii_init()
1209 pd->mii_bus->irq = pd->phy_irq; smsc9420_mii_init()
1211 pd->mii_bus->irq[i] = PHY_POLL; smsc9420_mii_init()
1214 pd->mii_bus->phy_mask = ~(1 << 1); smsc9420_mii_init()
1216 if (mdiobus_register(pd->mii_bus)) { smsc9420_mii_init()
1217 netif_warn(pd, probe, pd->dev, "Error registering mii bus\n"); smsc9420_mii_init()
1222 netif_warn(pd, probe, pd->dev, "Error probing mii bus\n"); smsc9420_mii_init()
1229 mdiobus_unregister(pd->mii_bus); smsc9420_mii_init()
1231 mdiobus_free(pd->mii_bus); smsc9420_mii_init()
1236 static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd) smsc9420_alloc_tx_ring() argument
1240 BUG_ON(!pd->tx_ring); smsc9420_alloc_tx_ring()
1242 pd->tx_buffers = kmalloc_array(TX_RING_SIZE, smsc9420_alloc_tx_ring()
1245 if (!pd->tx_buffers) smsc9420_alloc_tx_ring()
1250 pd->tx_buffers[i].skb = NULL; smsc9420_alloc_tx_ring()
1251 pd->tx_buffers[i].mapping = 0; smsc9420_alloc_tx_ring()
1252 pd->tx_ring[i].status = 0; smsc9420_alloc_tx_ring()
1253 pd->tx_ring[i].length = 0; smsc9420_alloc_tx_ring()
1254 pd->tx_ring[i].buffer1 = 0; smsc9420_alloc_tx_ring()
1255 pd->tx_ring[i].buffer2 = 0; smsc9420_alloc_tx_ring()
1257 pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_; smsc9420_alloc_tx_ring()
1260 pd->tx_ring_head = 0; smsc9420_alloc_tx_ring()
1261 pd->tx_ring_tail = 0; smsc9420_alloc_tx_ring()
1263 smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr); smsc9420_alloc_tx_ring()
1264 smsc9420_pci_flush_write(pd); smsc9420_alloc_tx_ring()
1269 static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) smsc9420_alloc_rx_ring() argument
1273 BUG_ON(!pd->rx_ring); smsc9420_alloc_rx_ring()
1275 pd->rx_buffers = kmalloc_array(RX_RING_SIZE, smsc9420_alloc_rx_ring()
1278 if (pd->rx_buffers == NULL) smsc9420_alloc_rx_ring()
1283 pd->rx_ring[i].status = 0; smsc9420_alloc_rx_ring()
1284 pd->rx_ring[i].length = PKT_BUF_SZ; smsc9420_alloc_rx_ring()
1285 pd->rx_ring[i].buffer2 = 0; smsc9420_alloc_rx_ring()
1286 pd->rx_buffers[i].skb = NULL; smsc9420_alloc_rx_ring()
1287 pd->rx_buffers[i].mapping = 0; smsc9420_alloc_rx_ring()
1289 pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); smsc9420_alloc_rx_ring()
1293 if (smsc9420_alloc_rx_buffer(pd, i)) { smsc9420_alloc_rx_ring()
1294 netif_warn(pd, ifup, pd->dev, smsc9420_alloc_rx_ring()
1300 pd->rx_ring_head = 0; smsc9420_alloc_rx_ring()
1301 pd->rx_ring_tail = 0; smsc9420_alloc_rx_ring()
1303 smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q); smsc9420_alloc_rx_ring()
1304 netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n", smsc9420_alloc_rx_ring()
1305 smsc9420_reg_read(pd, VLAN1)); smsc9420_alloc_rx_ring()
1307 if (pd->rx_csum) { smsc9420_alloc_rx_ring()
1309 u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN; smsc9420_alloc_rx_ring()
1310 smsc9420_reg_write(pd, COE_CR, coe); smsc9420_alloc_rx_ring()
1311 netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe); smsc9420_alloc_rx_ring()
1314 smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr); smsc9420_alloc_rx_ring()
1315 smsc9420_pci_flush_write(pd); smsc9420_alloc_rx_ring()
1320 smsc9420_free_rx_ring(pd); smsc9420_alloc_rx_ring()
1327 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_open() local
1329 const int irq = pd->pdev->irq; smsc9420_open()
1334 netif_warn(pd, ifup, pd->dev, smsc9420_open()
1343 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_open()
1344 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); smsc9420_open()
1345 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_open()
1346 smsc9420_reg_write(pd, INT_CTL, 0); smsc9420_open()
1347 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_open()
1348 smsc9420_reg_write(pd, DMAC_INTR_ENA, 0); smsc9420_open()
1349 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); smsc9420_open()
1350 smsc9420_pci_flush_write(pd); smsc9420_open()
1352 result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd); smsc9420_open()
1354 netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq); smsc9420_open()
1359 smsc9420_dmac_soft_reset(pd); smsc9420_open()
1362 smsc9420_reg_write(pd, MAC_CR, 0); smsc9420_open()
1367 smsc9420_reg_write(pd, GPIO_CFG, smsc9420_open()
1376 smsc9420_reg_write(pd, BUS_MODE, bus_mode); smsc9420_open()
1378 smsc9420_pci_flush_write(pd); smsc9420_open()
1381 smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1); smsc9420_open()
1383 smsc9420_reg_write(pd, DMAC_CONTROL, smsc9420_open()
1386 smsc9420_pci_flush_write(pd); smsc9420_open()
1389 netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq); smsc9420_open()
1390 pd->software_irq_signal = false; smsc9420_open()
1392 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_open()
1394 int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; smsc9420_open()
1397 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_open()
1400 int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_; smsc9420_open()
1401 smsc9420_reg_write(pd, INT_CTL, int_ctl); smsc9420_open()
1402 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_open()
1403 smsc9420_pci_flush_write(pd); smsc9420_open()
1407 if (pd->software_irq_signal) smsc9420_open()
1413 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_open()
1414 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); smsc9420_open()
1415 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_open()
1416 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_open()
1418 if (!pd->software_irq_signal) { smsc9420_open()
1419 netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n"); smsc9420_open()
1424 netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq); smsc9420_open()
1426 result = smsc9420_alloc_tx_ring(pd); smsc9420_open()
1428 netif_warn(pd, ifup, pd->dev, smsc9420_open()
1434 result = smsc9420_alloc_rx_ring(pd); smsc9420_open()
1436 netif_warn(pd, ifup, pd->dev, smsc9420_open()
1444 netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n"); smsc9420_open()
1450 phy_start(pd->phy_dev); smsc9420_open()
1452 napi_enable(&pd->napi); smsc9420_open()
1455 mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_; smsc9420_open()
1456 smsc9420_reg_write(pd, MAC_CR, mac_cr); smsc9420_open()
1458 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); smsc9420_open()
1460 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); smsc9420_open()
1461 smsc9420_pci_flush_write(pd); smsc9420_open()
1463 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); smsc9420_open()
1466 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); smsc9420_open()
1467 smsc9420_pci_flush_write(pd); smsc9420_open()
1471 smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); smsc9420_open()
1474 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_open()
1475 int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; smsc9420_open()
1476 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_open()
1477 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_open()
1482 smsc9420_free_rx_ring(pd); smsc9420_open()
1484 smsc9420_free_tx_ring(pd); smsc9420_open()
1486 free_irq(irq, pd); smsc9420_open()
1496 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_suspend() local
1501 spin_lock_irqsave(&pd->int_lock, flags); smsc9420_suspend()
1502 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); smsc9420_suspend()
1503 smsc9420_reg_write(pd, INT_CFG, int_cfg); smsc9420_suspend()
1504 spin_unlock_irqrestore(&pd->int_lock, flags); smsc9420_suspend()
1508 smsc9420_stop_tx(pd); smsc9420_suspend()
1509 smsc9420_free_tx_ring(pd); smsc9420_suspend()
1511 napi_disable(&pd->napi); smsc9420_suspend()
1512 smsc9420_stop_rx(pd); smsc9420_suspend()
1513 smsc9420_free_rx_ring(pd); smsc9420_suspend()
1515 free_irq(pd->pdev->irq, pd); smsc9420_suspend()
1531 struct smsc9420_pdata *pd = netdev_priv(dev); smsc9420_resume() local
1545 netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n", smsc9420_resume()
1576 struct smsc9420_pdata *pd; smsc9420_probe() local
1592 dev = alloc_etherdev(sizeof(*pd)); smsc9420_probe()
1623 pd = netdev_priv(dev); smsc9420_probe()
1626 pd->rx_ring = pci_alloc_consistent(pdev, smsc9420_probe()
1629 &pd->rx_dma_addr); smsc9420_probe()
1631 if (!pd->rx_ring) smsc9420_probe()
1635 pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); smsc9420_probe()
1636 pd->tx_dma_addr = pd->rx_dma_addr + smsc9420_probe()
1639 pd->pdev = pdev; smsc9420_probe()
1640 pd->dev = dev; smsc9420_probe()
1641 pd->ioaddr = virt_addr; smsc9420_probe()
1642 pd->msg_enable = smsc_debug; smsc9420_probe()
1643 pd->rx_csum = true; smsc9420_probe()
1645 netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr); smsc9420_probe()
1647 id_rev = smsc9420_reg_read(pd, ID_REV); smsc9420_probe()
1650 netif_info(pd, probe, pd->dev, smsc9420_probe()
1654 netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n"); smsc9420_probe()
1655 netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev); smsc9420_probe()
1659 smsc9420_dmac_soft_reset(pd); smsc9420_probe()
1660 smsc9420_eeprom_reload(pd); smsc9420_probe()
1666 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); smsc9420_probe()
1670 netif_warn(pd, probe, pd->dev, "error %i registering device\n", smsc9420_probe()
1677 spin_lock_init(&pd->int_lock); smsc9420_probe()
1678 spin_lock_init(&pd->phy_lock); smsc9420_probe()
1686 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); smsc9420_probe()
1702 struct smsc9420_pdata *pd; smsc9420_remove() local
1708 pd = netdev_priv(dev); smsc9420_remove()
1712 BUG_ON(pd->tx_buffers); smsc9420_remove()
1713 BUG_ON(pd->rx_buffers); smsc9420_remove()
1715 BUG_ON(!pd->tx_ring); smsc9420_remove()
1716 BUG_ON(!pd->rx_ring); smsc9420_remove()
1719 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); smsc9420_remove()
1721 iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET); smsc9420_remove()
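
The smsc9420.c hits center on three tiny accessors that everything else builds on. A self-contained sketch of that layer, with the struct trimmed to the one field it needs and the ID_REV offset value assumed for illustration:

#include <linux/io.h>
#include <linux/types.h>

#define ID_REV 0x50 /* offset value assumed, for illustration only */

struct smsc9420_pdata {
    void __iomem *ioaddr;
};

static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
{
    return ioread32(pd->ioaddr + offset);
}

static inline void smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset,
                                      u32 value)
{
    iowrite32(value, pd->ioaddr + offset);
}

/* A dummy read of a side-effect-free register forces posted PCI writes
 * out to the device before the caller relies on their effects. */
static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
{
    smsc9420_reg_read(pd, ID_REV);
}

That read-back is the standard posted-write flush, which is why smsc9420_pci_flush_write() follows nearly every control-register update in the hits above.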
/linux-4.1.27/drivers/i2c/busses/
i2c-simtec.c
44 struct simtec_i2c_data *pd = pw; simtec_i2c_setsda() local
45 writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg); simtec_i2c_setsda()
50 struct simtec_i2c_data *pd = pw; simtec_i2c_setscl() local
51 writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg); simtec_i2c_setscl()
56 struct simtec_i2c_data *pd = pw; simtec_i2c_getsda() local
57 return readb(pd->reg) & STATE_SDA ? 1 : 0; simtec_i2c_getsda()
62 struct simtec_i2c_data *pd = pw; simtec_i2c_getscl() local
63 return readb(pd->reg) & STATE_SCL ? 1 : 0; simtec_i2c_getscl()
70 struct simtec_i2c_data *pd; simtec_i2c_probe() local
75 pd = kzalloc(sizeof(struct simtec_i2c_data), GFP_KERNEL); simtec_i2c_probe()
76 if (pd == NULL) simtec_i2c_probe()
79 platform_set_drvdata(dev, pd); simtec_i2c_probe()
90 pd->ioarea = request_mem_region(res->start, size, dev->name); simtec_i2c_probe()
91 if (pd->ioarea == NULL) { simtec_i2c_probe()
97 pd->reg = ioremap(res->start, size); simtec_i2c_probe()
98 if (pd->reg == NULL) { simtec_i2c_probe()
106 pd->adap.owner = THIS_MODULE; simtec_i2c_probe()
107 pd->adap.algo_data = &pd->bit; simtec_i2c_probe()
108 pd->adap.dev.parent = &dev->dev; simtec_i2c_probe()
110 strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name)); simtec_i2c_probe()
112 pd->bit.data = pd; simtec_i2c_probe()
113 pd->bit.setsda = simtec_i2c_setsda; simtec_i2c_probe()
114 pd->bit.setscl = simtec_i2c_setscl; simtec_i2c_probe()
115 pd->bit.getsda = simtec_i2c_getsda; simtec_i2c_probe()
116 pd->bit.getscl = simtec_i2c_getscl; simtec_i2c_probe()
117 pd->bit.timeout = HZ; simtec_i2c_probe()
118 pd->bit.udelay = 20; simtec_i2c_probe()
120 ret = i2c_bit_add_bus(&pd->adap); simtec_i2c_probe()
127 iounmap(pd->reg); simtec_i2c_probe()
130 release_resource(pd->ioarea); simtec_i2c_probe()
131 kfree(pd->ioarea); simtec_i2c_probe()
134 kfree(pd); simtec_i2c_probe()
140 struct simtec_i2c_data *pd = platform_get_drvdata(dev); simtec_i2c_remove() local
142 i2c_del_adapter(&pd->adap); simtec_i2c_remove()
144 iounmap(pd->reg); simtec_i2c_remove()
145 release_resource(pd->ioarea); simtec_i2c_remove()
146 kfree(pd->ioarea); simtec_i2c_remove()
147 kfree(pd); simtec_i2c_remove()
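
The i2c-simtec hits show the i2c-algo-bit contract: four callbacks that set and read SDA/SCL through a single device register, wired into struct i2c_algo_bit_data. A sketch under the assumption that the CMD_*/STATE_* register encodings below are illustrative, not the real ones:

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>

#define CMD_SET_SDA 0x01 /* assumed encodings */
#define STATE_SDA   0x04

struct simtec_i2c_data {
    void __iomem *reg;
    struct i2c_adapter adap;
    struct i2c_algo_bit_data bit;
};

static void simtec_i2c_setsda(void *pw, int state)
{
    struct simtec_i2c_data *pd = pw;

    writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg);
}

static int simtec_i2c_getsda(void *pw)
{
    struct simtec_i2c_data *pd = pw;

    return readb(pd->reg) & STATE_SDA ? 1 : 0;
}

/* setscl/getscl mirror the two helpers above with the SCL bits */

void simtec_i2c_attach(struct simtec_i2c_data *pd)
{
    pd->bit.data    = pd;
    pd->bit.setsda  = simtec_i2c_setsda;
    pd->bit.getsda  = simtec_i2c_getsda;
    pd->bit.timeout = HZ;  /* jiffies to wait for clock stretching */
    pd->bit.udelay  = 20;  /* half-period in us: roughly a 25 kHz bus */
    pd->adap.algo_data = &pd->bit;
}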
i2c-sh_mobile.c
193 static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data) iic_wr() argument
196 data |= pd->icic; iic_wr()
198 iowrite8(data, pd->reg + offs); iic_wr()
201 static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs) iic_rd() argument
203 return ioread8(pd->reg + offs); iic_rd()
206 static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs, iic_set_clr() argument
209 iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr); iic_set_clr()
246 static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_init() argument
253 clk_prepare_enable(pd->clk); sh_mobile_i2c_init()
254 i2c_clk_khz = clk_get_rate(pd->clk) / 1000; sh_mobile_i2c_init()
255 clk_disable_unprepare(pd->clk); sh_mobile_i2c_init()
256 i2c_clk_khz /= pd->clks_per_count; sh_mobile_i2c_init()
258 if (pd->bus_speed == STANDARD_MODE) { sh_mobile_i2c_init()
262 } else if (pd->bus_speed == FAST_MODE) { sh_mobile_i2c_init()
267 dev_err(pd->dev, "unrecognized bus speed %lu Hz\n", sh_mobile_i2c_init()
268 pd->bus_speed); sh_mobile_i2c_init()
272 pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf); sh_mobile_i2c_init()
273 pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf); sh_mobile_i2c_init()
275 max_val = pd->flags & IIC_FLAG_HAS_ICIC67 ? 0x1ff : 0xff; sh_mobile_i2c_init()
276 if (pd->iccl > max_val || pd->icch > max_val) { sh_mobile_i2c_init()
277 dev_err(pd->dev, "timing values out of range: L/H=0x%x/0x%x\n", sh_mobile_i2c_init()
278 pd->iccl, pd->icch); sh_mobile_i2c_init()
283 if (pd->iccl & 0x100) sh_mobile_i2c_init()
284 pd->icic |= ICIC_ICCLB8; sh_mobile_i2c_init()
286 pd->icic &= ~ICIC_ICCLB8; sh_mobile_i2c_init()
289 if (pd->icch & 0x100) sh_mobile_i2c_init()
290 pd->icic |= ICIC_ICCHB8; sh_mobile_i2c_init()
292 pd->icic &= ~ICIC_ICCHB8; sh_mobile_i2c_init()
294 dev_dbg(pd->dev, "timing values: L/H=0x%x/0x%x\n", pd->iccl, pd->icch); sh_mobile_i2c_init()
298 static void activate_ch(struct sh_mobile_i2c_data *pd) activate_ch() argument
301 pm_runtime_get_sync(pd->dev); activate_ch()
302 clk_prepare_enable(pd->clk); activate_ch()
305 iic_set_clr(pd, ICCR, ICCR_ICE, 0); activate_ch()
308 iic_wr(pd, ICIC, 0); activate_ch()
311 iic_wr(pd, ICCL, pd->iccl & 0xff); activate_ch()
312 iic_wr(pd, ICCH, pd->icch & 0xff); activate_ch()
315 static void deactivate_ch(struct sh_mobile_i2c_data *pd) deactivate_ch() argument
318 iic_wr(pd, ICSR, 0); deactivate_ch()
319 iic_wr(pd, ICIC, 0); deactivate_ch()
322 iic_set_clr(pd, ICCR, 0, ICCR_ICE); deactivate_ch()
325 clk_disable_unprepare(pd->clk); deactivate_ch()
326 pm_runtime_put_sync(pd->dev); deactivate_ch()
329 static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, i2c_op() argument
335 dev_dbg(pd->dev, "op %d, data in 0x%02x\n", op, data); i2c_op()
337 spin_lock_irqsave(&pd->lock, flags); i2c_op()
341 iic_wr(pd, ICCR, ICCR_ICE | ICCR_TRS | ICCR_BBSY); i2c_op()
344 iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE); i2c_op()
345 iic_wr(pd, ICDR, data); i2c_op()
348 iic_wr(pd, ICDR, data); i2c_op()
351 iic_wr(pd, ICDR, data); i2c_op()
354 iic_wr(pd, ICCR, pd->send_stop ? ICCR_ICE | ICCR_TRS i2c_op()
358 iic_wr(pd, ICCR, ICCR_ICE | ICCR_SCP); i2c_op()
361 ret = iic_rd(pd, ICDR); i2c_op()
364 iic_wr(pd, ICIC, i2c_op()
366 iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); i2c_op()
369 iic_wr(pd, ICIC, i2c_op()
371 ret = iic_rd(pd, ICDR); i2c_op()
372 iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); i2c_op()
376 spin_unlock_irqrestore(&pd->lock, flags); i2c_op()
378 dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret); i2c_op()
382 static bool sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_is_first_byte() argument
384 return pd->pos == -1; sh_mobile_i2c_is_first_byte()
387 static bool sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_is_last_byte() argument
389 return pd->pos == pd->msg->len - 1; sh_mobile_i2c_is_last_byte()
392 static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd, sh_mobile_i2c_get_data() argument
395 switch (pd->pos) { sh_mobile_i2c_get_data()
397 *buf = (pd->msg->addr & 0x7f) << 1; sh_mobile_i2c_get_data()
398 *buf |= (pd->msg->flags & I2C_M_RD) ? 1 : 0; sh_mobile_i2c_get_data()
401 *buf = pd->msg->buf[pd->pos]; sh_mobile_i2c_get_data()
405 static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_isr_tx() argument
409 if (pd->pos == pd->msg->len) { sh_mobile_i2c_isr_tx()
411 if (pd->send_stop && pd->stop_after_dma) sh_mobile_i2c_isr_tx()
412 i2c_op(pd, OP_TX_STOP, 0); sh_mobile_i2c_isr_tx()
416 sh_mobile_i2c_get_data(pd, &data); sh_mobile_i2c_isr_tx()
418 if (sh_mobile_i2c_is_last_byte(pd)) sh_mobile_i2c_isr_tx()
419 i2c_op(pd, OP_TX_STOP_DATA, data); sh_mobile_i2c_isr_tx()
420 else if (sh_mobile_i2c_is_first_byte(pd)) sh_mobile_i2c_isr_tx()
421 i2c_op(pd, OP_TX_FIRST, data); sh_mobile_i2c_isr_tx()
423 i2c_op(pd, OP_TX, data); sh_mobile_i2c_isr_tx()
425 pd->pos++; sh_mobile_i2c_isr_tx()
429 static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_isr_rx() argument
435 if (pd->pos <= -1) { sh_mobile_i2c_isr_rx()
436 sh_mobile_i2c_get_data(pd, &data); sh_mobile_i2c_isr_rx()
438 if (sh_mobile_i2c_is_first_byte(pd)) sh_mobile_i2c_isr_rx()
439 i2c_op(pd, OP_TX_FIRST, data); sh_mobile_i2c_isr_rx()
441 i2c_op(pd, OP_TX, data); sh_mobile_i2c_isr_rx()
445 if (pd->pos == 0) { sh_mobile_i2c_isr_rx()
446 i2c_op(pd, OP_TX_TO_RX, 0); sh_mobile_i2c_isr_rx()
450 real_pos = pd->pos - 2; sh_mobile_i2c_isr_rx()
452 if (pd->pos == pd->msg->len) { sh_mobile_i2c_isr_rx()
453 if (pd->stop_after_dma) { sh_mobile_i2c_isr_rx()
455 i2c_op(pd, OP_RX_STOP, 0); sh_mobile_i2c_isr_rx()
456 pd->pos++; sh_mobile_i2c_isr_rx()
461 i2c_op(pd, OP_RX_STOP, 0); sh_mobile_i2c_isr_rx()
464 data = i2c_op(pd, OP_RX_STOP_DATA, 0); sh_mobile_i2c_isr_rx()
466 data = i2c_op(pd, OP_RX, 0); sh_mobile_i2c_isr_rx()
469 pd->msg->buf[real_pos] = data; sh_mobile_i2c_isr_rx()
472 pd->pos++; sh_mobile_i2c_isr_rx()
473 return pd->pos == (pd->msg->len + 2); sh_mobile_i2c_isr_rx()
478 struct sh_mobile_i2c_data *pd = dev_id; sh_mobile_i2c_isr() local
482 sr = iic_rd(pd, ICSR); sh_mobile_i2c_isr()
483 pd->sr |= sr; /* remember state */ sh_mobile_i2c_isr()
485 dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, sh_mobile_i2c_isr()
486 (pd->msg->flags & I2C_M_RD) ? "read" : "write", sh_mobile_i2c_isr()
487 pd->pos, pd->msg->len); sh_mobile_i2c_isr()
490 if (pd->dma_direction == DMA_TO_DEVICE && pd->pos == 0) sh_mobile_i2c_isr()
491 iic_set_clr(pd, ICIC, ICIC_TDMAE, 0); sh_mobile_i2c_isr()
494 iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK)); sh_mobile_i2c_isr()
495 else if (pd->msg->flags & I2C_M_RD) sh_mobile_i2c_isr()
496 wakeup = sh_mobile_i2c_isr_rx(pd); sh_mobile_i2c_isr()
498 wakeup = sh_mobile_i2c_isr_tx(pd); sh_mobile_i2c_isr()
501 if (pd->dma_direction == DMA_FROM_DEVICE && pd->pos == 1) sh_mobile_i2c_isr()
502 iic_set_clr(pd, ICIC, ICIC_RDMAE, 0); sh_mobile_i2c_isr()
505 iic_wr(pd, ICSR, sr & ~ICSR_WAIT); sh_mobile_i2c_isr()
508 pd->sr |= SW_DONE; sh_mobile_i2c_isr()
509 wake_up(&pd->wait); sh_mobile_i2c_isr()
513 iic_rd(pd, ICSR); sh_mobile_i2c_isr()
518 static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_dma_unmap() argument
520 struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE sh_mobile_i2c_dma_unmap()
521 ? pd->dma_rx : pd->dma_tx; sh_mobile_i2c_dma_unmap()
523 dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg), sh_mobile_i2c_dma_unmap()
524 pd->msg->len, pd->dma_direction); sh_mobile_i2c_dma_unmap()
526 pd->dma_direction = DMA_NONE; sh_mobile_i2c_dma_unmap()
529 static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_cleanup_dma() argument
531 if (pd->dma_direction == DMA_NONE) sh_mobile_i2c_cleanup_dma()
533 else if (pd->dma_direction == DMA_FROM_DEVICE) sh_mobile_i2c_cleanup_dma()
534 dmaengine_terminate_all(pd->dma_rx); sh_mobile_i2c_cleanup_dma()
535 else if (pd->dma_direction == DMA_TO_DEVICE) sh_mobile_i2c_cleanup_dma()
536 dmaengine_terminate_all(pd->dma_tx); sh_mobile_i2c_cleanup_dma()
538 sh_mobile_i2c_dma_unmap(pd); sh_mobile_i2c_cleanup_dma()
543 struct sh_mobile_i2c_data *pd = data; sh_mobile_i2c_dma_callback() local
545 sh_mobile_i2c_dma_unmap(pd); sh_mobile_i2c_dma_callback()
546 pd->pos = pd->msg->len; sh_mobile_i2c_dma_callback()
547 pd->stop_after_dma = true; sh_mobile_i2c_dma_callback()
549 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); sh_mobile_i2c_dma_callback()
588 static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_xfer_dma() argument
590 bool read = pd->msg->flags & I2C_M_RD; sh_mobile_i2c_xfer_dma()
592 struct dma_chan *chan = read ? pd->dma_rx : pd->dma_tx; sh_mobile_i2c_xfer_dma()
599 chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM, sh_mobile_i2c_xfer_dma()
600 pd->res->start + ICDR); sh_mobile_i2c_xfer_dma()
602 chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV, sh_mobile_i2c_xfer_dma()
603 pd->res->start + ICDR); sh_mobile_i2c_xfer_dma()
609 dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); sh_mobile_i2c_xfer_dma()
610 if (dma_mapping_error(pd->dev, dma_addr)) { sh_mobile_i2c_xfer_dma()
611 dev_dbg(pd->dev, "dma map failed, using PIO\n"); sh_mobile_i2c_xfer_dma()
615 sg_dma_len(&pd->sg) = pd->msg->len; sh_mobile_i2c_xfer_dma()
616 sg_dma_address(&pd->sg) = dma_addr; sh_mobile_i2c_xfer_dma()
618 pd->dma_direction = dir; sh_mobile_i2c_xfer_dma()
620 txdesc = dmaengine_prep_slave_sg(chan, &pd->sg, 1, sh_mobile_i2c_xfer_dma()
624 dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n"); sh_mobile_i2c_xfer_dma()
625 sh_mobile_i2c_cleanup_dma(pd); sh_mobile_i2c_xfer_dma()
630 txdesc->callback_param = pd; sh_mobile_i2c_xfer_dma()
634 dev_dbg(pd->dev, "submitting dma failed, using PIO\n"); sh_mobile_i2c_xfer_dma()
635 sh_mobile_i2c_cleanup_dma(pd); sh_mobile_i2c_xfer_dma()
642 static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, start_ch() argument
646 dev_err(pd->dev, "Unsupported zero length i2c read\n"); start_ch()
652 iic_set_clr(pd, ICCR, 0, ICCR_ICE); start_ch()
655 iic_set_clr(pd, ICCR, ICCR_ICE, 0); start_ch()
658 iic_wr(pd, ICCL, pd->iccl & 0xff); start_ch()
659 iic_wr(pd, ICCH, pd->icch & 0xff); start_ch()
662 pd->msg = usr_msg; start_ch()
663 pd->pos = -1; start_ch()
664 pd->sr = 0; start_ch()
666 if (pd->msg->len > 8) start_ch()
667 sh_mobile_i2c_xfer_dma(pd); start_ch()
670 iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); start_ch()
674 static int poll_dte(struct sh_mobile_i2c_data *pd) poll_dte() argument
679 u_int8_t val = iic_rd(pd, ICSR); poll_dte()
693 static int poll_busy(struct sh_mobile_i2c_data *pd) poll_busy() argument
698 u_int8_t val = iic_rd(pd, ICSR); poll_busy()
700 dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); poll_busy()
708 val |= pd->sr; poll_busy()
726 struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); sh_mobile_i2c_xfer() local
731 activate_ch(pd); sh_mobile_i2c_xfer()
735 bool do_start = pd->send_stop || !i; sh_mobile_i2c_xfer()
737 pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; sh_mobile_i2c_xfer()
738 pd->stop_after_dma = false; sh_mobile_i2c_xfer()
740 err = start_ch(pd, msg, do_start); sh_mobile_i2c_xfer()
745 i2c_op(pd, OP_START, 0); sh_mobile_i2c_xfer()
748 k = wait_event_timeout(pd->wait, sh_mobile_i2c_xfer()
749 pd->sr & (ICSR_TACK | SW_DONE), sh_mobile_i2c_xfer()
752 dev_err(pd->dev, "Transfer request timed out\n"); sh_mobile_i2c_xfer()
753 if (pd->dma_direction != DMA_NONE) sh_mobile_i2c_xfer()
754 sh_mobile_i2c_cleanup_dma(pd); sh_mobile_i2c_xfer()
760 if (pd->send_stop) sh_mobile_i2c_xfer()
761 err = poll_busy(pd); sh_mobile_i2c_xfer()
763 err = poll_dte(pd); sh_mobile_i2c_xfer()
768 deactivate_ch(pd); sh_mobile_i2c_xfer()
806 static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) sh_mobile_i2c_release_dma() argument
808 if (!IS_ERR(pd->dma_tx)) { sh_mobile_i2c_release_dma()
809 dma_release_channel(pd->dma_tx); sh_mobile_i2c_release_dma()
810 pd->dma_tx = ERR_PTR(-EPROBE_DEFER); sh_mobile_i2c_release_dma()
813 if (!IS_ERR(pd->dma_rx)) { sh_mobile_i2c_release_dma()
814 dma_release_channel(pd->dma_rx); sh_mobile_i2c_release_dma()
815 pd->dma_rx = ERR_PTR(-EPROBE_DEFER); sh_mobile_i2c_release_dma()
819 static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd) sh_mobile_i2c_hook_irqs() argument
828 0, dev_name(&dev->dev), pd); sh_mobile_i2c_hook_irqs()
843 struct sh_mobile_i2c_data *pd; sh_mobile_i2c_probe() local
849 pd = devm_kzalloc(&dev->dev, sizeof(struct sh_mobile_i2c_data), GFP_KERNEL); sh_mobile_i2c_probe()
850 if (!pd) sh_mobile_i2c_probe()
853 pd->clk = devm_clk_get(&dev->dev, NULL); sh_mobile_i2c_probe()
854 if (IS_ERR(pd->clk)) { sh_mobile_i2c_probe()
856 return PTR_ERR(pd->clk); sh_mobile_i2c_probe()
859 ret = sh_mobile_i2c_hook_irqs(dev, pd); sh_mobile_i2c_probe()
863 pd->dev = &dev->dev; sh_mobile_i2c_probe()
864 platform_set_drvdata(dev, pd); sh_mobile_i2c_probe()
868 pd->res = res; sh_mobile_i2c_probe()
869 pd->reg = devm_ioremap_resource(&dev->dev, res); sh_mobile_i2c_probe()
870 if (IS_ERR(pd->reg)) sh_mobile_i2c_probe()
871 return PTR_ERR(pd->reg); sh_mobile_i2c_probe()
875 pd->bus_speed = ret ? STANDARD_MODE : bus_speed; sh_mobile_i2c_probe()
877 pd->clks_per_count = 1; sh_mobile_i2c_probe()
887 pd->clks_per_count = config->clks_per_count; sh_mobile_i2c_probe()
891 pd->bus_speed = pdata->bus_speed; sh_mobile_i2c_probe()
893 pd->clks_per_count = pdata->clks_per_count; sh_mobile_i2c_probe()
900 pd->flags |= IIC_FLAG_HAS_ICIC67; sh_mobile_i2c_probe()
902 ret = sh_mobile_i2c_init(pd); sh_mobile_i2c_probe()
907 sg_init_table(&pd->sg, 1); sh_mobile_i2c_probe()
908 pd->dma_direction = DMA_NONE; sh_mobile_i2c_probe()
909 pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER); sh_mobile_i2c_probe()
925 adap = &pd->adap; sh_mobile_i2c_probe()
926 i2c_set_adapdata(adap, pd); sh_mobile_i2c_probe()
937 spin_lock_init(&pd->lock); sh_mobile_i2c_probe()
938 init_waitqueue_head(&pd->wait); sh_mobile_i2c_probe()
942 sh_mobile_i2c_release_dma(pd); sh_mobile_i2c_probe()
947 dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed); sh_mobile_i2c_probe()
954 struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); sh_mobile_i2c_remove() local
956 i2c_del_adapter(&pd->adap); sh_mobile_i2c_remove()
957 sh_mobile_i2c_release_dma(pd); sh_mobile_i2c_remove()
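
Stripped of the ICIC shadow-register handling visible at line 196, the sh_mobile accessor layer reduces to three helpers; everything else in the hits is built from them. A sketch with the struct cut down to the one field they touch:

#include <linux/io.h>

struct sh_mobile_i2c_data {
    void __iomem *reg;
};

static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data)
{
    iowrite8(data, pd->reg + offs);
}

static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs)
{
    return ioread8(pd->reg + offs);
}

/* Read-modify-write: set the bits in 'set', clear the bits in 'clr' */
static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
                        unsigned char set, unsigned char clr)
{
    iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
}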
i2c-pca-platform.c
42 static int i2c_pca_pf_readbyte8(void *pd, int reg) i2c_pca_pf_readbyte8() argument
44 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_readbyte8()
48 static int i2c_pca_pf_readbyte16(void *pd, int reg) i2c_pca_pf_readbyte16() argument
50 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_readbyte16()
54 static int i2c_pca_pf_readbyte32(void *pd, int reg) i2c_pca_pf_readbyte32() argument
56 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_readbyte32()
60 static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) i2c_pca_pf_writebyte8() argument
62 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_writebyte8()
66 static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) i2c_pca_pf_writebyte16() argument
68 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_writebyte16()
72 static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) i2c_pca_pf_writebyte32() argument
74 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_writebyte32()
79 static int i2c_pca_pf_waitforcompletion(void *pd) i2c_pca_pf_waitforcompletion() argument
81 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_waitforcompletion()
104 static void i2c_pca_pf_dummyreset(void *pd) i2c_pca_pf_dummyreset() argument
106 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_dummyreset()
111 static void i2c_pca_pf_resetchip(void *pd) i2c_pca_pf_resetchip() argument
113 struct i2c_pca_pf_data *i2c = pd; i2c_pca_pf_resetchip()
i2c-pca-isa.c
46 static void pca_isa_writebyte(void *pd, int reg, int val) pca_isa_writebyte() argument
56 static int pca_isa_readbyte(void *pd, int reg) pca_isa_readbyte() argument
68 static int pca_isa_waitforcompletion(void *pd) pca_isa_waitforcompletion() argument
75 pca_isa_readbyte(pd, I2C_PCA_CON) pca_isa_waitforcompletion()
82 if (pca_isa_readbyte(pd, I2C_PCA_CON) pca_isa_waitforcompletion()
92 static void pca_isa_resetchip(void *pd) pca_isa_resetchip() argument
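
Both i2c-pca files show the same convention: the PCA algorithm core stores an opaque void *pd cookie and passes it back to adapter-supplied callbacks, which cast it to their private type. A sketch of the memory-mapped variant, with the struct fields and register access kept illustrative:

#include <linux/io.h>

struct i2c_pca_pf_data {
    void __iomem *reg_base;
};

static int i2c_pca_pf_readbyte8(void *pd, int reg)
{
    struct i2c_pca_pf_data *i2c = pd; /* recover the typed state */

    return ioread8(i2c->reg_base + reg);
}

static void i2c_pca_pf_writebyte8(void *pd, int reg, int val)
{
    struct i2c_pca_pf_data *i2c = pd;

    iowrite8(val, i2c->reg_base + reg);
}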
i2c-mv64xxx.c
875 mv64xxx_i2c_probe(struct platform_device *pd) mv64xxx_i2c_probe() argument
878 struct mv64xxx_i2c_pdata *pdata = dev_get_platdata(&pd->dev); mv64xxx_i2c_probe()
882 if ((!pdata && !pd->dev.of_node)) mv64xxx_i2c_probe()
885 drv_data = devm_kzalloc(&pd->dev, sizeof(struct mv64xxx_i2c_data), mv64xxx_i2c_probe()
890 r = platform_get_resource(pd, IORESOURCE_MEM, 0); mv64xxx_i2c_probe()
891 drv_data->reg_base = devm_ioremap_resource(&pd->dev, r); mv64xxx_i2c_probe()
903 drv_data->clk = devm_clk_get(&pd->dev, NULL); mv64xxx_i2c_probe()
912 drv_data->irq = platform_get_irq(pd, 0); mv64xxx_i2c_probe()
916 } else if (pd->dev.of_node) { mv64xxx_i2c_probe()
917 rc = mv64xxx_of_config(drv_data, &pd->dev); mv64xxx_i2c_probe()
926 drv_data->adapter.dev.parent = &pd->dev; mv64xxx_i2c_probe()
930 drv_data->adapter.nr = pd->id; mv64xxx_i2c_probe()
931 drv_data->adapter.dev.of_node = pd->dev.of_node; mv64xxx_i2c_probe()
932 platform_set_drvdata(pd, drv_data); mv64xxx_i2c_probe()
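
Unlike the other drivers here, mv64xxx uses pd for the struct platform_device itself. A minimal probe sketch of the devm resource pattern those hits show (driver and struct names are hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_i2c_data {
    void __iomem *reg_base;
    int irq;
};

static int demo_i2c_probe(struct platform_device *pd)
{
    struct demo_i2c_data *drv_data;
    struct resource *r;

    /* devm allocation: freed automatically when the device unbinds */
    drv_data = devm_kzalloc(&pd->dev, sizeof(*drv_data), GFP_KERNEL);
    if (!drv_data)
        return -ENOMEM;

    r = platform_get_resource(pd, IORESOURCE_MEM, 0);
    drv_data->reg_base = devm_ioremap_resource(&pd->dev, r);
    if (IS_ERR(drv_data->reg_base))
        return PTR_ERR(drv_data->reg_base);

    drv_data->irq = platform_get_irq(pd, 0);
    if (drv_data->irq < 0)
        return drv_data->irq;

    platform_set_drvdata(pd, drv_data);
    return 0;
}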
/linux-4.1.27/drivers/staging/ozwpan/
ozpd.c
24 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
25 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
26 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
27 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
28 static int oz_send_isoc_frame(struct oz_pd *pd);
29 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
31 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
64 void oz_pd_set_state(struct oz_pd *pd, unsigned state) oz_pd_set_state() argument
66 pd->state = state; oz_pd_set_state()
69 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n"); oz_pd_set_state()
72 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n"); oz_pd_set_state()
75 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n"); oz_pd_set_state()
78 oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n"); oz_pd_set_state()
86 void oz_pd_get(struct oz_pd *pd) oz_pd_get() argument
88 atomic_inc(&pd->ref_count); oz_pd_get()
94 void oz_pd_put(struct oz_pd *pd) oz_pd_put() argument
96 if (atomic_dec_and_test(&pd->ref_count)) oz_pd_put()
97 oz_pd_destroy(pd); oz_pd_put()
105 struct oz_pd *pd; oz_pd_alloc() local
108 pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC); oz_pd_alloc()
109 if (!pd) oz_pd_alloc()
112 atomic_set(&pd->ref_count, 2); oz_pd_alloc()
114 spin_lock_init(&pd->app_lock[i]); oz_pd_alloc()
115 pd->last_rx_pkt_num = 0xffffffff; oz_pd_alloc()
116 oz_pd_set_state(pd, OZ_PD_S_IDLE); oz_pd_alloc()
117 pd->max_tx_size = OZ_MAX_TX_SIZE; oz_pd_alloc()
118 ether_addr_copy(pd->mac_addr, mac_addr); oz_pd_alloc()
119 oz_elt_buf_init(&pd->elt_buff); oz_pd_alloc()
120 spin_lock_init(&pd->tx_frame_lock); oz_pd_alloc()
121 INIT_LIST_HEAD(&pd->tx_queue); oz_pd_alloc()
122 INIT_LIST_HEAD(&pd->farewell_list); oz_pd_alloc()
123 pd->last_sent_frame = &pd->tx_queue; oz_pd_alloc()
124 spin_lock_init(&pd->stream_lock); oz_pd_alloc()
125 INIT_LIST_HEAD(&pd->stream_list); oz_pd_alloc()
126 tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler, oz_pd_alloc()
127 (unsigned long)pd); oz_pd_alloc()
128 tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler, oz_pd_alloc()
129 (unsigned long)pd); oz_pd_alloc()
130 hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL); oz_pd_alloc()
131 hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL); oz_pd_alloc()
132 pd->heartbeat.function = oz_pd_heartbeat_event; oz_pd_alloc()
133 pd->timeout.function = oz_pd_timeout_event; oz_pd_alloc()
135 return pd; oz_pd_alloc()
144 struct oz_pd *pd; oz_pd_free() local
146 oz_pd_dbg(pd, ON, "Destroying PD\n"); oz_pd_free()
147 pd = container_of(work, struct oz_pd, workitem); oz_pd_free()
149 tasklet_kill(&pd->heartbeat_tasklet); oz_pd_free()
150 tasklet_kill(&pd->timeout_tasklet); oz_pd_free()
154 list_for_each_safe(e, n, &pd->stream_list) oz_pd_free()
157 list_for_each_safe(e, n, &pd->tx_queue) { oz_pd_free()
162 oz_retire_frame(pd, f); oz_pd_free()
165 oz_elt_buf_term(&pd->elt_buff); oz_pd_free()
167 list_for_each_safe(e, n, &pd->farewell_list) oz_pd_free()
170 if (pd->net_dev) oz_pd_free()
171 dev_put(pd->net_dev); oz_pd_free()
172 kfree(pd); oz_pd_free()
178 void oz_pd_destroy(struct oz_pd *pd) oz_pd_destroy() argument
180 if (hrtimer_active(&pd->timeout)) oz_pd_destroy()
181 hrtimer_cancel(&pd->timeout); oz_pd_destroy()
182 if (hrtimer_active(&pd->heartbeat)) oz_pd_destroy()
183 hrtimer_cancel(&pd->heartbeat); oz_pd_destroy()
185 INIT_WORK(&pd->workitem, oz_pd_free); oz_pd_destroy()
186 if (!schedule_work(&pd->workitem)) oz_pd_destroy()
187 oz_pd_dbg(pd, ON, "failed to schedule workitem\n"); oz_pd_destroy()
193 int oz_services_start(struct oz_pd *pd, u16 apps, int resume) oz_services_start() argument
197 oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume); oz_services_start()
200 if (g_app_if[i].start(pd, resume)) { oz_services_start()
202 oz_pd_dbg(pd, ON, oz_services_start()
207 pd->total_apps |= (1 << i); oz_services_start()
209 pd->paused_apps &= ~(1 << i); oz_services_start()
219 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause) oz_services_stop() argument
223 oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause); oz_services_stop()
228 pd->paused_apps |= (1 << i); oz_services_stop()
230 pd->total_apps &= ~(1 << i); oz_services_stop()
231 pd->paused_apps &= ~(1 << i); oz_services_stop()
234 g_app_if[i].stop(pd, pause); oz_services_stop()
242 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps) oz_pd_heartbeat() argument
248 if (g_app_if[i].heartbeat(pd)) oz_pd_heartbeat()
252 if ((!more) && (hrtimer_active(&pd->heartbeat))) oz_pd_heartbeat()
253 hrtimer_cancel(&pd->heartbeat); oz_pd_heartbeat()
254 if (pd->mode & OZ_F_ISOC_ANYTIME) { oz_pd_heartbeat()
257 while (count-- && (oz_send_isoc_frame(pd) >= 0)) oz_pd_heartbeat()
265 void oz_pd_stop(struct oz_pd *pd) oz_pd_stop() argument
269 oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state); oz_pd_stop()
270 oz_pd_indicate_farewells(pd); oz_pd_stop()
272 stop_apps = pd->total_apps; oz_pd_stop()
273 pd->total_apps = 0; oz_pd_stop()
274 pd->paused_apps = 0; oz_pd_stop()
276 oz_services_stop(pd, stop_apps, 0); oz_pd_stop()
278 oz_pd_set_state(pd, OZ_PD_S_STOPPED); oz_pd_stop()
280 list_del(&pd->link); oz_pd_stop()
282 oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count)); oz_pd_stop()
283 oz_pd_put(pd); oz_pd_stop()
289 int oz_pd_sleep(struct oz_pd *pd) oz_pd_sleep() argument
295 if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) { oz_pd_sleep()
299 if (pd->keep_alive && pd->session_id) oz_pd_sleep()
300 oz_pd_set_state(pd, OZ_PD_S_SLEEP); oz_pd_sleep()
304 stop_apps = pd->total_apps; oz_pd_sleep()
307 oz_pd_stop(pd); oz_pd_sleep()
309 oz_services_stop(pd, stop_apps, 1); oz_pd_sleep()
310 oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive); oz_pd_sleep()
318 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd) oz_tx_frame_alloc() argument
334 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f) oz_tx_isoc_free() argument
336 pd->nb_queued_isoc_frames--; oz_tx_isoc_free()
342 pd->nb_queued_isoc_frames); oz_tx_isoc_free()
348 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f) oz_tx_frame_free() argument
366 static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb) oz_set_last_pkt_nb() argument
370 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; oz_set_last_pkt_nb()
376 int oz_prepare_frame(struct oz_pd *pd, int empty) oz_prepare_frame() argument
380 if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED) oz_prepare_frame()
382 if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES) oz_prepare_frame()
384 if (!empty && !oz_are_elts_available(&pd->elt_buff)) oz_prepare_frame()
386 f = oz_tx_frame_alloc(pd); oz_prepare_frame()
392 ++pd->last_tx_pkt_num; oz_prepare_frame()
393 put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num); oz_prepare_frame()
395 oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size, oz_prepare_frame()
396 pd->max_tx_size, &f->elt_list); oz_prepare_frame()
398 spin_lock(&pd->tx_frame_lock); oz_prepare_frame()
399 list_add_tail(&f->link, &pd->tx_queue); oz_prepare_frame()
400 pd->nb_queued_frames++; oz_prepare_frame()
401 spin_unlock(&pd->tx_frame_lock); oz_prepare_frame()
408 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f) oz_build_frame() argument
411 struct net_device *dev = pd->net_dev; oz_build_frame()
428 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, oz_build_frame()
434 f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; oz_build_frame()
452 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f) oz_retire_frame() argument
459 ei->callback(pd, ei->context); oz_retire_frame()
460 spin_lock_bh(&pd->elt_buff.lock); oz_retire_frame()
461 oz_elt_info_free(&pd->elt_buff, ei); oz_retire_frame()
462 spin_unlock_bh(&pd->elt_buff.lock); oz_retire_frame()
464 oz_tx_frame_free(pd, f); oz_retire_frame()
470 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data) oz_send_next_queued_frame() argument
476 spin_lock(&pd->tx_frame_lock); oz_send_next_queued_frame()
477 e = pd->last_sent_frame->next; oz_send_next_queued_frame()
478 if (e == &pd->tx_queue) { oz_send_next_queued_frame()
479 spin_unlock(&pd->tx_frame_lock); oz_send_next_queued_frame()
486 oz_tx_isoc_free(pd, f); oz_send_next_queued_frame()
487 spin_unlock(&pd->tx_frame_lock); oz_send_next_queued_frame()
490 oz_set_last_pkt_nb(pd, skb); oz_send_next_queued_frame()
499 pd->nb_queued_isoc_frames); oz_send_next_queued_frame()
507 pd->last_sent_frame = e; oz_send_next_queued_frame()
508 skb = oz_build_frame(pd, f); oz_send_next_queued_frame()
509 spin_unlock(&pd->tx_frame_lock); oz_send_next_queued_frame()
524 void oz_send_queued_frames(struct oz_pd *pd, int backlog) oz_send_queued_frames() argument
526 while (oz_prepare_frame(pd, 0) >= 0) oz_send_queued_frames()
529 switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) { oz_send_queued_frames()
532 backlog += pd->nb_queued_isoc_frames; oz_send_queued_frames()
540 if ((backlog <= 0) && (pd->isoc_sent == 0)) oz_send_queued_frames()
551 if (oz_send_next_queued_frame(pd, backlog) < 0) oz_send_queued_frames()
556 out: oz_prepare_frame(pd, 1); oz_send_queued_frames()
557 oz_send_next_queued_frame(pd, 0); oz_send_queued_frames()
563 static int oz_send_isoc_frame(struct oz_pd *pd) oz_send_isoc_frame() argument
566 struct net_device *dev = pd->net_dev; oz_send_isoc_frame()
573 oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size, oz_send_isoc_frame()
574 pd->max_tx_size, &list); oz_send_isoc_frame()
580 oz_elt_info_free_chain(&pd->elt_buff, &list); oz_send_isoc_frame()
587 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, oz_send_isoc_frame()
594 oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; oz_send_isoc_frame()
602 oz_elt_info_free_chain(&pd->elt_buff, &list); oz_send_isoc_frame()
609 void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn) oz_retire_tx_frames() argument
617 spin_lock(&pd->tx_frame_lock); oz_retire_tx_frames()
618 list_for_each_entry(f, &pd->tx_queue, link) { oz_retire_tx_frames()
624 pkt_num, pd->nb_queued_frames); oz_retire_tx_frames()
626 pd->nb_queued_frames--; oz_retire_tx_frames()
629 list_cut_position(&list, &pd->tx_queue, &tmp->link); oz_retire_tx_frames()
630 pd->last_sent_frame = &pd->tx_queue; oz_retire_tx_frames()
631 spin_unlock(&pd->tx_frame_lock); oz_retire_tx_frames()
634 oz_retire_frame(pd, f); oz_retire_tx_frames()
641 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num) pd_stream_find() argument
645 list_for_each_entry(st, &pd->stream_list, link) { pd_stream_find()
655 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num) oz_isoc_stream_create() argument
663 spin_lock_bh(&pd->stream_lock); oz_isoc_stream_create()
664 if (!pd_stream_find(pd, ep_num)) { oz_isoc_stream_create()
665 list_add(&st->link, &pd->stream_list); oz_isoc_stream_create()
668 spin_unlock_bh(&pd->stream_lock); oz_isoc_stream_create()
685 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num) oz_isoc_stream_delete() argument
689 spin_lock_bh(&pd->stream_lock); oz_isoc_stream_delete()
690 st = pd_stream_find(pd, ep_num); oz_isoc_stream_delete()
693 spin_unlock_bh(&pd->stream_lock); oz_isoc_stream_delete()
710 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len) oz_send_isoc_unit() argument
712 struct net_device *dev = pd->net_dev; oz_send_isoc_unit()
719 spin_lock_bh(&pd->stream_lock); oz_send_isoc_unit()
720 st = pd_stream_find(pd, ep_num); oz_send_isoc_unit()
729 spin_unlock_bh(&pd->stream_lock); oz_send_isoc_unit()
734 skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev), oz_send_isoc_unit()
750 if (++nb_units < pd->ms_per_isoc) { oz_send_isoc_unit()
751 spin_lock_bh(&pd->stream_lock); oz_send_isoc_unit()
756 spin_unlock_bh(&pd->stream_lock); oz_send_isoc_unit()
761 spin_lock_bh(&pd->stream_lock); oz_send_isoc_unit()
764 spin_unlock_bh(&pd->stream_lock); oz_send_isoc_unit()
767 oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK; oz_send_isoc_unit()
774 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, oz_send_isoc_unit()
780 if (!(pd->mode & OZ_F_ISOC_ANYTIME)) { oz_send_isoc_unit()
782 int nb = pd->nb_queued_isoc_frames; oz_send_isoc_unit()
784 if (nb >= pd->isoc_latency) { oz_send_isoc_unit()
789 spin_lock(&pd->tx_frame_lock); oz_send_isoc_unit()
790 list_for_each_entry(f, &pd->tx_queue, link) { oz_send_isoc_unit()
792 oz_tx_isoc_free(pd, f); oz_send_isoc_unit()
796 spin_unlock(&pd->tx_frame_lock); oz_send_isoc_unit()
798 isoc_unit = oz_tx_frame_alloc(pd); oz_send_isoc_unit()
803 spin_lock_bh(&pd->tx_frame_lock); oz_send_isoc_unit()
804 list_add_tail(&isoc_unit->link, &pd->tx_queue); oz_send_isoc_unit()
805 pd->nb_queued_isoc_frames++; oz_send_isoc_unit()
806 spin_unlock_bh(&pd->tx_frame_lock); oz_send_isoc_unit()
809 pd->nb_queued_isoc_frames, pd->nb_queued_frames); oz_send_isoc_unit()
858 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt) oz_handle_app_elt() argument
861 g_app_if[app_id].rx(pd, elt); oz_handle_app_elt()
867 void oz_pd_indicate_farewells(struct oz_pd *pd) oz_pd_indicate_farewells() argument
874 if (list_empty(&pd->farewell_list)) { oz_pd_indicate_farewells()
878 f = list_first_entry(&pd->farewell_list, oz_pd_indicate_farewells()
883 ai->farewell(pd, f->ep_num, f->report, f->len); oz_pd_indicate_farewells()
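Note on the oz_send_next_queued_frame() and oz_retire_tx_frames() hits above: the driver keeps a cursor, pd->last_sent_frame, that points at the list head itself (&pd->tx_queue) when nothing is in flight, so cursor->next is always the next frame to transmit, and resetting the cursor to the head (as oz_rx_frame() does on a trigger) rewinds to the oldest still-queued frame. Below is a minimal userspace sketch of that pattern, with a hand-rolled circular list standing in for the kernel's struct list_head; the names are illustrative, not the driver's.

    #include <stdio.h>

    /* Doubly linked circular list whose head acts as a sentinel,
     * mirroring the kernel's struct list_head just enough for the demo. */
    struct node { struct node *next, *prev; int frame_id; };

    static void list_init(struct node *head) { head->next = head->prev = head; }

    static void list_add_tail(struct node *n, struct node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    int main(void)
    {
        struct node tx_queue;               /* plays pd->tx_queue        */
        struct node frames[3];
        struct node *last_sent = &tx_queue; /* plays pd->last_sent_frame */

        list_init(&tx_queue);
        for (int i = 0; i < 3; i++) {
            frames[i].frame_id = i + 1;
            list_add_tail(&frames[i], &tx_queue);
        }

        /* Send loop: the element after the cursor is the next frame;
         * reaching the head again means the queue is exhausted. */
        while (last_sent->next != &tx_queue) {
            last_sent = last_sent->next;
            printf("send frame %d\n", last_sent->frame_id);
        }

        /* A trigger rewinds the cursor, so transmission restarts from
         * the oldest queued frame without touching the list itself. */
        last_sent = &tx_queue;
        printf("after rewind, next would be frame %d\n",
               last_sent->next->frame_id);
        return 0;
    }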
H A Dozproto.c75 static void oz_send_conn_rsp(struct oz_pd *pd, u8 status) oz_send_conn_rsp() argument
78 struct net_device *dev = pd->net_dev; oz_send_conn_rsp()
96 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr, oz_send_conn_rsp()
109 body->mode = pd->mode; oz_send_conn_rsp()
110 body->session_id = pd->session_id; oz_send_conn_rsp()
111 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps); oz_send_conn_rsp()
120 static void pd_set_keepalive(struct oz_pd *pd, u8 kalive) pd_set_keepalive() argument
126 pd->keep_alive = keep_alive * 1000*60*60*24*20; pd_set_keepalive()
129 pd->keep_alive = keep_alive*1000; pd_set_keepalive()
132 pd->keep_alive = keep_alive*1000*60; pd_set_keepalive()
135 pd->keep_alive = keep_alive*1000*60*60; pd_set_keepalive()
138 pd->keep_alive = 0; pd_set_keepalive()
140 oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive); pd_set_keepalive()
146 static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer) pd_set_presleep() argument
149 pd->presleep = presleep*100; pd_set_presleep()
151 pd->presleep = OZ_PRESLEEP_TOUT; pd_set_presleep()
154 oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep); pd_set_presleep()
157 oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep); pd_set_presleep()
166 struct oz_pd *pd; oz_connect_req() local
176 pd = cur_pd; oz_connect_req()
182 pd = oz_pd_alloc(pd_addr); oz_connect_req()
183 if (pd == NULL) oz_connect_req()
185 getnstimeofday(&pd->last_rx_timestamp); oz_connect_req()
190 free_pd = pd; oz_connect_req()
191 pd = pd2; oz_connect_req()
195 if (pd != pd2) oz_connect_req()
196 list_add_tail(&pd->link, &g_pd_list); oz_connect_req()
198 if (pd == NULL) { oz_connect_req()
202 if (pd->net_dev != net_dev) { oz_connect_req()
203 old_net_dev = pd->net_dev; oz_connect_req()
205 pd->net_dev = net_dev; oz_connect_req()
208 pd->max_tx_size = OZ_MAX_TX_SIZE; oz_connect_req()
209 pd->mode = body->mode; oz_connect_req()
210 pd->pd_info = body->pd_info; oz_connect_req()
211 if (pd->mode & OZ_F_ISOC_NO_ELTS) { oz_connect_req()
212 pd->ms_per_isoc = body->ms_per_isoc; oz_connect_req()
213 if (!pd->ms_per_isoc) oz_connect_req()
214 pd->ms_per_isoc = 4; oz_connect_req()
218 pd->isoc_latency = (body->ms_isoc_latency & oz_connect_req()
219 ~OZ_LATENCY_MASK) / pd->ms_per_isoc; oz_connect_req()
222 pd->isoc_latency = ((body->ms_isoc_latency & oz_connect_req()
223 ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc; oz_connect_req()
226 pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC; oz_connect_req()
230 pd->max_tx_size = ((u16)body->max_len_div16)<<4; oz_connect_req()
232 pd->max_tx_size, pd->ms_per_isoc); oz_connect_req()
233 pd->max_stream_buffering = 3*1024; oz_connect_req()
234 pd->pulse_period = OZ_QUANTUM; oz_connect_req()
235 pd_set_presleep(pd, body->presleep, 0); oz_connect_req()
236 pd_set_keepalive(pd, body->keep_alive); oz_connect_req()
240 if (pd->session_id) { oz_connect_req()
241 if (pd->session_id != body->session_id) { oz_connect_req()
247 pd->session_id = oz_connect_req()
251 if (pd->session_id && !body->session_id) { oz_connect_req()
256 pd->session_id = oz_connect_req()
262 u16 start_apps = new_apps & ~pd->total_apps & ~0x1; oz_connect_req()
263 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1; oz_connect_req()
264 u16 resume_apps = new_apps & pd->paused_apps & ~0x1; oz_connect_req()
267 oz_pd_set_state(pd, OZ_PD_S_CONNECTED); oz_connect_req()
269 new_apps, pd->total_apps, pd->paused_apps); oz_connect_req()
271 if (oz_services_start(pd, start_apps, 0)) oz_connect_req()
275 if (oz_services_start(pd, resume_apps, 1)) oz_connect_req()
278 oz_services_stop(pd, stop_apps, 0); oz_connect_req()
279 oz_pd_request_heartbeat(pd); oz_connect_req()
283 oz_send_conn_rsp(pd, rsp_status); oz_connect_req()
286 oz_pd_stop(pd); oz_connect_req()
287 oz_pd_put(pd); oz_connect_req()
288 pd = NULL; oz_connect_req()
294 return pd; oz_connect_req()
300 static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index, oz_add_farewell() argument
316 list_for_each_entry(f2, &pd->farewell_list, link) { oz_add_farewell()
323 list_add_tail(&f->link, &pd->farewell_list); oz_add_farewell()
338 struct oz_pd *pd = NULL; oz_rx_frame() local
359 pd = oz_pd_find(src_addr); oz_rx_frame()
360 if (pd) { oz_rx_frame()
361 if (!(pd->state & OZ_PD_S_CONNECTED)) oz_rx_frame()
362 oz_pd_set_state(pd, OZ_PD_S_CONNECTED); oz_rx_frame()
364 if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) || oz_rx_frame()
365 (pd->presleep < MSEC_PER_SEC)) { oz_rx_frame()
366 oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep); oz_rx_frame()
367 pd->last_rx_timestamp = current_time; oz_rx_frame()
369 if (pkt_num != pd->last_rx_pkt_num) { oz_rx_frame()
370 pd->last_rx_pkt_num = pkt_num; oz_rx_frame()
377 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) { oz_rx_frame()
379 pd->last_sent_frame = &pd->tx_queue; oz_rx_frame()
382 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num); oz_rx_frame()
385 (pd->state == OZ_PD_S_CONNECTED)) { oz_rx_frame()
386 int backlog = pd->nb_queued_frames; oz_rx_frame()
388 pd->trigger_pkt_num = pkt_num; oz_rx_frame()
390 oz_send_queued_frames(pd, backlog); oz_rx_frame()
404 pd = oz_connect_req(pd, elt, src_addr, skb->dev); oz_rx_frame()
408 if (pd) oz_rx_frame()
409 oz_pd_sleep(pd); oz_rx_frame()
415 if (pd && (pd->state & OZ_PD_S_CONNECTED)) { oz_rx_frame()
417 pd_set_keepalive(pd, body->keepalive); oz_rx_frame()
418 pd_set_presleep(pd, body->presleep, 1); oz_rx_frame()
427 oz_add_farewell(pd, body->ep_num, oz_rx_frame()
433 if (pd && (pd->state & OZ_PD_S_CONNECTED)) { oz_rx_frame()
438 oz_handle_app_elt(pd, app_hdr->app_id, elt); oz_rx_frame()
447 if (pd) oz_rx_frame()
448 oz_pd_put(pd); oz_rx_frame()
478 struct oz_pd *pd = oz_protocol_term() local
480 oz_pd_get(pd); oz_protocol_term()
482 oz_pd_stop(pd); oz_protocol_term()
483 oz_pd_put(pd); oz_protocol_term()
498 struct oz_pd *pd = (struct oz_pd *)data; oz_pd_heartbeat_handler() local
502 if (pd->state & OZ_PD_S_CONNECTED) oz_pd_heartbeat_handler()
503 apps = pd->total_apps; oz_pd_heartbeat_handler()
506 oz_pd_heartbeat(pd, apps); oz_pd_heartbeat_handler()
507 oz_pd_put(pd); oz_pd_heartbeat_handler()
516 struct oz_pd *pd = (struct oz_pd *)data; oz_pd_timeout_handler() local
519 type = pd->timeout_type; oz_pd_timeout_handler()
523 oz_pd_sleep(pd); oz_pd_timeout_handler()
526 oz_pd_stop(pd); oz_pd_timeout_handler()
529 oz_pd_put(pd); oz_pd_timeout_handler()
537 struct oz_pd *pd; oz_pd_heartbeat_event() local
539 pd = container_of(timer, struct oz_pd, heartbeat); oz_pd_heartbeat_event()
540 hrtimer_forward_now(timer, ktime_set(pd->pulse_period / oz_pd_heartbeat_event()
541 MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC)); oz_pd_heartbeat_event()
542 oz_pd_get(pd); oz_pd_heartbeat_event()
543 tasklet_schedule(&pd->heartbeat_tasklet); oz_pd_heartbeat_event()
552 struct oz_pd *pd; oz_pd_timeout_event() local
554 pd = container_of(timer, struct oz_pd, timeout); oz_pd_timeout_event()
555 oz_pd_get(pd); oz_pd_timeout_event()
556 tasklet_schedule(&pd->timeout_tasklet); oz_pd_timeout_event()
563 void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time) oz_timer_add() argument
569 if (hrtimer_active(&pd->timeout)) { oz_timer_add()
570 hrtimer_set_expires(&pd->timeout, ktime_set(due_time / oz_timer_add()
573 hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL); oz_timer_add()
575 hrtimer_start(&pd->timeout, ktime_set(due_time / oz_timer_add()
579 pd->timeout_type = type; oz_timer_add()
582 if (!hrtimer_active(&pd->heartbeat)) oz_timer_add()
583 hrtimer_start(&pd->heartbeat, ktime_set(due_time / oz_timer_add()
594 void oz_pd_request_heartbeat(struct oz_pd *pd) oz_pd_request_heartbeat() argument
596 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ? oz_pd_request_heartbeat()
597 pd->pulse_period : OZ_QUANTUM); oz_pd_request_heartbeat()
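Both hrtimer call sites above (oz_pd_heartbeat_event() and oz_timer_add()) carry the same div/mod split, because ktime_set() takes whole seconds plus nanoseconds while the driver keeps pulse_period and due_time in milliseconds. A standalone check of that arithmetic:

    #include <stdio.h>

    #define MSEC_PER_SEC  1000L
    #define NSEC_PER_MSEC 1000000L

    /* Split a millisecond interval into the (sec, nsec) pair that
     * ktime_set() expects, exactly as the hrtimer call sites above do. */
    static void ms_to_ktime_args(long ms, long *sec, long *nsec)
    {
        *sec  = ms / MSEC_PER_SEC;
        *nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC;
    }

    int main(void)
    {
        long sec, nsec;

        ms_to_ktime_args(2500, &sec, &nsec);   /* e.g. a 2.5 s pulse period */
        printf("%ld ms -> %ld s + %ld ns\n", 2500L, sec, nsec);
        /* prints: 2500 ms -> 2 s + 500000000 ns */
        return 0;
    }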
605 struct oz_pd *pd; oz_pd_find() local
608 list_for_each_entry(pd, &g_pd_list, link) { oz_pd_find()
609 if (ether_addr_equal(pd->mac_addr, mac_addr)) { oz_pd_find()
610 oz_pd_get(pd); oz_pd_find()
612 return pd; oz_pd_find()
704 struct oz_pd *pd; pd_stop_all_for_device() local
708 list_for_each_entry_safe(pd, n, &g_pd_list, link) { pd_stop_all_for_device()
709 if (pd->net_dev == net_dev) { pd_stop_all_for_device()
710 list_move(&pd->link, &h); pd_stop_all_for_device()
711 oz_pd_get(pd); pd_stop_all_for_device()
716 pd = list_first_entry(&h, struct oz_pd, link); pd_stop_all_for_device()
717 oz_pd_stop(pd); pd_stop_all_for_device()
718 oz_pd_put(pd); pd_stop_all_for_device()
801 struct oz_pd *pd; oz_get_pd_list() local
805 list_for_each_entry(pd, &g_pd_list, link) { oz_get_pd_list()
808 ether_addr_copy((u8 *)&addr[count++], pd->mac_addr); oz_get_pd_list()
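pd_set_keepalive() above normalizes the negotiated keep-alive to milliseconds, scaling a raw count by a unit carried in the same byte (the listing shows the *1000, *1000*60, *1000*60*60 and 20-day conversions). A hedged sketch of that decoding follows; the bit layout (top two bits unit, low six bits count) and the KA_* names are assumptions for illustration, since the real ozwpan masks live in a protocol header this listing does not include.

    #include <stdio.h>

    /* Assumed layout for the demo: top two bits select the unit,
     * low six bits carry the count. Not the driver's actual masks. */
    #define KA_TYPE_MASK   0xc0
    #define KA_VALUE_MASK  0x3f
    enum { KA_NONE = 0x00, KA_SECS = 0x40, KA_MINS = 0x80, KA_HOURS = 0xc0 };

    static unsigned long ka_to_msec(unsigned char kalive)
    {
        unsigned long v = kalive & KA_VALUE_MASK;

        switch (kalive & KA_TYPE_MASK) {
        case KA_SECS:  return v * 1000;
        case KA_MINS:  return v * 1000 * 60;
        case KA_HOURS: return v * 1000 * 60 * 60;
        default:       return 0;   /* no keep-alive; the driver also has a
                                    * 20-day "special" unit, omitted here */
        }
    }

    int main(void)
    {
        printf("30 s  -> %lu ms\n", ka_to_msec(KA_SECS | 30));
        printf("5 min -> %lu ms\n", ka_to_msec(KA_MINS | 5));
        return 0;
    }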
H A Dozusbsvc.h18 struct oz_pd *pd; member in struct:oz_usb_ctx
25 int oz_usb_start(struct oz_pd *pd, int resume);
26 void oz_usb_stop(struct oz_pd *pd, int pause);
27 void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
28 int oz_usb_heartbeat(struct oz_pd *pd);
29 void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
H A Dozcdev.h13 int oz_cdev_start(struct oz_pd *pd, int resume);
14 void oz_cdev_stop(struct oz_pd *pd, int pause);
15 void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
H A Dozusbsvc.c52 int oz_usb_start(struct oz_pd *pd, int resume) oz_usb_start() argument
70 usb_ctx->pd = pd; oz_usb_start()
76 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_start()
77 old_ctx = pd->app_ctx[OZ_APPID_USB]; oz_usb_start()
79 pd->app_ctx[OZ_APPID_USB] = usb_ctx; oz_usb_start()
80 oz_usb_get(pd->app_ctx[OZ_APPID_USB]); oz_usb_start()
81 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_start()
90 oz_pd_get(pd); oz_usb_start()
102 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_start()
103 pd->app_ctx[OZ_APPID_USB] = NULL; oz_usb_start()
104 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_start()
117 void oz_usb_stop(struct oz_pd *pd, int pause) oz_usb_stop() argument
125 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_stop()
126 usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB]; oz_usb_stop()
127 pd->app_ctx[OZ_APPID_USB] = NULL; oz_usb_stop()
128 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_stop()
178 oz_pd_put(usb_ctx->pd); oz_usb_put()
186 int oz_usb_heartbeat(struct oz_pd *pd) oz_usb_heartbeat() argument
191 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_heartbeat()
192 usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB]; oz_usb_heartbeat()
195 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_heartbeat()
214 struct oz_pd *pd = usb_ctx->pd; oz_usb_stream_create() local
217 if (pd->mode & OZ_F_ISOC_NO_ELTS) { oz_usb_stream_create()
218 oz_isoc_stream_create(pd, ep_num); oz_usb_stream_create()
220 oz_pd_get(pd); oz_usb_stream_create()
221 if (oz_elt_stream_create(&pd->elt_buff, ep_num, oz_usb_stream_create()
222 4*pd->max_tx_size)) { oz_usb_stream_create()
223 oz_pd_put(pd); oz_usb_stream_create()
238 struct oz_pd *pd = usb_ctx->pd; oz_usb_stream_delete() local
240 if (pd) { oz_usb_stream_delete()
242 if (pd->mode & OZ_F_ISOC_NO_ELTS) { oz_usb_stream_delete()
243 oz_isoc_stream_delete(pd, ep_num); oz_usb_stream_delete()
245 if (oz_elt_stream_delete(&pd->elt_buff, ep_num)) oz_usb_stream_delete()
247 oz_pd_put(pd); oz_usb_stream_delete()
261 if (usb_ctx && usb_ctx->pd) oz_usb_request_heartbeat()
262 oz_pd_request_heartbeat(usb_ctx->pd); oz_usb_request_heartbeat()
H A Dozpd.h111 void oz_pd_destroy(struct oz_pd *pd);
112 void oz_pd_get(struct oz_pd *pd);
113 void oz_pd_put(struct oz_pd *pd);
114 void oz_pd_set_state(struct oz_pd *pd, unsigned state);
115 void oz_pd_indicate_farewells(struct oz_pd *pd);
116 int oz_pd_sleep(struct oz_pd *pd);
117 void oz_pd_stop(struct oz_pd *pd);
118 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
119 int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
120 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
121 int oz_prepare_frame(struct oz_pd *pd, int empty);
122 void oz_send_queued_frames(struct oz_pd *pd, int backlog);
123 void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
124 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
125 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
126 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len);
127 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
H A Dozcdev.c48 static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd) oz_cdev_claim_ctx() argument
52 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_claim_ctx()
53 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_claim_ctx()
56 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_claim_ctx()
101 struct oz_pd *pd; oz_cdev_read() local
105 pd = g_cdev.active_pd; oz_cdev_read()
106 if (pd) oz_cdev_read()
107 oz_pd_get(pd); oz_cdev_read()
109 if (pd == NULL) oz_cdev_read()
111 ctx = oz_cdev_claim_ctx(pd); oz_cdev_read()
141 oz_pd_put(pd); oz_cdev_read()
151 struct oz_pd *pd; oz_cdev_write() local
162 pd = g_cdev.active_pd; oz_cdev_write()
163 if (pd) oz_cdev_write()
164 oz_pd_get(pd); oz_cdev_write()
166 if (pd == NULL) oz_cdev_write()
168 if (!(pd->state & OZ_PD_S_CONNECTED)) oz_cdev_write()
170 eb = &pd->elt_buff; oz_cdev_write()
185 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_cdev_write()
186 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_write()
196 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_cdev_write()
204 oz_pd_put(pd); oz_cdev_write()
214 struct oz_pd *pd; oz_set_active_pd() local
217 pd = oz_pd_find(addr); oz_set_active_pd()
218 if (pd) { oz_set_active_pd()
222 g_cdev.active_pd = pd; oz_set_active_pd()
229 pd = g_cdev.active_pd; oz_set_active_pd()
234 if (pd) oz_set_active_pd()
235 oz_pd_put(pd); oz_set_active_pd()
430 int oz_cdev_start(struct oz_pd *pd, int resume) oz_cdev_start() argument
444 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_start()
445 old_ctx = pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_start()
447 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_start()
450 pd->app_ctx[OZ_APPID_SERIAL] = ctx; oz_cdev_start()
451 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_start()
455 ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) { oz_cdev_start()
456 oz_pd_get(pd); oz_cdev_start()
457 g_cdev.active_pd = pd; oz_cdev_start()
468 void oz_cdev_stop(struct oz_pd *pd, int pause) oz_cdev_stop() argument
476 spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_stop()
477 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_stop()
478 pd->app_ctx[OZ_APPID_SERIAL] = NULL; oz_cdev_stop()
479 spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]); oz_cdev_stop()
483 if (pd == g_cdev.active_pd) oz_cdev_stop()
486 pd = NULL; oz_cdev_stop()
488 if (pd) { oz_cdev_stop()
489 oz_pd_put(pd); oz_cdev_stop()
498 void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt) oz_cdev_rx() argument
508 ctx = oz_cdev_claim_ctx(pd); oz_cdev_rx()
H A Dozproto.h35 int (*start)(struct oz_pd *pd, int resume);
36 void (*stop)(struct oz_pd *pd, int pause);
37 void (*rx)(struct oz_pd *pd, struct oz_elt *elt);
38 int (*heartbeat)(struct oz_pd *pd);
39 void (*farewell)(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
49 void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
50 void oz_timer_delete(struct oz_pd *pd, int type);
51 void oz_pd_request_heartbeat(struct oz_pd *pd);
H A Dozusbsvc1.c60 struct oz_pd *pd = usb_ctx->pd; oz_usb_get_desc_req() local
63 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_get_desc_req()
64 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); oz_usb_get_desc_req()
96 struct oz_pd *pd = usb_ctx->pd; oz_usb_set_config_req() local
98 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_config_req()
99 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); oz_usb_set_config_req()
119 struct oz_pd *pd = usb_ctx->pd; oz_usb_set_interface_req() local
121 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_interface_req()
122 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); oz_usb_set_interface_req()
144 struct oz_pd *pd = usb_ctx->pd; oz_usb_set_clear_feature_req() local
146 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_clear_feature_req()
147 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); oz_usb_set_clear_feature_req()
170 struct oz_pd *pd = usb_ctx->pd; oz_usb_vendor_class_req() local
172 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_vendor_class_req()
173 struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff); oz_usb_vendor_class_req()
248 struct oz_pd *pd = usb_ctx->pd; oz_usb_send_isoc() local
255 if (pd->mode & OZ_F_ISOC_NO_ELTS) { oz_usb_send_isoc()
261 oz_send_isoc_unit(pd, ep_num, data, desc->length); oz_usb_send_isoc()
267 eb = &pd->elt_buff; oz_usb_send_isoc()
311 pd->mode & OZ_F_ISOC_ANYTIME); oz_usb_send_isoc()
370 void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) oz_usb_rx() argument
375 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_rx()
376 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; oz_usb_rx()
379 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_rx()
446 void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len) oz_usb_farewell() argument
450 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_farewell()
451 usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; oz_usb_farewell()
454 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); oz_usb_farewell()
H A Dozdbg.h48 #define oz_pd_dbg(pd, mask, fmt, ...) \
H A Dozeltbuf.h14 typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
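The ozproto.h block above defines a per-application ops table (start/stop/rx/heartbeat/farewell), and the oz_services_start()/oz_services_stop() hits in ozpd.c walk it under a bitmask, recording survivors in pd->total_apps with |= (1 << i). A compact userspace sketch of that dispatch pattern; the two toy apps and their names are invented for illustration.

    #include <stdio.h>

    struct pd;                          /* opaque device, as in the driver */

    struct app_if {                     /* mirrors the start/stop slots */
        int  (*start)(struct pd *pd, int resume);
        void (*stop)(struct pd *pd, int pause);
        const char *name;
    };

    static int  usb_start(struct pd *pd, int r) { (void)pd; printf("usb start (resume=%d)\n", r); return 0; }
    static void usb_stop(struct pd *pd, int p)  { (void)pd; printf("usb stop (pause=%d)\n", p); }
    static int  ser_start(struct pd *pd, int r) { (void)pd; printf("serial start (resume=%d)\n", r); return 0; }
    static void ser_stop(struct pd *pd, int p)  { (void)pd; printf("serial stop (pause=%d)\n", p); }

    static const struct app_if g_app_if[] = {
        { usb_start, usb_stop, "usb"    },
        { ser_start, ser_stop, "serial" },
    };
    #define NB_APPS (sizeof(g_app_if) / sizeof(g_app_if[0]))

    /* Start every app whose bit is set, accumulating the running set
     * the way oz_services_start() builds pd->total_apps. */
    static unsigned services_start(struct pd *pd, unsigned apps, int resume)
    {
        unsigned total = 0;

        for (unsigned i = 0; i < NB_APPS; i++)
            if ((apps & (1u << i)) && g_app_if[i].start(pd, resume) == 0)
                total |= 1u << i;
        return total;
    }

    int main(void)
    {
        unsigned total = services_start(NULL, 0x3, 0);

        for (unsigned i = 0; i < NB_APPS; i++)
            if (total & (1u << i))
                g_app_if[i].stop(NULL, 0);
        return 0;
    }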
/linux-4.1.27/arch/arm/mach-exynos/
H A Dpm_domains.c36 struct generic_pm_domain pd; member in struct:exynos_pm_domain
45 struct exynos_pm_domain *pd; exynos_pd_power() local
51 pd = container_of(domain, struct exynos_pm_domain, pd); exynos_pd_power()
52 base = pd->base; exynos_pd_power()
55 if (IS_ERR(pd->asb_clk[i])) exynos_pd_power()
57 clk_prepare_enable(pd->asb_clk[i]); exynos_pd_power()
63 if (IS_ERR(pd->clk[i])) exynos_pd_power()
65 if (clk_set_parent(pd->clk[i], pd->oscclk)) exynos_pd_power()
67 pd->name, i); exynos_pd_power()
91 if (IS_ERR(pd->clk[i])) exynos_pd_power()
93 if (clk_set_parent(pd->clk[i], pd->pclk[i])) exynos_pd_power()
95 pd->name, i); exynos_pd_power()
100 if (IS_ERR(pd->asb_clk[i])) exynos_pd_power()
102 clk_disable_unprepare(pd->asb_clk[i]); exynos_pd_power()
123 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { exynos4_pm_init_power_domain()
124 struct exynos_pm_domain *pd; exynos4_pm_init_power_domain() local
131 pd = kzalloc(sizeof(*pd), GFP_KERNEL); exynos4_pm_init_power_domain()
132 if (!pd) { exynos4_pm_init_power_domain()
138 pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL); exynos4_pm_init_power_domain()
139 pd->name = pd->pd.name; exynos4_pm_init_power_domain()
140 pd->base = of_iomap(np, 0); exynos4_pm_init_power_domain()
141 pd->pd.power_off = exynos_pd_power_off; exynos4_pm_init_power_domain()
142 pd->pd.power_on = exynos_pd_power_on; exynos4_pm_init_power_domain()
148 pd->asb_clk[i] = clk_get(dev, clk_name); exynos4_pm_init_power_domain()
149 if (IS_ERR(pd->asb_clk[i])) exynos4_pm_init_power_domain()
153 pd->oscclk = clk_get(dev, "oscclk"); exynos4_pm_init_power_domain()
154 if (IS_ERR(pd->oscclk)) exynos4_pm_init_power_domain()
161 pd->clk[i] = clk_get(dev, clk_name); exynos4_pm_init_power_domain()
162 if (IS_ERR(pd->clk[i])) exynos4_pm_init_power_domain()
165 pd->pclk[i] = clk_get(dev, clk_name); exynos4_pm_init_power_domain()
166 if (IS_ERR(pd->pclk[i])) { exynos4_pm_init_power_domain()
167 clk_put(pd->clk[i]); exynos4_pm_init_power_domain()
168 pd->clk[i] = ERR_PTR(-EINVAL); exynos4_pm_init_power_domain()
173 if (IS_ERR(pd->clk[0])) exynos4_pm_init_power_domain()
174 clk_put(pd->oscclk); exynos4_pm_init_power_domain()
177 on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN; exynos4_pm_init_power_domain()
179 pm_genpd_init(&pd->pd, NULL, !on); exynos4_pm_init_power_domain()
180 of_genpd_add_provider_simple(np, &pd->pd); exynos4_pm_init_power_domain()
184 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { exynos4_pm_init_power_domain()
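pm_domains.c above shows the embed-and-recover idiom: struct generic_pm_domain is embedded in struct exynos_pm_domain, and the power callback recovers the wrapper with container_of(). A freestanding sketch of the idiom, with container_of() spelled out (the kernel macro is equivalent up to extra type checking) and the domain name invented:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal container_of: the kernel's version adds type checking. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct generic_pm_domain {             /* framework-owned part */
        const char *name;
        int (*power_on)(struct generic_pm_domain *domain);
    };

    struct exynos_pm_domain {              /* driver-owned wrapper */
        void *base;                        /* register base, unused in demo */
        struct generic_pm_domain pd;       /* embedded framework struct */
    };

    static int exynos_pd_power_on(struct generic_pm_domain *domain)
    {
        /* The framework hands us the embedded member; recover the
         * enclosing driver struct exactly as exynos_pd_power() does. */
        struct exynos_pm_domain *epd =
            container_of(domain, struct exynos_pm_domain, pd);

        printf("powering on %s (wrapper at %p)\n", domain->name, (void *)epd);
        return 0;
    }

    int main(void)
    {
        struct exynos_pm_domain dom = {
            .base = NULL,
            .pd = { .name = "lcd0", .power_on = exynos_pd_power_on },
        };

        return dom.pd.power_on(&dom.pd);
    }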
/linux-4.1.27/arch/arm/plat-samsung/
H A Dplatformdata.c20 void __init *s3c_set_platdata(void *pd, size_t pdsize, s3c_set_platdata() argument
25 if (!pd) { s3c_set_platdata()
31 npd = kmemdup(pd, pdsize, GFP_KERNEL); s3c_set_platdata()
41 void s3c_sdhci_set_platdata(struct s3c_sdhci_platdata *pd, s3c_sdhci_set_platdata() argument
44 set->cd_type = pd->cd_type; s3c_sdhci_set_platdata()
45 set->ext_cd_init = pd->ext_cd_init; s3c_sdhci_set_platdata()
46 set->ext_cd_cleanup = pd->ext_cd_cleanup; s3c_sdhci_set_platdata()
47 set->ext_cd_gpio = pd->ext_cd_gpio; s3c_sdhci_set_platdata()
48 set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert; s3c_sdhci_set_platdata()
50 if (pd->max_width) s3c_sdhci_set_platdata()
51 set->max_width = pd->max_width; s3c_sdhci_set_platdata()
52 if (pd->cfg_gpio) s3c_sdhci_set_platdata()
53 set->cfg_gpio = pd->cfg_gpio; s3c_sdhci_set_platdata()
54 if (pd->host_caps) s3c_sdhci_set_platdata()
55 set->host_caps |= pd->host_caps; s3c_sdhci_set_platdata()
56 if (pd->host_caps2) s3c_sdhci_set_platdata()
57 set->host_caps2 |= pd->host_caps2; s3c_sdhci_set_platdata()
58 if (pd->pm_caps) s3c_sdhci_set_platdata()
59 set->pm_caps |= pd->pm_caps; s3c_sdhci_set_platdata()
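s3c_set_platdata() above duplicates board-supplied platform data with kmemdup() before attaching it to a device, which is why the devs.c callers further down can safely pass structs that live on the stack (see the s3c64xx_spi*_set_platdata() hits). A userspace sketch of the copy-on-register pattern, with malloc()+memcpy() standing in for kmemdup() and all names illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct device { void *platform_data; };

    /* Duplicate caller-owned platform data so the original may be
     * temporary (stack or init-only), as s3c_set_platdata() does with
     * kmemdup(). Returns the copy, or NULL on failure. */
    static void *set_platdata(const void *pd, size_t pdsize, struct device *dev)
    {
        void *npd;

        if (!pd)
            return NULL;            /* driver would fall back to defaults */

        npd = malloc(pdsize);       /* stands in for kmemdup() */
        if (npd) {
            memcpy(npd, pd, pdsize);
            dev->platform_data = npd;
        }
        return npd;
    }

    struct spi_info { int num_cs; int src_clk_nr; };

    int main(void)
    {
        struct device spi0 = { NULL };

        {   /* caller's copy goes out of scope after registration */
            struct spi_info pd = { .num_cs = 2, .src_clk_nr = 0 };
            set_platdata(&pd, sizeof(pd), &spi0);
        }

        printf("num_cs = %d\n", ((struct spi_info *)spi0.platform_data)->num_cs);
        free(spi0.platform_data);
        return 0;
    }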
H A Ddevs.c168 void __init s3c_fb_set_platdata(struct s3c_fb_platdata *pd) s3c_fb_set_platdata() argument
170 s3c_set_platdata(pd, sizeof(struct s3c_fb_platdata), s3c_fb_set_platdata()
184 void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd) s3c_hwmon_set_platdata() argument
186 s3c_set_platdata(pd, sizeof(struct s3c_hwmon_pdata), s3c_hwmon_set_platdata()
217 void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd) s3c_sdhci0_set_platdata() argument
219 s3c_sdhci_set_platdata(pd, &s3c_hsmmc0_def_platdata); s3c_sdhci0_set_platdata()
247 void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd) s3c_sdhci1_set_platdata() argument
249 s3c_sdhci_set_platdata(pd, &s3c_hsmmc1_def_platdata); s3c_sdhci1_set_platdata()
279 void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd) s3c_sdhci2_set_platdata() argument
281 s3c_sdhci_set_platdata(pd, &s3c_hsmmc2_def_platdata); s3c_sdhci2_set_platdata()
309 void s3c_sdhci3_set_platdata(struct s3c_sdhci_platdata *pd) s3c_sdhci3_set_platdata() argument
311 s3c_sdhci_set_platdata(pd, &s3c_hsmmc3_def_platdata); s3c_sdhci3_set_platdata()
336 void __init s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c0_set_platdata() argument
340 if (!pd) { s3c_i2c0_set_platdata()
341 pd = &default_i2c_data; s3c_i2c0_set_platdata()
342 pd->bus_num = 0; s3c_i2c0_set_platdata()
345 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c0_set_platdata()
365 void __init s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c1_set_platdata() argument
369 if (!pd) { s3c_i2c1_set_platdata()
370 pd = &default_i2c_data; s3c_i2c1_set_platdata()
371 pd->bus_num = 1; s3c_i2c1_set_platdata()
374 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c1_set_platdata()
395 void __init s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c2_set_platdata() argument
399 if (!pd) { s3c_i2c2_set_platdata()
400 pd = &default_i2c_data; s3c_i2c2_set_platdata()
401 pd->bus_num = 2; s3c_i2c2_set_platdata()
404 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c2_set_platdata()
425 void __init s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c3_set_platdata() argument
429 if (!pd) { s3c_i2c3_set_platdata()
430 pd = &default_i2c_data; s3c_i2c3_set_platdata()
431 pd->bus_num = 3; s3c_i2c3_set_platdata()
434 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c3_set_platdata()
455 void __init s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c4_set_platdata() argument
459 if (!pd) { s3c_i2c4_set_platdata()
460 pd = &default_i2c_data; s3c_i2c4_set_platdata()
461 pd->bus_num = 4; s3c_i2c4_set_platdata()
464 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c4_set_platdata()
485 void __init s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c5_set_platdata() argument
489 if (!pd) { s3c_i2c5_set_platdata()
490 pd = &default_i2c_data; s3c_i2c5_set_platdata()
491 pd->bus_num = 5; s3c_i2c5_set_platdata()
494 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c5_set_platdata()
515 void __init s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c6_set_platdata() argument
519 if (!pd) { s3c_i2c6_set_platdata()
520 pd = &default_i2c_data; s3c_i2c6_set_platdata()
521 pd->bus_num = 6; s3c_i2c6_set_platdata()
524 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c6_set_platdata()
545 void __init s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *pd) s3c_i2c7_set_platdata() argument
549 if (!pd) { s3c_i2c7_set_platdata()
550 pd = &default_i2c_data; s3c_i2c7_set_platdata()
551 pd->bus_num = 7; s3c_i2c7_set_platdata()
554 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), s3c_i2c7_set_platdata()
617 void __init samsung_keypad_set_platdata(struct samsung_keypad_platdata *pd) samsung_keypad_set_platdata() argument
621 npd = s3c_set_platdata(pd, sizeof(struct samsung_keypad_platdata), samsung_keypad_set_platdata()
648 void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd) s3c24xx_fb_set_platdata() argument
652 npd = s3c_set_platdata(pd, sizeof(*npd), &s3c_device_lcd); s3c24xx_fb_set_platdata()
654 npd->displays = kmemdup(pd->displays, s3c24xx_fb_set_platdata()
822 void __init samsung_pwm_set_platdata(struct samsung_pwm_variant *pd) samsung_pwm_set_platdata() argument
824 samsung_device_pwm.dev.platform_data = pd; samsung_pwm_set_platdata()
960 void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) s3c24xx_ts_set_platdata() argument
962 if (!pd) s3c24xx_ts_set_platdata()
963 pd = &default_ts_data; s3c24xx_ts_set_platdata()
965 s3c_set_platdata(pd, sizeof(struct s3c2410_ts_mach_info), s3c24xx_ts_set_platdata()
1020 void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *pd) s3c24xx_udc_set_platdata() argument
1022 s3c_set_platdata(pd, sizeof(*pd), &s3c_device_usbgadget); s3c24xx_udc_set_platdata()
1045 void __init s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd) s3c_hsotg_set_platdata() argument
1049 npd = s3c_set_platdata(pd, sizeof(struct s3c_hsotg_plat), s3c_hsotg_set_platdata()
1078 void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd) s3c24xx_hsudc_set_platdata() argument
1080 s3c_set_platdata(pd, sizeof(*pd), &s3c_device_usb_hsudc); s3c24xx_hsudc_set_platdata()
1122 struct s3c64xx_spi_info pd; s3c64xx_spi0_set_platdata() local
1130 pd.num_cs = num_cs; s3c64xx_spi0_set_platdata()
1131 pd.src_clk_nr = src_clk_nr; s3c64xx_spi0_set_platdata()
1132 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; s3c64xx_spi0_set_platdata()
1134 pd.filter = pl330_filter; s3c64xx_spi0_set_platdata()
1136 pd.filter = pl08x_filter_id; s3c64xx_spi0_set_platdata()
1138 pd.filter = s3c24xx_dma_filter; s3c64xx_spi0_set_platdata()
1141 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); s3c64xx_spi0_set_platdata()
1167 struct s3c64xx_spi_info pd; s3c64xx_spi1_set_platdata() local
1175 pd.num_cs = num_cs; s3c64xx_spi1_set_platdata()
1176 pd.src_clk_nr = src_clk_nr; s3c64xx_spi1_set_platdata()
1177 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; s3c64xx_spi1_set_platdata()
1179 pd.filter = pl330_filter; s3c64xx_spi1_set_platdata()
1181 pd.filter = pl08x_filter_id; s3c64xx_spi1_set_platdata()
1184 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); s3c64xx_spi1_set_platdata()
1210 struct s3c64xx_spi_info pd; s3c64xx_spi2_set_platdata() local
1218 pd.num_cs = num_cs; s3c64xx_spi2_set_platdata()
1219 pd.src_clk_nr = src_clk_nr; s3c64xx_spi2_set_platdata()
1220 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; s3c64xx_spi2_set_platdata()
1222 pd.filter = pl330_filter; s3c64xx_spi2_set_platdata()
1224 pd.filter = pl08x_filter_id; s3c64xx_spi2_set_platdata()
1227 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); s3c64xx_spi2_set_platdata()
/linux-4.1.27/drivers/block/
H A Dpktcdvd.c74 #define pkt_err(pd, fmt, ...) \
75 pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
76 #define pkt_notice(pd, fmt, ...) \
77 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
78 #define pkt_info(pd, fmt, ...) \
79 pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
81 #define pkt_dbg(level, pd, fmt, ...) \
85 pd->name, __func__, ##__VA_ARGS__); \
87 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
109 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) get_zone() argument
111 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); get_zone()
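get_zone() just above rounds a sector down to the start of its packet zone with a mask, which is only equivalent to divide-and-multiply when the packet size is a power of two (32-sector fixed packets are typical; the power-of-two property is assumed here, pkt_probe_settings() further down only rejects zero or oversized values). A short worked example of the arithmetic:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Round down to the zone start; size must be a power of two
     * for the mask to be equivalent to (x / size) * size. */
    static sector_t get_zone(sector_t sector, sector_t offset, sector_t size)
    {
        return (sector + offset) & ~(sector_t)(size - 1);
    }

    int main(void)
    {
        /* 32-sector packets, no track offset: sector 100 falls in
         * the zone starting at sector 96 (100 & ~31 == 96). */
        printf("zone(100) = %llu\n", get_zone(100, 0, 32));
        return 0;
    }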
117 static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd, pkt_kobj_create() argument
128 p->pd = pd; pkt_kobj_create()
208 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; kobj_pkt_show() local
212 n = sprintf(data, "%lu\n", pd->stats.pkt_started); kobj_pkt_show()
215 n = sprintf(data, "%lu\n", pd->stats.pkt_ended); kobj_pkt_show()
218 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1); kobj_pkt_show()
221 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1); kobj_pkt_show()
224 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1); kobj_pkt_show()
227 spin_lock(&pd->lock); kobj_pkt_show()
228 v = pd->bio_queue_size; kobj_pkt_show()
229 spin_unlock(&pd->lock); kobj_pkt_show()
233 spin_lock(&pd->lock); kobj_pkt_show()
234 v = pd->write_congestion_off; kobj_pkt_show()
235 spin_unlock(&pd->lock); kobj_pkt_show()
239 spin_lock(&pd->lock); kobj_pkt_show()
240 v = pd->write_congestion_on; kobj_pkt_show()
241 spin_unlock(&pd->lock); kobj_pkt_show()
268 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; kobj_pkt_store() local
272 pd->stats.pkt_started = 0; kobj_pkt_store()
273 pd->stats.pkt_ended = 0; kobj_pkt_store()
274 pd->stats.secs_w = 0; kobj_pkt_store()
275 pd->stats.secs_rg = 0; kobj_pkt_store()
276 pd->stats.secs_r = 0; kobj_pkt_store()
280 spin_lock(&pd->lock); kobj_pkt_store()
281 pd->write_congestion_off = val; kobj_pkt_store()
282 init_write_congestion_marks(&pd->write_congestion_off, kobj_pkt_store()
283 &pd->write_congestion_on); kobj_pkt_store()
284 spin_unlock(&pd->lock); kobj_pkt_store()
288 spin_lock(&pd->lock); kobj_pkt_store()
289 pd->write_congestion_on = val; kobj_pkt_store()
290 init_write_congestion_marks(&pd->write_congestion_off, kobj_pkt_store()
291 &pd->write_congestion_on); kobj_pkt_store()
292 spin_unlock(&pd->lock); kobj_pkt_store()
312 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) pkt_sysfs_dev_new() argument
315 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL, pkt_sysfs_dev_new()
316 "%s", pd->name); pkt_sysfs_dev_new()
317 if (IS_ERR(pd->dev)) pkt_sysfs_dev_new()
318 pd->dev = NULL; pkt_sysfs_dev_new()
320 if (pd->dev) { pkt_sysfs_dev_new()
321 pd->kobj_stat = pkt_kobj_create(pd, "stat", pkt_sysfs_dev_new()
322 &pd->dev->kobj, pkt_sysfs_dev_new()
324 pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue", pkt_sysfs_dev_new()
325 &pd->dev->kobj, pkt_sysfs_dev_new()
330 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) pkt_sysfs_dev_remove() argument
332 pkt_kobj_remove(pd->kobj_stat); pkt_sysfs_dev_remove()
333 pkt_kobj_remove(pd->kobj_wqueue); pkt_sysfs_dev_remove()
335 device_unregister(pd->dev); pkt_sysfs_dev_remove()
358 struct pktcdvd_device *pd = pkt_devs[idx]; class_pktcdvd_show_map() local
359 if (!pd) class_pktcdvd_show_map()
362 pd->name, class_pktcdvd_show_map()
363 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), class_pktcdvd_show_map()
364 MAJOR(pd->bdev->bd_dev), class_pktcdvd_show_map()
365 MINOR(pd->bdev->bd_dev)); class_pktcdvd_show_map()
472 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) pkt_debugfs_dev_new() argument
476 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); pkt_debugfs_dev_new()
477 if (!pd->dfs_d_root) pkt_debugfs_dev_new()
480 pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, pkt_debugfs_dev_new()
481 pd->dfs_d_root, pd, &debug_fops); pkt_debugfs_dev_new()
484 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) pkt_debugfs_dev_remove() argument
488 debugfs_remove(pd->dfs_f_info); pkt_debugfs_dev_remove()
489 debugfs_remove(pd->dfs_d_root); pkt_debugfs_dev_remove()
490 pd->dfs_f_info = NULL; pkt_debugfs_dev_remove()
491 pd->dfs_d_root = NULL; pkt_debugfs_dev_remove()
508 static void pkt_bio_finished(struct pktcdvd_device *pd) pkt_bio_finished() argument
510 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); pkt_bio_finished()
511 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { pkt_bio_finished()
512 pkt_dbg(2, pd, "queue empty\n"); pkt_bio_finished()
513 atomic_set(&pd->iosched.attention, 1); pkt_bio_finished()
514 wake_up(&pd->wqueue); pkt_bio_finished()
590 static void pkt_shrink_pktlist(struct pktcdvd_device *pd) pkt_shrink_pktlist() argument
594 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); pkt_shrink_pktlist()
596 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { pkt_shrink_pktlist()
599 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); pkt_shrink_pktlist()
602 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) pkt_grow_pktlist() argument
606 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); pkt_grow_pktlist()
609 pkt = pkt_alloc_packet_data(pd->settings.size >> 2); pkt_grow_pktlist()
611 pkt_shrink_pktlist(pd); pkt_grow_pktlist()
615 pkt->pd = pd; pkt_grow_pktlist()
616 list_add(&pkt->list, &pd->cdrw.pkt_free_list); pkt_grow_pktlist()
630 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) pkt_rbtree_erase() argument
632 rb_erase(&node->rb_node, &pd->bio_queue); pkt_rbtree_erase()
633 mempool_free(node, pd->rb_pool); pkt_rbtree_erase()
634 pd->bio_queue_size--; pkt_rbtree_erase()
635 BUG_ON(pd->bio_queue_size < 0); pkt_rbtree_erase()
639 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
641 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) pkt_rbtree_find() argument
643 struct rb_node *n = pd->bio_queue.rb_node; pkt_rbtree_find()
648 BUG_ON(pd->bio_queue_size > 0); pkt_rbtree_find()
673 * Insert a node into the pd->bio_queue rb tree.
675 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) pkt_rbtree_insert() argument
677 struct rb_node **p = &pd->bio_queue.rb_node; pkt_rbtree_insert()
691 rb_insert_color(&node->rb_node, &pd->bio_queue); pkt_rbtree_insert()
692 pd->bio_queue_size++; pkt_rbtree_insert()
699 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) pkt_generic_packet() argument
701 struct request_queue *q = bdev_get_queue(pd->bdev); pkt_generic_packet()
725 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); pkt_generic_packet()
748 static void pkt_dump_sense(struct pktcdvd_device *pd, pkt_dump_sense() argument
754 pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", pkt_dump_sense()
759 pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); pkt_dump_sense()
765 static int pkt_flush_cache(struct pktcdvd_device *pd) pkt_flush_cache() argument
780 return pkt_generic_packet(pd, &cgc); pkt_flush_cache()
786 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, pkt_set_speed() argument
801 if ((ret = pkt_generic_packet(pd, &cgc))) pkt_set_speed()
802 pkt_dump_sense(pd, &cgc); pkt_set_speed()
811 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) pkt_queue_bio() argument
813 spin_lock(&pd->iosched.lock); pkt_queue_bio()
815 bio_list_add(&pd->iosched.read_queue, bio); pkt_queue_bio()
817 bio_list_add(&pd->iosched.write_queue, bio); pkt_queue_bio()
818 spin_unlock(&pd->iosched.lock); pkt_queue_bio()
820 atomic_set(&pd->iosched.attention, 1); pkt_queue_bio()
821 wake_up(&pd->wqueue); pkt_queue_bio()
840 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) pkt_iosched_process_queue() argument
843 if (atomic_read(&pd->iosched.attention) == 0) pkt_iosched_process_queue()
845 atomic_set(&pd->iosched.attention, 0); pkt_iosched_process_queue()
851 spin_lock(&pd->iosched.lock); pkt_iosched_process_queue()
852 reads_queued = !bio_list_empty(&pd->iosched.read_queue); pkt_iosched_process_queue()
853 writes_queued = !bio_list_empty(&pd->iosched.write_queue); pkt_iosched_process_queue()
854 spin_unlock(&pd->iosched.lock); pkt_iosched_process_queue()
859 if (pd->iosched.writing) { pkt_iosched_process_queue()
861 spin_lock(&pd->iosched.lock); pkt_iosched_process_queue()
862 bio = bio_list_peek(&pd->iosched.write_queue); pkt_iosched_process_queue()
863 spin_unlock(&pd->iosched.lock); pkt_iosched_process_queue()
865 pd->iosched.last_write)) pkt_iosched_process_queue()
868 if (atomic_read(&pd->cdrw.pending_bios) > 0) { pkt_iosched_process_queue()
869 pkt_dbg(2, pd, "write, waiting\n"); pkt_iosched_process_queue()
872 pkt_flush_cache(pd); pkt_iosched_process_queue()
873 pd->iosched.writing = 0; pkt_iosched_process_queue()
877 if (atomic_read(&pd->cdrw.pending_bios) > 0) { pkt_iosched_process_queue()
878 pkt_dbg(2, pd, "read, waiting\n"); pkt_iosched_process_queue()
881 pd->iosched.writing = 1; pkt_iosched_process_queue()
885 spin_lock(&pd->iosched.lock); pkt_iosched_process_queue()
886 if (pd->iosched.writing) pkt_iosched_process_queue()
887 bio = bio_list_pop(&pd->iosched.write_queue); pkt_iosched_process_queue()
889 bio = bio_list_pop(&pd->iosched.read_queue); pkt_iosched_process_queue()
890 spin_unlock(&pd->iosched.lock); pkt_iosched_process_queue()
896 pd->iosched.successive_reads += pkt_iosched_process_queue()
899 pd->iosched.successive_reads = 0; pkt_iosched_process_queue()
900 pd->iosched.last_write = bio_end_sector(bio); pkt_iosched_process_queue()
902 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { pkt_iosched_process_queue()
903 if (pd->read_speed == pd->write_speed) { pkt_iosched_process_queue()
904 pd->read_speed = MAX_SPEED; pkt_iosched_process_queue()
905 pkt_set_speed(pd, pd->write_speed, pd->read_speed); pkt_iosched_process_queue()
908 if (pd->read_speed != pd->write_speed) { pkt_iosched_process_queue()
909 pd->read_speed = pd->write_speed; pkt_iosched_process_queue()
910 pkt_set_speed(pd, pd->write_speed, pd->read_speed); pkt_iosched_process_queue()
914 atomic_inc(&pd->cdrw.pending_bios); pkt_iosched_process_queue()
923 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) pkt_set_segment_merging() argument
925 if ((pd->settings.size << 9) / CD_FRAMESIZE pkt_set_segment_merging()
930 clear_bit(PACKET_MERGE_SEGS, &pd->flags); pkt_set_segment_merging()
932 } else if ((pd->settings.size << 9) / PAGE_SIZE pkt_set_segment_merging()
938 set_bit(PACKET_MERGE_SEGS, &pd->flags); pkt_set_segment_merging()
941 pkt_err(pd, "cdrom max_phys_segments too small\n"); pkt_set_segment_merging()
982 struct pktcdvd_device *pd = pkt->pd; pkt_end_io_read() local
983 BUG_ON(!pd); pkt_end_io_read()
985 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", pkt_end_io_read()
993 wake_up(&pd->wqueue); pkt_end_io_read()
995 pkt_bio_finished(pd); pkt_end_io_read()
1001 struct pktcdvd_device *pd = pkt->pd; pkt_end_io_packet_write() local
1002 BUG_ON(!pd); pkt_end_io_packet_write()
1004 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); pkt_end_io_packet_write()
1006 pd->stats.pkt_ended++; pkt_end_io_packet_write()
1008 pkt_bio_finished(pd); pkt_end_io_packet_write()
1011 wake_up(&pd->wqueue); pkt_end_io_packet_write()
1017 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_gather_data() argument
1038 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); pkt_gather_data()
1047 pkt_dbg(2, pd, "zone %llx cached\n", pkt_gather_data()
1064 bio->bi_bdev = pd->bdev; pkt_gather_data()
1070 pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", pkt_gather_data()
1077 pkt_queue_bio(pd, bio); pkt_gather_data()
1082 pkt_dbg(2, pd, "need %d frames for zone %llx\n", pkt_gather_data()
1084 pd->stats.pkt_started++; pkt_gather_data()
1085 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); pkt_gather_data()
1092 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) pkt_get_packet_data() argument
1096 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { pkt_get_packet_data()
1097 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { pkt_get_packet_data()
1108 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_put_packet_data() argument
1111 list_add(&pkt->list, &pd->cdrw.pkt_free_list); pkt_put_packet_data()
1113 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); pkt_put_packet_data()
1132 struct pktcdvd_device *pd = rq->rq_disk->private_data; pkt_start_recovery()
1138 pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev)); pkt_start_recovery()
1158 pkt->bio->bi_bdev = pd->bdev; pkt_start_recovery()
1183 pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", pkt_set_state()
1194 static int pkt_handle_queue(struct pktcdvd_device *pd) pkt_handle_queue() argument
1203 atomic_set(&pd->scan_queue, 0); pkt_handle_queue()
1205 if (list_empty(&pd->cdrw.pkt_free_list)) { pkt_handle_queue()
1206 pkt_dbg(2, pd, "no pkt\n"); pkt_handle_queue()
1213 spin_lock(&pd->lock); pkt_handle_queue()
1214 first_node = pkt_rbtree_find(pd, pd->current_sector); pkt_handle_queue()
1216 n = rb_first(&pd->bio_queue); pkt_handle_queue()
1223 zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_handle_queue()
1224 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { pkt_handle_queue()
1234 n = rb_first(&pd->bio_queue); pkt_handle_queue()
1241 spin_unlock(&pd->lock); pkt_handle_queue()
1243 pkt_dbg(2, pd, "no bio\n"); pkt_handle_queue()
1247 pkt = pkt_get_packet_data(pd, zone); pkt_handle_queue()
1249 pd->current_sector = zone + pd->settings.size; pkt_handle_queue()
1251 BUG_ON(pkt->frames != pd->settings.size >> 2); pkt_handle_queue()
1258 spin_lock(&pd->lock); pkt_handle_queue()
1259 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); pkt_handle_queue()
1260 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { pkt_handle_queue()
1262 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) pkt_handle_queue()
1263 get_zone(bio->bi_iter.bi_sector, pd)); pkt_handle_queue()
1264 if (get_zone(bio->bi_iter.bi_sector, pd) != zone) pkt_handle_queue()
1266 pkt_rbtree_erase(pd, node); pkt_handle_queue()
1274 wakeup = (pd->write_congestion_on > 0 pkt_handle_queue()
1275 && pd->bio_queue_size <= pd->write_congestion_off); pkt_handle_queue()
1276 spin_unlock(&pd->lock); pkt_handle_queue()
1278 clear_bdi_congested(&pd->disk->queue->backing_dev_info, pkt_handle_queue()
1286 spin_lock(&pd->cdrw.active_list_lock); pkt_handle_queue()
1287 list_add(&pkt->list, &pd->cdrw.pkt_active_list); pkt_handle_queue()
1288 spin_unlock(&pd->cdrw.active_list_lock); pkt_handle_queue()
1297 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_start_write() argument
1304 pkt->w_bio->bi_bdev = pd->bdev; pkt_start_write()
1315 pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); pkt_start_write()
1326 pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", pkt_start_write()
1329 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { pkt_start_write()
1339 pkt_queue_bio(pd, pkt->w_bio); pkt_start_write()
1354 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_run_state_machine() argument
1358 pkt_dbg(2, pd, "pkt %d\n", pkt->id); pkt_run_state_machine()
1367 pkt_gather_data(pd, pkt); pkt_run_state_machine()
1378 pkt_start_write(pd, pkt); pkt_run_state_machine()
1395 pkt_start_write(pd, pkt); pkt_run_state_machine()
1397 pkt_dbg(2, pd, "No recovery possible\n"); pkt_run_state_machine()
1414 static void pkt_handle_packets(struct pktcdvd_device *pd) pkt_handle_packets() argument
1421 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { pkt_handle_packets()
1424 pkt_run_state_machine(pd, pkt); pkt_handle_packets()
1431 spin_lock(&pd->cdrw.active_list_lock); pkt_handle_packets()
1432 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { pkt_handle_packets()
1435 pkt_put_packet_data(pd, pkt); pkt_handle_packets()
1437 atomic_set(&pd->scan_queue, 1); pkt_handle_packets()
1440 spin_unlock(&pd->cdrw.active_list_lock); pkt_handle_packets()
1443 static void pkt_count_states(struct pktcdvd_device *pd, int *states) pkt_count_states() argument
1451 spin_lock(&pd->cdrw.active_list_lock); pkt_count_states()
1452 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { pkt_count_states()
1455 spin_unlock(&pd->cdrw.active_list_lock); pkt_count_states()
1464 struct pktcdvd_device *pd = foobar; kcdrwd() local
1477 add_wait_queue(&pd->wqueue, &wait); kcdrwd()
1482 if (atomic_read(&pd->scan_queue) > 0) kcdrwd()
1486 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { kcdrwd()
1492 if (atomic_read(&pd->iosched.attention) != 0) kcdrwd()
1498 pkt_count_states(pd, states); kcdrwd()
1499 pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", kcdrwd()
1505 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { kcdrwd()
1510 pkt_dbg(2, pd, "sleeping\n"); kcdrwd()
1512 pkt_dbg(2, pd, "wake up\n"); kcdrwd()
1517 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { kcdrwd()
1532 remove_wait_queue(&pd->wqueue, &wait); kcdrwd()
1541 while (pkt_handle_queue(pd)) kcdrwd()
1547 pkt_handle_packets(pd); kcdrwd()
1552 pkt_iosched_process_queue(pd); kcdrwd()
1558 static void pkt_print_settings(struct pktcdvd_device *pd) pkt_print_settings() argument
1560 pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", pkt_print_settings()
1561 pd->settings.fp ? "Fixed" : "Variable", pkt_print_settings()
1562 pd->settings.size >> 2, pkt_print_settings()
1563 pd->settings.block_mode == 8 ? '1' : '2'); pkt_print_settings()
1566 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control) pkt_mode_sense() argument
1575 return pkt_generic_packet(pd, cgc); pkt_mode_sense()
1578 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) pkt_mode_select() argument
1587 return pkt_generic_packet(pd, cgc); pkt_mode_select()
1590 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) pkt_get_disc_info() argument
1601 if ((ret = pkt_generic_packet(pd, &cgc))) pkt_get_disc_info()
1614 return pkt_generic_packet(pd, &cgc); pkt_get_disc_info()
1617 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti) pkt_get_track_info() argument
1630 if ((ret = pkt_generic_packet(pd, &cgc))) pkt_get_track_info()
1640 return pkt_generic_packet(pd, &cgc); pkt_get_track_info()
1643 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, pkt_get_last_written() argument
1651 if ((ret = pkt_get_disc_info(pd, &di))) pkt_get_last_written()
1655 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) pkt_get_last_written()
1661 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) pkt_get_last_written()
1679 * write mode select package based on pd->settings
1681 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) pkt_set_write_settings() argument
1690 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) pkt_set_write_settings()
1696 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { pkt_set_write_settings()
1697 pkt_dump_sense(pd, &cgc); pkt_set_write_settings()
1702 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); pkt_set_write_settings()
1711 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { pkt_set_write_settings()
1712 pkt_dump_sense(pd, &cgc); pkt_set_write_settings()
1719 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; pkt_set_write_settings()
1721 wp->fp = pd->settings.fp; pkt_set_write_settings()
1722 wp->track_mode = pd->settings.track_mode; pkt_set_write_settings()
1723 wp->write_type = pd->settings.write_type; pkt_set_write_settings()
1724 wp->data_block_type = pd->settings.block_mode; pkt_set_write_settings()
1747 pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); pkt_set_write_settings()
1750 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); pkt_set_write_settings()
1753 if ((ret = pkt_mode_select(pd, &cgc))) { pkt_set_write_settings()
1754 pkt_dump_sense(pd, &cgc); pkt_set_write_settings()
1758 pkt_print_settings(pd); pkt_set_write_settings()
1765 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) pkt_writable_track() argument
1767 switch (pd->mmc3_profile) { pkt_writable_track()
1791 pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); pkt_writable_track()
1798 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) pkt_writable_disc() argument
1800 switch (pd->mmc3_profile) { pkt_writable_disc()
1809 pkt_dbg(2, pd, "Wrong disc profile (%x)\n", pkt_writable_disc()
1810 pd->mmc3_profile); pkt_writable_disc()
1819 pkt_notice(pd, "unknown disc - no track?\n"); pkt_writable_disc()
1824 pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); pkt_writable_disc()
1829 pkt_notice(pd, "disc not erasable\n"); pkt_writable_disc()
1834 pkt_err(pd, "can't write to last track (reserved)\n"); pkt_writable_disc()
1841 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) pkt_probe_settings() argument
1852 ret = pkt_generic_packet(pd, &cgc); pkt_probe_settings()
1853 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7]; pkt_probe_settings()
1858 if ((ret = pkt_get_disc_info(pd, &di))) { pkt_probe_settings()
1859 pkt_err(pd, "failed get_disc\n"); pkt_probe_settings()
1863 if (!pkt_writable_disc(pd, &di)) pkt_probe_settings()
1866 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; pkt_probe_settings()
1869 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { pkt_probe_settings()
1870 pkt_err(pd, "failed get_track\n"); pkt_probe_settings()
1874 if (!pkt_writable_track(pd, &ti)) { pkt_probe_settings()
1875 pkt_err(pd, "can't write to this track\n"); pkt_probe_settings()
1883 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; pkt_probe_settings()
1884 if (pd->settings.size == 0) { pkt_probe_settings()
1885 pkt_notice(pd, "detected zero packet size!\n"); pkt_probe_settings()
1888 if (pd->settings.size > PACKET_MAX_SECTORS) { pkt_probe_settings()
1889 pkt_err(pd, "packet size is too big\n"); pkt_probe_settings()
1892 pd->settings.fp = ti.fp; pkt_probe_settings()
1893 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); pkt_probe_settings()
1896 pd->nwa = be32_to_cpu(ti.next_writable); pkt_probe_settings()
1897 set_bit(PACKET_NWA_VALID, &pd->flags); pkt_probe_settings()
1906 pd->lra = be32_to_cpu(ti.last_rec_address); pkt_probe_settings()
1907 set_bit(PACKET_LRA_VALID, &pd->flags); pkt_probe_settings()
1909 pd->lra = 0xffffffff; pkt_probe_settings()
1910 set_bit(PACKET_LRA_VALID, &pd->flags); pkt_probe_settings()
1916 pd->settings.link_loss = 7; pkt_probe_settings()
1917 pd->settings.write_type = 0; /* packet */ pkt_probe_settings()
1918 pd->settings.track_mode = ti.track_mode; pkt_probe_settings()
1925 pd->settings.block_mode = PACKET_BLOCK_MODE1; pkt_probe_settings()
1928 pd->settings.block_mode = PACKET_BLOCK_MODE2; pkt_probe_settings()
1931 pkt_err(pd, "unknown data mode\n"); pkt_probe_settings()
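pkt_probe_settings() derives pd->offset by masking the track start with (pd->settings.size - 1); this works because the fixed packet size is a power of two, so the mask is a cheap remainder. A tiny sketch with our own helper name:

#include <stdio.h>

/* for a power-of-two size, x & (size - 1) == x % size */
static unsigned long offset_in_packet(unsigned long sector,
				      unsigned long packet_size)
{
	return sector & (packet_size - 1);
}

int main(void)
{
	/* 150 % 32 == 22 */
	printf("%lu\n", offset_in_packet(150, 32));
	return 0;
}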
1940 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, pkt_write_caching() argument
1950 cgc.buflen = pd->mode_offset + 12; pkt_write_caching()
1957 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0))) pkt_write_caching()
1960 buf[pd->mode_offset + 10] |= (!!set << 2); pkt_write_caching()
1963 ret = pkt_mode_select(pd, &cgc); pkt_write_caching()
1965 pkt_err(pd, "write caching control failed\n"); pkt_write_caching()
1966 pkt_dump_sense(pd, &cgc); pkt_write_caching()
1968 pkt_notice(pd, "enabled write caching\n"); pkt_write_caching()
1972 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) pkt_lock_door() argument
1979 return pkt_generic_packet(pd, &cgc); pkt_lock_door()
1985 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, pkt_get_max_speed() argument
1994 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; pkt_get_max_speed()
1998 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); pkt_get_max_speed()
2000 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + pkt_get_max_speed()
2002 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); pkt_get_max_speed()
2004 pkt_dump_sense(pd, &cgc); pkt_get_max_speed()
2046 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, pkt_media_speed() argument
2061 ret = pkt_generic_packet(pd, &cgc); pkt_media_speed()
2063 pkt_dump_sense(pd, &cgc); pkt_media_speed()
2076 ret = pkt_generic_packet(pd, &cgc); pkt_media_speed()
2078 pkt_dump_sense(pd, &cgc); pkt_media_speed()
2083 pkt_notice(pd, "disc type is not CD-RW\n"); pkt_media_speed()
2087 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); pkt_media_speed()
2107 pkt_notice(pd, "unknown disc sub-type %d\n", st); pkt_media_speed()
2111 pkt_info(pd, "maximum media speed: %d\n", *speed); pkt_media_speed()
2114 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); pkt_media_speed()
2119 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) pkt_perform_opc() argument
2125 pkt_dbg(2, pd, "Performing OPC\n"); pkt_perform_opc()
2132 if ((ret = pkt_generic_packet(pd, &cgc))) pkt_perform_opc()
2133 pkt_dump_sense(pd, &cgc); pkt_perform_opc()
2137 static int pkt_open_write(struct pktcdvd_device *pd) pkt_open_write() argument
2142 if ((ret = pkt_probe_settings(pd))) { pkt_open_write()
2143 pkt_dbg(2, pd, "failed probe\n"); pkt_open_write()
2147 if ((ret = pkt_set_write_settings(pd))) { pkt_open_write()
2148 pkt_dbg(1, pd, "failed saving write settings\n"); pkt_open_write()
2152 pkt_write_caching(pd, USE_WCACHING); pkt_open_write()
2154 if ((ret = pkt_get_max_speed(pd, &write_speed))) pkt_open_write()
2156 switch (pd->mmc3_profile) { pkt_open_write()
2160 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); pkt_open_write()
2163 if ((ret = pkt_media_speed(pd, &media_write_speed))) pkt_open_write()
2166 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); pkt_open_write()
2171 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { pkt_open_write()
2172 pkt_dbg(1, pd, "couldn't set write speed\n"); pkt_open_write()
2175 pd->write_speed = write_speed; pkt_open_write()
2176 pd->read_speed = read_speed; pkt_open_write()
2178 if ((ret = pkt_perform_opc(pd))) { pkt_open_write()
2179 pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); pkt_open_write()
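The "write speed %ux" message above divides by 176 because MMC drives report speed in kB/s and 1x CD is 75 frames/s of 2352 raw bytes, roughly 176 kB/s. A one-line sketch of the conversion (the helper name is ours):

#include <stdio.h>

/* kB/s -> the familiar "Nx" CD multiplier; 1x is about 176 kB/s */
static unsigned int kbps_to_cd_multiple(unsigned int kbps)
{
	return kbps / 176;
}

int main(void)
{
	printf("%ux\n", kbps_to_cd_multiple(1760));	/* 10x */
	return 0;
}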
2188 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) pkt_open_dev() argument
2199 bdget(pd->bdev->bd_dev); pkt_open_dev()
2200 if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) pkt_open_dev()
2203 if ((ret = pkt_get_last_written(pd, &lba))) { pkt_open_dev()
2204 pkt_err(pd, "pkt_get_last_written failed\n"); pkt_open_dev()
2208 set_capacity(pd->disk, lba << 2); pkt_open_dev()
2209 set_capacity(pd->bdev->bd_disk, lba << 2); pkt_open_dev()
2210 bd_set_size(pd->bdev, (loff_t)lba << 11); pkt_open_dev()
2212 q = bdev_get_queue(pd->bdev); pkt_open_dev()
2214 if ((ret = pkt_open_write(pd))) pkt_open_dev()
2221 blk_queue_max_hw_sectors(q, pd->settings.size); pkt_open_dev()
2223 set_bit(PACKET_WRITABLE, &pd->flags); pkt_open_dev()
2225 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); pkt_open_dev()
2226 clear_bit(PACKET_WRITABLE, &pd->flags); pkt_open_dev()
2229 if ((ret = pkt_set_segment_merging(pd, q))) pkt_open_dev()
2233 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { pkt_open_dev()
2234 pkt_err(pd, "not enough memory for buffers\n"); pkt_open_dev()
2238 pkt_info(pd, "%lukB available on disc\n", lba << 1); pkt_open_dev()
2244 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); pkt_open_dev()
2253 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_release_dev() argument
2255 if (flush && pkt_flush_cache(pd)) pkt_release_dev()
2256 pkt_dbg(1, pd, "not flushing cache\n"); pkt_release_dev()
2258 pkt_lock_door(pd, 0); pkt_release_dev()
2260 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); pkt_release_dev()
2261 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); pkt_release_dev()
2263 pkt_shrink_pktlist(pd); pkt_release_dev()
2275 struct pktcdvd_device *pd = NULL; pkt_open() local
2280 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); pkt_open()
2281 if (!pd) { pkt_open()
2285 BUG_ON(pd->refcnt < 0); pkt_open()
2287 pd->refcnt++; pkt_open()
2288 if (pd->refcnt > 1) { pkt_open()
2290 !test_bit(PACKET_WRITABLE, &pd->flags)) { pkt_open()
2295 ret = pkt_open_dev(pd, mode & FMODE_WRITE); pkt_open()
2310 pd->refcnt--; pkt_open()
2319 struct pktcdvd_device *pd = disk->private_data; pkt_close() local
2323 pd->refcnt--; pkt_close()
2324 BUG_ON(pd->refcnt < 0); pkt_close()
2325 if (pd->refcnt == 0) { pkt_close()
2326 int flush = test_bit(PACKET_WRITABLE, &pd->flags); pkt_close()
2327 pkt_release_dev(pd, flush); pkt_close()
2337 struct pktcdvd_device *pd = psd->pd; pkt_end_io_read_cloned() local
2342 pkt_bio_finished(pd); pkt_end_io_read_cloned()
2345 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) pkt_make_request_read() argument
2350 psd->pd = pd; pkt_make_request_read()
2352 cloned_bio->bi_bdev = pd->bdev; pkt_make_request_read()
2355 pd->stats.secs_r += bio_sectors(bio); pkt_make_request_read()
2356 pkt_queue_bio(pd, cloned_bio); pkt_make_request_read()
2361 struct pktcdvd_device *pd = q->queuedata; pkt_make_request_write() local
2367 zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_make_request_write()
2373 spin_lock(&pd->cdrw.active_list_lock); pkt_make_request_write()
2375 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { pkt_make_request_write()
2386 wake_up(&pd->wqueue); pkt_make_request_write()
2389 spin_unlock(&pd->cdrw.active_list_lock); pkt_make_request_write()
2397 spin_unlock(&pd->cdrw.active_list_lock); pkt_make_request_write()
2404 spin_lock(&pd->lock); pkt_make_request_write()
2405 if (pd->write_congestion_on > 0 pkt_make_request_write()
2406 && pd->bio_queue_size >= pd->write_congestion_on) { pkt_make_request_write()
2409 spin_unlock(&pd->lock); pkt_make_request_write()
2411 spin_lock(&pd->lock); pkt_make_request_write()
2412 } while(pd->bio_queue_size > pd->write_congestion_off); pkt_make_request_write()
2414 spin_unlock(&pd->lock); pkt_make_request_write()
2419 node = mempool_alloc(pd->rb_pool, GFP_NOIO); pkt_make_request_write()
2421 spin_lock(&pd->lock); pkt_make_request_write()
2422 BUG_ON(pd->bio_queue_size < 0); pkt_make_request_write()
2423 was_empty = (pd->bio_queue_size == 0); pkt_make_request_write()
2424 pkt_rbtree_insert(pd, node); pkt_make_request_write()
2425 spin_unlock(&pd->lock); pkt_make_request_write()
2430 atomic_set(&pd->scan_queue, 1); pkt_make_request_write()
2433 wake_up(&pd->wqueue); pkt_make_request_write()
2434 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { pkt_make_request_write()
2439 wake_up(&pd->wqueue); pkt_make_request_write()
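The congestion block in pkt_make_request_write() is a two-watermark throttle: writers stall once bio_queue_size reaches write_congestion_on and are released only after the queue drains below write_congestion_off, so producers don't thrash around a single threshold. A userspace sketch of the same hysteresis, with pthread primitives standing in for the kernel's spinlock and congestion wait:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int bio_queue_size;
static const int congestion_on = 8, congestion_off = 4;

/* producer: once the high watermark is reached, sleep until the queue
 * has drained below the *lower* watermark, not just below the high one */
static void queue_write(void)
{
	pthread_mutex_lock(&lock);
	if (bio_queue_size >= congestion_on)
		while (bio_queue_size > congestion_off)
			pthread_cond_wait(&drained, &lock);
	bio_queue_size++;
	pthread_mutex_unlock(&lock);
}

/* consumer: waking writers only at the low watermark gives the hysteresis */
static void complete_write(void)
{
	pthread_mutex_lock(&lock);
	if (--bio_queue_size <= congestion_off)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		queue_write();
	for (i = 0; i < 6; i++)
		complete_write();
	printf("%d in flight at exit\n", bio_queue_size);
	return 0;
}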
2445 struct pktcdvd_device *pd; pkt_make_request() local
2449 pd = q->queuedata; pkt_make_request()
2450 if (!pd) { pkt_make_request()
2456 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", pkt_make_request()
2464 pkt_make_request_read(pd, bio); pkt_make_request()
2468 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { pkt_make_request()
2469 pkt_notice(pd, "WRITE for ro device (%llu)\n", pkt_make_request()
2475 pkt_err(pd, "wrong bio size\n"); pkt_make_request()
2482 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_make_request()
2483 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); pkt_make_request()
2486 BUG_ON(last_zone != zone + pd->settings.size); pkt_make_request()
2509 struct pktcdvd_device *pd = q->queuedata; pkt_merge_bvec() local
2510 sector_t zone = get_zone(bmd->bi_sector, pd); pkt_merge_bvec()
2512 int remaining = (pd->settings.size << 9) - used; pkt_merge_bvec()
2526 static void pkt_init_queue(struct pktcdvd_device *pd) pkt_init_queue() argument
2528 struct request_queue *q = pd->disk->queue; pkt_init_queue()
2534 q->queuedata = pd; pkt_init_queue()
2539 struct pktcdvd_device *pd = m->private; pkt_seq_show() local
2544 seq_printf(m, "Writer %s mapped to %s:\n", pd->name, pkt_seq_show()
2545 bdevname(pd->bdev, bdev_buf)); pkt_seq_show()
2548 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); pkt_seq_show()
2550 if (pd->settings.write_type == 0) pkt_seq_show()
2556 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); pkt_seq_show()
2557 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); pkt_seq_show()
2559 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); pkt_seq_show()
2561 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) pkt_seq_show()
2563 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) pkt_seq_show()
2570 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); pkt_seq_show()
2571 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); pkt_seq_show()
2572 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); pkt_seq_show()
2573 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); pkt_seq_show()
2574 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); pkt_seq_show()
2577 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); pkt_seq_show()
2578 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); pkt_seq_show()
2579 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); pkt_seq_show()
2580 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); pkt_seq_show()
2581 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); pkt_seq_show()
2582 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); pkt_seq_show()
2585 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); pkt_seq_show()
2586 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); pkt_seq_show()
2587 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); pkt_seq_show()
2589 pkt_count_states(pd, states); pkt_seq_show()
2594 pd->write_congestion_off, pkt_seq_show()
2595 pd->write_congestion_on); pkt_seq_show()
2611 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) pkt_new_dev() argument
2618 if (pd->pkt_dev == dev) { pkt_new_dev()
2619 pkt_err(pd, "recursive setup not allowed\n"); pkt_new_dev()
2627 pkt_err(pd, "%s already setup\n", pkt_new_dev()
2632 pkt_err(pd, "can't chain pktcdvd devices\n"); pkt_new_dev()
2647 pd->bdev = bdev; pkt_new_dev()
2650 pkt_init_queue(pd); pkt_new_dev()
2652 atomic_set(&pd->cdrw.pending_bios, 0); pkt_new_dev()
2653 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); pkt_new_dev()
2654 if (IS_ERR(pd->cdrw.thread)) { pkt_new_dev()
2655 pkt_err(pd, "can't start kernel thread\n"); pkt_new_dev()
2660 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); pkt_new_dev()
2661 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); pkt_new_dev()
2673 struct pktcdvd_device *pd = bdev->bd_disk->private_data; pkt_ioctl() local
2676 pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", pkt_ioctl()
2686 if (pd->refcnt == 1) pkt_ioctl()
2687 pkt_lock_door(pd, 0); pkt_ioctl()
2697 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); pkt_ioctl()
2701 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd); pkt_ioctl()
2712 struct pktcdvd_device *pd = disk->private_data; pkt_check_events() local
2715 if (!pd) pkt_check_events()
2717 if (!pd->bdev) pkt_check_events()
2719 attached_disk = pd->bdev->bd_disk; pkt_check_events()
2745 struct pktcdvd_device *pd; pkt_setup_dev() local
2759 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); pkt_setup_dev()
2760 if (!pd) pkt_setup_dev()
2763 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, pkt_setup_dev()
2765 if (!pd->rb_pool) pkt_setup_dev()
2768 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); pkt_setup_dev()
2769 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); pkt_setup_dev()
2770 spin_lock_init(&pd->cdrw.active_list_lock); pkt_setup_dev()
2772 spin_lock_init(&pd->lock); pkt_setup_dev()
2773 spin_lock_init(&pd->iosched.lock); pkt_setup_dev()
2774 bio_list_init(&pd->iosched.read_queue); pkt_setup_dev()
2775 bio_list_init(&pd->iosched.write_queue); pkt_setup_dev()
2776 sprintf(pd->name, DRIVER_NAME"%d", idx); pkt_setup_dev()
2777 init_waitqueue_head(&pd->wqueue); pkt_setup_dev()
2778 pd->bio_queue = RB_ROOT; pkt_setup_dev()
2780 pd->write_congestion_on = write_congestion_on; pkt_setup_dev()
2781 pd->write_congestion_off = write_congestion_off; pkt_setup_dev()
2786 pd->disk = disk; pkt_setup_dev()
2791 strcpy(disk->disk_name, pd->name); pkt_setup_dev()
2793 disk->private_data = pd; pkt_setup_dev()
2798 pd->pkt_dev = MKDEV(pktdev_major, idx); pkt_setup_dev()
2799 ret = pkt_new_dev(pd, dev); pkt_setup_dev()
2804 disk->events = pd->bdev->bd_disk->events; pkt_setup_dev()
2805 disk->async_events = pd->bdev->bd_disk->async_events; pkt_setup_dev()
2809 pkt_sysfs_dev_new(pd); pkt_setup_dev()
2810 pkt_debugfs_dev_new(pd); pkt_setup_dev()
2812 pkt_devs[idx] = pd; pkt_setup_dev()
2814 *pkt_dev = pd->pkt_dev; pkt_setup_dev()
2824 if (pd->rb_pool) pkt_setup_dev()
2825 mempool_destroy(pd->rb_pool); pkt_setup_dev()
2826 kfree(pd); pkt_setup_dev()
2838 struct pktcdvd_device *pd; pkt_remove_dev() local
2845 pd = pkt_devs[idx]; pkt_remove_dev()
2846 if (pd && (pd->pkt_dev == pkt_dev)) pkt_remove_dev()
2855 if (pd->refcnt > 0) { pkt_remove_dev()
2859 if (!IS_ERR(pd->cdrw.thread)) pkt_remove_dev()
2860 kthread_stop(pd->cdrw.thread); pkt_remove_dev()
2864 pkt_debugfs_dev_remove(pd); pkt_remove_dev()
2865 pkt_sysfs_dev_remove(pd); pkt_remove_dev()
2867 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); pkt_remove_dev()
2869 remove_proc_entry(pd->name, pkt_proc); pkt_remove_dev()
2870 pkt_dbg(1, pd, "writer unmapped\n"); pkt_remove_dev()
2872 del_gendisk(pd->disk); pkt_remove_dev()
2873 blk_cleanup_queue(pd->disk->queue); pkt_remove_dev()
2874 put_disk(pd->disk); pkt_remove_dev()
2876 mempool_destroy(pd->rb_pool); pkt_remove_dev()
2877 kfree(pd); pkt_remove_dev()
2889 struct pktcdvd_device *pd; pkt_get_status() local
2893 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); pkt_get_status()
2894 if (pd) { pkt_get_status()
2895 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); pkt_get_status()
2896 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); pkt_get_status()
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dmmu.c138 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) psb_mmu_set_pd_context() argument
140 struct drm_device *dev = pd->driver->dev; psb_mmu_set_pd_context()
145 down_write(&pd->driver->sem); psb_mmu_set_pd_context()
146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); psb_mmu_set_pd_context()
148 psb_mmu_flush_pd_locked(pd->driver, 1); psb_mmu_set_pd_context()
149 pd->hw_context = hw_context; psb_mmu_set_pd_context()
150 up_write(&pd->driver->sem); psb_mmu_set_pd_context()
178 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); psb_mmu_alloc_pd() local
182 if (!pd) psb_mmu_alloc_pd()
185 pd->p = alloc_page(GFP_DMA32); psb_mmu_alloc_pd()
186 if (!pd->p) psb_mmu_alloc_pd()
188 pd->dummy_pt = alloc_page(GFP_DMA32); psb_mmu_alloc_pd()
189 if (!pd->dummy_pt) psb_mmu_alloc_pd()
191 pd->dummy_page = alloc_page(GFP_DMA32); psb_mmu_alloc_pd()
192 if (!pd->dummy_page) psb_mmu_alloc_pd()
196 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), psb_mmu_alloc_pd()
198 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), psb_mmu_alloc_pd()
201 pd->invalid_pde = 0; psb_mmu_alloc_pd()
202 pd->invalid_pte = 0; psb_mmu_alloc_pd()
205 v = kmap(pd->dummy_pt); psb_mmu_alloc_pd()
207 v[i] = pd->invalid_pte; psb_mmu_alloc_pd()
209 kunmap(pd->dummy_pt); psb_mmu_alloc_pd()
211 v = kmap(pd->p); psb_mmu_alloc_pd()
213 v[i] = pd->invalid_pde; psb_mmu_alloc_pd()
215 kunmap(pd->p); psb_mmu_alloc_pd()
217 clear_page(kmap(pd->dummy_page)); psb_mmu_alloc_pd()
218 kunmap(pd->dummy_page); psb_mmu_alloc_pd()
220 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); psb_mmu_alloc_pd()
221 if (!pd->tables) psb_mmu_alloc_pd()
224 pd->hw_context = -1; psb_mmu_alloc_pd()
225 pd->pd_mask = PSB_PTE_VALID; psb_mmu_alloc_pd()
226 pd->driver = driver; psb_mmu_alloc_pd()
228 return pd; psb_mmu_alloc_pd()
231 __free_page(pd->dummy_page); psb_mmu_alloc_pd()
233 __free_page(pd->dummy_pt); psb_mmu_alloc_pd()
235 __free_page(pd->p); psb_mmu_alloc_pd()
237 kfree(pd); psb_mmu_alloc_pd()
247 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) psb_mmu_free_pagedir() argument
249 struct psb_mmu_driver *driver = pd->driver; psb_mmu_free_pagedir()
256 if (pd->hw_context != -1) { psb_mmu_free_pagedir()
257 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); psb_mmu_free_pagedir()
265 pt = pd->tables[i]; psb_mmu_free_pagedir()
270 vfree(pd->tables); psb_mmu_free_pagedir()
271 __free_page(pd->dummy_page); psb_mmu_free_pagedir()
272 __free_page(pd->dummy_pt); psb_mmu_free_pagedir()
273 __free_page(pd->p); psb_mmu_free_pagedir()
274 kfree(pd); psb_mmu_free_pagedir()
278 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) psb_mmu_alloc_pt() argument
282 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; psb_mmu_alloc_pt()
284 spinlock_t *lock = &pd->driver->lock; psb_mmu_alloc_pt()
304 *ptes++ = pd->invalid_pte; psb_mmu_alloc_pt()
307 if (pd->driver->has_clflush && pd->hw_context != -1) { psb_mmu_alloc_pt()
320 pt->pd = pd; psb_mmu_alloc_pt()
326 struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, psb_mmu_pt_alloc_map_lock() argument
332 spinlock_t *lock = &pd->driver->lock; psb_mmu_pt_alloc_map_lock()
335 pt = pd->tables[index]; psb_mmu_pt_alloc_map_lock()
338 pt = psb_mmu_alloc_pt(pd); psb_mmu_pt_alloc_map_lock()
343 if (pd->tables[index]) { psb_mmu_pt_alloc_map_lock()
347 pt = pd->tables[index]; psb_mmu_pt_alloc_map_lock()
351 v = kmap_atomic(pd->p); psb_mmu_pt_alloc_map_lock()
352 pd->tables[index] = pt; psb_mmu_pt_alloc_map_lock()
353 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; psb_mmu_pt_alloc_map_lock()
357 if (pd->hw_context != -1) { psb_mmu_pt_alloc_map_lock()
358 psb_mmu_clflush(pd->driver, (void *)&v[index]); psb_mmu_pt_alloc_map_lock()
359 atomic_set(&pd->driver->needs_tlbflush, 1); psb_mmu_pt_alloc_map_lock()
366 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, psb_mmu_pt_map_lock() argument
371 spinlock_t *lock = &pd->driver->lock; psb_mmu_pt_map_lock()
374 pt = pd->tables[index]; psb_mmu_pt_map_lock()
385 struct psb_mmu_pd *pd = pt->pd; psb_mmu_pt_unmap_unlock() local
390 v = kmap_atomic(pd->p); psb_mmu_pt_unmap_unlock()
391 v[pt->index] = pd->invalid_pde; psb_mmu_pt_unmap_unlock()
392 pd->tables[pt->index] = NULL; psb_mmu_pt_unmap_unlock()
394 if (pd->hw_context != -1) { psb_mmu_pt_unmap_unlock()
395 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); psb_mmu_pt_unmap_unlock()
396 atomic_set(&pd->driver->needs_tlbflush, 1); psb_mmu_pt_unmap_unlock()
399 spin_unlock(&pd->driver->lock); psb_mmu_pt_unmap_unlock()
403 spin_unlock(&pd->driver->lock); psb_mmu_pt_unmap_unlock()
415 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; psb_mmu_invalidate_pte()
420 struct psb_mmu_pd *pd; psb_mmu_get_default_pd() local
423 pd = driver->default_pd; psb_mmu_get_default_pd()
426 return pd; psb_mmu_get_default_pd()
432 struct psb_mmu_pd *pd; psb_get_default_pd_addr() local
434 pd = psb_mmu_get_default_pd(driver); psb_get_default_pd_addr()
435 return page_to_pfn(pd->p) << PAGE_SHIFT; psb_get_default_pd_addr()
509 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, psb_mmu_flush_ptes() argument
521 unsigned long clflush_add = pd->driver->clflush_add; psb_mmu_flush_ptes()
522 unsigned long clflush_mask = pd->driver->clflush_mask; psb_mmu_flush_ptes()
524 if (!pd->driver->has_clflush) psb_mmu_flush_ptes()
542 pt = psb_mmu_pt_map_lock(pd, addr); psb_mmu_flush_ptes()
557 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, psb_mmu_flush_ptes() argument
565 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, psb_mmu_remove_pfn_sequence() argument
574 down_read(&pd->driver->sem); psb_mmu_remove_pfn_sequence()
581 pt = psb_mmu_pt_alloc_map_lock(pd, addr); psb_mmu_remove_pfn_sequence()
593 if (pd->hw_context != -1) psb_mmu_remove_pfn_sequence()
594 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); psb_mmu_remove_pfn_sequence()
596 up_read(&pd->driver->sem); psb_mmu_remove_pfn_sequence()
598 if (pd->hw_context != -1) psb_mmu_remove_pfn_sequence()
599 psb_mmu_flush(pd->driver); psb_mmu_remove_pfn_sequence()
604 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, psb_mmu_remove_pages() argument
626 down_read(&pd->driver->sem); psb_mmu_remove_pages()
637 pt = psb_mmu_pt_map_lock(pd, addr); psb_mmu_remove_pages()
650 if (pd->hw_context != -1) psb_mmu_remove_pages()
651 psb_mmu_flush_ptes(pd, f_address, num_pages, psb_mmu_remove_pages()
654 up_read(&pd->driver->sem); psb_mmu_remove_pages()
656 if (pd->hw_context != -1) psb_mmu_remove_pages()
657 psb_mmu_flush(pd->driver); psb_mmu_remove_pages()
660 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, psb_mmu_insert_pfn_sequence() argument
672 down_read(&pd->driver->sem); psb_mmu_insert_pfn_sequence()
679 pt = psb_mmu_pt_alloc_map_lock(pd, addr); psb_mmu_insert_pfn_sequence()
695 if (pd->hw_context != -1) psb_mmu_insert_pfn_sequence()
696 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); psb_mmu_insert_pfn_sequence()
698 up_read(&pd->driver->sem); psb_mmu_insert_pfn_sequence()
700 if (pd->hw_context != -1) psb_mmu_insert_pfn_sequence()
701 psb_mmu_flush(pd->driver); psb_mmu_insert_pfn_sequence()
706 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, psb_mmu_insert_pages() argument
734 down_read(&pd->driver->sem); psb_mmu_insert_pages()
743 pt = psb_mmu_pt_alloc_map_lock(pd, addr); psb_mmu_insert_pages()
761 if (pd->hw_context != -1) psb_mmu_insert_pages()
762 psb_mmu_flush_ptes(pd, f_address, num_pages, psb_mmu_insert_pages()
765 up_read(&pd->driver->sem); psb_mmu_insert_pages()
767 if (pd->hw_context != -1) psb_mmu_insert_pages()
768 psb_mmu_flush(pd->driver); psb_mmu_insert_pages()
773 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, psb_mmu_virtual_to_pfn() argument
779 spinlock_t *lock = &pd->driver->lock; psb_mmu_virtual_to_pfn()
781 down_read(&pd->driver->sem); psb_mmu_virtual_to_pfn()
782 pt = psb_mmu_pt_map_lock(pd, virtual); psb_mmu_virtual_to_pfn()
787 v = kmap_atomic(pd->p); psb_mmu_virtual_to_pfn()
792 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || psb_mmu_virtual_to_pfn()
793 !(pd->invalid_pte & PSB_PTE_VALID)) { psb_mmu_virtual_to_pfn()
798 *pfn = pd->invalid_pte >> PAGE_SHIFT; psb_mmu_virtual_to_pfn()
810 up_read(&pd->driver->sem); psb_mmu_virtual_to_pfn()
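The psb_mmu code above is a classic two-level page table: pd->p is the directory page, pd->tables[] caches the page-table structs, and a directory entry is the table page's frame number shifted up and OR'ed with pd->pd_mask. A small sketch of the index and entry arithmetic; the 10/10/12 address split and the helper names are our reading of the code, not driver API:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* 32-bit address: 10-bit directory index, 10-bit table index, 12-bit offset */
static uint32_t pd_index(uint32_t addr) { return addr >> 22; }
static uint32_t pt_index(uint32_t addr) { return (addr >> PAGE_SHIFT) & 0x3ff; }

/* a directory entry as written in psb_mmu_pt_alloc_map_lock(): the table
 * page's frame number shifted into place, OR'ed with the valid mask */
static uint32_t make_pde(uint32_t pt_pfn, uint32_t pd_mask)
{
	return (pt_pfn << PAGE_SHIFT) | pd_mask;
}

int main(void)
{
	uint32_t addr = 0x00c01234;

	printf("addr %08x -> pde slot %u, pte slot %u\n",
	       (unsigned)addr, (unsigned)pd_index(addr),
	       (unsigned)pt_index(addr));
	printf("pde for table at pfn 0x1a2b: %08x\n",
	       (unsigned)make_pde(0x1a2b, 0x1));
	return 0;
}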
H A Dmmu.h19 /* protects driver- and pd structures. Always take in read mode
43 struct psb_mmu_pd *pd; member in struct:psb_mmu_pt
72 extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
74 extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
77 extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
81 extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
83 extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
84 extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
88 extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
/linux-4.1.27/drivers/staging/media/dt3155v4l/
H A Ddt3155v4l.c191 dt3155_start_acq(struct dt3155_priv *pd) dt3155_start_acq() argument
193 struct vb2_buffer *vb = pd->curr_buf; dt3155_start_acq()
197 iowrite32(dma_addr, pd->regs + EVEN_DMA_START); dt3155_start_acq()
198 iowrite32(dma_addr + img_width, pd->regs + ODD_DMA_START); dt3155_start_acq()
199 iowrite32(img_width, pd->regs + EVEN_DMA_STRIDE); dt3155_start_acq()
200 iowrite32(img_width, pd->regs + ODD_DMA_STRIDE); dt3155_start_acq()
203 FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR); dt3155_start_acq()
206 pd->regs + CSR1); dt3155_start_acq()
207 wait_i2c_reg(pd->regs); dt3155_start_acq()
208 write_i2c_reg(pd->regs, CONFIG, pd->config); dt3155_start_acq()
209 write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE); dt3155_start_acq()
210 write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_DONE); dt3155_start_acq()
213 write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | BUSY_ODD); dt3155_start_acq()
226 struct dt3155_priv *pd = vb2_get_drv_priv(q); dt3155_queue_setup() local
233 if (pd->q->alloc_ctx[0]) dt3155_queue_setup()
235 ret = vb2_dma_contig_init_ctx(&pd->pdev->dev); dt3155_queue_setup()
238 pd->q->alloc_ctx[0] = ret; dt3155_queue_setup()
245 struct dt3155_priv *pd = vb2_get_drv_priv(q); dt3155_wait_prepare() local
247 mutex_unlock(pd->vdev.lock); dt3155_wait_prepare()
253 struct dt3155_priv *pd = vb2_get_drv_priv(q); dt3155_wait_finish() local
255 mutex_lock(pd->vdev.lock); dt3155_wait_finish()
268 struct dt3155_priv *pd = vb2_get_drv_priv(q); dt3155_stop_streaming() local
271 spin_lock_irq(&pd->lock); dt3155_stop_streaming()
272 while (!list_empty(&pd->dmaq)) { dt3155_stop_streaming()
273 vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry); dt3155_stop_streaming()
277 spin_unlock_irq(&pd->lock); dt3155_stop_streaming()
284 struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue); dt3155_buf_queue() local
286 /* pd->q->streaming = 1 when dt3155_buf_queue() is invoked */ dt3155_buf_queue()
287 spin_lock_irq(&pd->lock); dt3155_buf_queue()
288 if (pd->curr_buf) dt3155_buf_queue()
289 list_add_tail(&vb->done_entry, &pd->dmaq); dt3155_buf_queue()
291 pd->curr_buf = vb; dt3155_buf_queue()
292 dt3155_start_acq(pd); dt3155_buf_queue()
294 spin_unlock_irq(&pd->lock); dt3155_buf_queue()
381 struct dt3155_priv *pd = video_drvdata(filp); dt3155_open() local
383 if (mutex_lock_interruptible(&pd->mux)) dt3155_open()
385 if (!pd->users) { dt3155_open()
386 pd->q = kzalloc(sizeof(*pd->q), GFP_KERNEL); dt3155_open()
387 if (!pd->q) { dt3155_open()
391 pd->q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dt3155_open()
392 pd->q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; dt3155_open()
393 pd->q->io_modes = VB2_READ | VB2_MMAP; dt3155_open()
394 pd->q->ops = &q_ops; dt3155_open()
395 pd->q->mem_ops = &vb2_dma_contig_memops; dt3155_open()
396 pd->q->drv_priv = pd; dt3155_open()
397 pd->curr_buf = NULL; dt3155_open()
398 pd->field_count = 0; dt3155_open()
399 ret = vb2_queue_init(pd->q); dt3155_open()
402 INIT_LIST_HEAD(&pd->dmaq); dt3155_open()
403 spin_lock_init(&pd->lock); dt3155_open()
406 pd->regs + INT_CSR); dt3155_open()
407 ret = request_irq(pd->pdev->irq, dt3155_irq_handler_even, dt3155_open()
408 IRQF_SHARED, DT3155_NAME, pd); dt3155_open()
412 pd->users++; dt3155_open()
413 mutex_unlock(&pd->mux); dt3155_open()
416 kfree(pd->q); dt3155_open()
417 pd->q = NULL; dt3155_open()
419 mutex_unlock(&pd->mux); dt3155_open()
426 struct dt3155_priv *pd = video_drvdata(filp); dt3155_release() local
428 mutex_lock(&pd->mux); dt3155_release()
429 pd->users--; dt3155_release()
430 BUG_ON(pd->users < 0); dt3155_release()
431 if (!pd->users) { dt3155_release()
432 vb2_queue_release(pd->q); dt3155_release()
433 free_irq(pd->pdev->irq, pd); dt3155_release()
434 if (pd->q->alloc_ctx[0]) dt3155_release()
435 vb2_dma_contig_cleanup_ctx(pd->q->alloc_ctx[0]); dt3155_release()
436 kfree(pd->q); dt3155_release()
437 pd->q = NULL; dt3155_release()
439 mutex_unlock(&pd->mux); dt3155_release()
446 struct dt3155_priv *pd = video_drvdata(filp); dt3155_read() local
449 if (mutex_lock_interruptible(&pd->mux)) dt3155_read()
451 res = vb2_read(pd->q, user, size, loff, filp->f_flags & O_NONBLOCK); dt3155_read()
452 mutex_unlock(&pd->mux); dt3155_read()
459 struct dt3155_priv *pd = video_drvdata(filp); dt3155_poll() local
462 mutex_lock(&pd->mux); dt3155_poll()
463 res = vb2_poll(pd->q, filp, polltbl); dt3155_poll()
464 mutex_unlock(&pd->mux); dt3155_poll()
471 struct dt3155_priv *pd = video_drvdata(filp); dt3155_mmap() local
474 if (mutex_lock_interruptible(&pd->mux)) dt3155_mmap()
476 res = vb2_mmap(pd->q, vma); dt3155_mmap()
477 mutex_unlock(&pd->mux); dt3155_mmap()
494 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_streamon() local
496 return vb2_streamon(pd->q, type); dt3155_ioc_streamon()
502 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_streamoff() local
504 return vb2_streamoff(pd->q, type); dt3155_ioc_streamoff()
510 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_querycap() local
514 sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev)); dt3155_ioc_querycap()
571 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_reqbufs() local
573 return vb2_reqbufs(pd->q, b); dt3155_ioc_reqbufs()
579 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_querybuf() local
581 return vb2_querybuf(pd->q, b); dt3155_ioc_querybuf()
587 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_qbuf() local
589 return vb2_qbuf(pd->q, b); dt3155_ioc_qbuf()
595 struct dt3155_priv *pd = video_drvdata(filp); dt3155_ioc_dqbuf() local
597 return vb2_dqbuf(pd->q, b, filp->f_flags & O_NONBLOCK); dt3155_ioc_dqbuf()
726 struct dt3155_priv *pd = pci_get_drvdata(pdev); dt3155_init_board() local
736 pd->regs + CSR1); dt3155_init_board()
741 iowrite32(FIFO_EN | SRST, pd->regs + CSR1); dt3155_init_board()
743 iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT); dt3155_init_board()
744 iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT); dt3155_init_board()
745 iowrite32(0x00000020, pd->regs + FIFO_TRIGER); dt3155_init_board()
746 iowrite32(0x00000103, pd->regs + XFER_MODE); dt3155_init_board()
747 iowrite32(0, pd->regs + RETRY_WAIT_CNT); dt3155_init_board()
748 iowrite32(0, pd->regs + INT_CSR); dt3155_init_board()
749 iowrite32(1, pd->regs + EVEN_FLD_MASK); dt3155_init_board()
750 iowrite32(1, pd->regs + ODD_FLD_MASK); dt3155_init_board()
751 iowrite32(0, pd->regs + MASK_LENGTH); dt3155_init_board()
752 iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT); dt3155_init_board()
753 iowrite32(0x01010101, pd->regs + IIC_CLK_DUR); dt3155_init_board()
757 read_i2c_reg(pd->regs, DT_ID, &tmp); dt3155_init_board()
762 write_i2c_reg(pd->regs, AD_ADDR, 0); dt3155_init_board()
764 write_i2c_reg(pd->regs, AD_LUT, i); dt3155_init_board()
768 write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG); dt3155_init_board()
769 write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3); dt3155_init_board()
770 write_i2c_reg(pd->regs, AD_ADDR, AD_POS_REF); dt3155_init_board()
771 write_i2c_reg(pd->regs, AD_CMD, 34); dt3155_init_board()
772 write_i2c_reg(pd->regs, AD_ADDR, AD_NEG_REF); dt3155_init_board()
773 write_i2c_reg(pd->regs, AD_CMD, 0); dt3155_init_board()
776 write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM); dt3155_init_board()
778 write_i2c_reg(pd->regs, PM_LUT_ADDR, i); dt3155_init_board()
779 write_i2c_reg(pd->regs, PM_LUT_DATA, i); dt3155_init_board()
781 write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM | PM_LUT_SEL); dt3155_init_board()
783 write_i2c_reg(pd->regs, PM_LUT_ADDR, i); dt3155_init_board()
784 write_i2c_reg(pd->regs, PM_LUT_DATA, i); dt3155_init_board()
786 write_i2c_reg(pd->regs, CONFIG, pd->config); /* ACQ_MODE_EVEN */ dt3155_init_board()
789 write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG); dt3155_init_board()
790 write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3); dt3155_init_board()
797 iowrite32(buf_dma, pd->regs + EVEN_DMA_START); dt3155_init_board()
798 iowrite32(buf_dma, pd->regs + ODD_DMA_START); dt3155_init_board()
799 iowrite32(0, pd->regs + EVEN_DMA_STRIDE); dt3155_init_board()
800 iowrite32(0, pd->regs + ODD_DMA_STRIDE); dt3155_init_board()
803 iowrite32(FIFO_EN | SRST | CAP_CONT_ODD, pd->regs + CSR1); dt3155_init_board()
804 write_i2c_reg(pd->regs, CSR2, pd->csr2 | SYNC_SNTL); dt3155_init_board()
805 write_i2c_reg(pd->regs, CONFIG, pd->config); dt3155_init_board()
806 write_i2c_reg(pd->regs, EVEN_CSR, CSR_SNGL); dt3155_init_board()
807 write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | SYNC_SNTL); dt3155_init_board()
809 read_i2c_reg(pd->regs, CSR2, &tmp); dt3155_init_board()
810 write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE); dt3155_init_board()
811 write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE); dt3155_init_board()
812 write_i2c_reg(pd->regs, CSR2, pd->csr2); dt3155_init_board()
813 iowrite32(FIFO_EN | SRST | FLD_DN_EVEN | FLD_DN_ODD, pd->regs + CSR1); dt3155_init_board()
899 struct dt3155_priv *pd; dt3155_probe() local
904 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); dt3155_probe()
905 if (!pd) dt3155_probe()
908 pd->vdev = dt3155_vdev; dt3155_probe()
909 pci_set_drvdata(pdev, pd); /* for use in dt3155_remove() */ dt3155_probe()
910 video_set_drvdata(&pd->vdev, pd); /* for use in video_fops */ dt3155_probe()
911 pd->users = 0; dt3155_probe()
912 pd->pdev = pdev; dt3155_probe()
913 INIT_LIST_HEAD(&pd->dmaq); dt3155_probe()
914 mutex_init(&pd->mux); dt3155_probe()
915 pd->vdev.lock = &pd->mux; /* for locking v4l2_file_operations */ dt3155_probe()
916 spin_lock_init(&pd->lock); dt3155_probe()
917 pd->csr2 = csr2_init; dt3155_probe()
918 pd->config = config_init; dt3155_probe()
925 pd->regs = pci_iomap(pdev, 0, pci_resource_len(pd->pdev, 0)); dt3155_probe()
926 if (!pd->regs) { dt3155_probe()
933 err = video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1); dt3155_probe()
939 dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev.minor); dt3155_probe()
943 pci_iounmap(pdev, pd->regs); dt3155_probe()
954 struct dt3155_priv *pd = pci_get_drvdata(pdev); dt3155_remove() local
957 video_unregister_device(&pd->vdev); dt3155_remove()
958 pci_iounmap(pdev, pd->regs); dt3155_remove()
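dt3155_open() and dt3155_release() hang all shared state off a mutex-guarded user count: the first opener allocates and wires up the vb2 queue and IRQ, the last closer tears them down. A stripped-down userspace sketch of that first-open/last-close pattern; pthreads stand in for the kernel mutex and dev_open()/dev_release() are our names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mux = PTHREAD_MUTEX_INITIALIZER;
static int users;
static int *shared_state;	/* stands in for pd->q */

static int dev_open(void)
{
	pthread_mutex_lock(&mux);
	if (!users) {
		shared_state = calloc(1, sizeof(*shared_state));
		if (!shared_state) {
			pthread_mutex_unlock(&mux);
			return -1;	/* first opener failed to init */
		}
	}
	users++;
	pthread_mutex_unlock(&mux);
	return 0;
}

static void dev_release(void)
{
	pthread_mutex_lock(&mux);
	if (--users == 0) {	/* last closer frees the shared state */
		free(shared_state);
		shared_state = NULL;
	}
	pthread_mutex_unlock(&mux);
}

int main(void)
{
	dev_open();
	dev_open();	/* second opener reuses the existing state */
	dev_release();
	dev_release();
	printf("users=%d\n", users);
	return 0;
}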
/linux-4.1.27/drivers/isdn/hysdn/
H A Dhysdn_proclog.c71 struct procdata *pd = card->proclog; hysdn_addlog() local
75 if (!pd) hysdn_addlog()
78 cp = pd->logtmp; hysdn_addlog()
88 printk(KERN_INFO "%s", pd->logtmp); hysdn_addlog()
90 put_log_buffer(card, pd->logtmp); hysdn_addlog()
104 struct procdata *pd = card->proclog; put_log_buffer() local
108 if (!pd) put_log_buffer()
114 if (pd->if_used <= 0) put_log_buffer()
121 ib->proc_ctrl = pd; /* point to own control structure */ put_log_buffer()
123 ib->usage_cnt = pd->if_used; put_log_buffer()
124 if (!pd->log_head) put_log_buffer()
125 pd->log_head = ib; /* new head */ put_log_buffer()
127 pd->log_tail->next = ib; /* follows existing messages */ put_log_buffer()
128 pd->log_tail = ib; /* new tail */ put_log_buffer()
129 i = pd->del_lock++; /* get lock state */ put_log_buffer()
134 while (pd->log_head->next) { put_log_buffer()
135 if ((pd->log_head->usage_cnt <= 0) && put_log_buffer()
136 (pd->log_head->next->usage_cnt <= 0)) { put_log_buffer()
137 ib = pd->log_head; put_log_buffer()
138 pd->log_head = pd->log_head->next; put_log_buffer()
142 } /* pd->log_head->next */ put_log_buffer()
143 pd->del_lock--; /* release lock level */ put_log_buffer()
144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ put_log_buffer()
179 struct procdata *pd = card->proclog; hysdn_log_read() local
183 wait_event_interruptible(pd->rd_queue, (inf = hysdn_log_read()
213 struct procdata *pd = card->proclog; hysdn_log_open() local
218 pd->if_used++; hysdn_log_open()
219 if (pd->log_head) hysdn_log_open()
220 filep->private_data = &pd->log_tail->next; hysdn_log_open()
222 filep->private_data = &pd->log_head; hysdn_log_open()
243 struct procdata *pd; hysdn_log_close() local
256 pd = (struct procdata *) inf->proc_ctrl; /* still entries there */ hysdn_log_close()
260 pd = card->proclog; /* pointer to procfs log */ hysdn_log_close()
262 if (pd) hysdn_log_close()
263 pd->if_used--; /* decrement interface usage count by one */ hysdn_log_close()
270 if (pd) hysdn_log_close()
271 if (pd->if_used <= 0) /* delete buffers if last file closed */ hysdn_log_close()
272 while (pd->log_head) { hysdn_log_close()
273 inf = pd->log_head; hysdn_log_close()
274 pd->log_head = pd->log_head->next; hysdn_log_close()
291 struct procdata *pd = card->proclog; hysdn_log_poll() local
296 poll_wait(file, &(pd->rd_queue), wait); hysdn_log_poll()
326 struct procdata *pd; hysdn_proclog_init() local
330 if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { hysdn_proclog_init()
331 sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); hysdn_proclog_init()
332 pd->log = proc_create_data(pd->log_name, hysdn_proclog_init()
336 init_waitqueue_head(&(pd->rd_queue)); hysdn_proclog_init()
338 card->proclog = (void *) pd; /* remember procfs structure */ hysdn_proclog_init()
351 struct procdata *pd; hysdn_proclog_release() local
353 if ((pd = (struct procdata *) card->proclog) != NULL) { hysdn_proclog_release()
354 if (pd->log) hysdn_proclog_release()
355 remove_proc_entry(pd->log_name, hysdn_proc_entry); hysdn_proclog_release()
356 kfree(pd); /* release memory */ hysdn_proclog_release()
/linux-4.1.27/net/l2tp/
H A Dl2tp_debugfs.c48 static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) l2tp_dfs_next_tunnel() argument
50 pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); l2tp_dfs_next_tunnel()
51 pd->tunnel_idx++; l2tp_dfs_next_tunnel()
54 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) l2tp_dfs_next_session() argument
56 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); l2tp_dfs_next_session()
57 pd->session_idx++; l2tp_dfs_next_session()
59 if (pd->session == NULL) { l2tp_dfs_next_session()
60 pd->session_idx = 0; l2tp_dfs_next_session()
61 l2tp_dfs_next_tunnel(pd); l2tp_dfs_next_session()
68 struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN; l2tp_dfs_seq_start() local
75 pd = m->private; l2tp_dfs_seq_start()
77 if (pd->tunnel == NULL) l2tp_dfs_seq_start()
78 l2tp_dfs_next_tunnel(pd); l2tp_dfs_seq_start()
80 l2tp_dfs_next_session(pd); l2tp_dfs_seq_start()
83 if ((pd->tunnel == NULL) && (pd->session == NULL)) l2tp_dfs_seq_start()
84 pd = NULL; l2tp_dfs_seq_start()
87 return pd; l2tp_dfs_seq_start()
222 struct l2tp_dfs_seq_data *pd = v; l2tp_dfs_seq_show() local
241 if (pd->session == NULL) l2tp_dfs_seq_show()
242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel); l2tp_dfs_seq_show()
244 l2tp_dfs_seq_session_show(m, pd->session); l2tp_dfs_seq_show()
259 struct l2tp_dfs_seq_data *pd; l2tp_dfs_seq_open() local
263 pd = kzalloc(sizeof(*pd), GFP_KERNEL); l2tp_dfs_seq_open()
264 if (pd == NULL) l2tp_dfs_seq_open()
270 pd->net = get_net_ns_by_pid(current->pid); l2tp_dfs_seq_open()
271 if (IS_ERR(pd->net)) { l2tp_dfs_seq_open()
272 rc = PTR_ERR(pd->net); l2tp_dfs_seq_open()
281 seq->private = pd; l2tp_dfs_seq_open()
287 put_net(pd->net); l2tp_dfs_seq_open()
289 kfree(pd); l2tp_dfs_seq_open()
295 struct l2tp_dfs_seq_data *pd; l2tp_dfs_seq_release() local
299 pd = seq->private; l2tp_dfs_seq_release()
300 if (pd->net) l2tp_dfs_seq_release()
301 put_net(pd->net); l2tp_dfs_seq_release()
302 kfree(pd); l2tp_dfs_seq_release()
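l2tp_dfs_next_tunnel() and l2tp_dfs_next_session() keep a two-level cursor for seq_file iteration: advance the inner session index until it runs off the end, then reset it and step the outer tunnel index. The same shape in a self-contained sketch, with array bounds replacing the find_nth lookups:

#include <stdio.h>

#define NTUNNELS 3
static const int sessions_per_tunnel[NTUNNELS] = { 2, 0, 1 };

struct cursor { int tunnel_idx, session_idx; };

/* returns 1 and fills (*t, *s) with the next pair, 0 at the end;
 * tunnels with no sessions are skipped, as an empty find_nth would be */
static int next_session(struct cursor *c, int *t, int *s)
{
	while (c->tunnel_idx < NTUNNELS) {
		if (c->session_idx < sessions_per_tunnel[c->tunnel_idx]) {
			*t = c->tunnel_idx;
			*s = c->session_idx++;
			return 1;
		}
		c->session_idx = 0;	/* inner cursor exhausted */
		c->tunnel_idx++;	/* move to the next tunnel */
	}
	return 0;
}

int main(void)
{
	struct cursor c = { 0, 0 };
	int t, s;

	while (next_session(&c, &t, &s))
		printf("tunnel %d session %d\n", t, s);
	return 0;
}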
H A Dl2tp_ppp.c1560 static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) pppol2tp_next_tunnel() argument
1563 pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); pppol2tp_next_tunnel()
1564 pd->tunnel_idx++; pppol2tp_next_tunnel()
1566 if (pd->tunnel == NULL) pppol2tp_next_tunnel()
1570 if (pd->tunnel->version < 3) pppol2tp_next_tunnel()
1575 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) pppol2tp_next_session() argument
1577 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); pppol2tp_next_session()
1578 pd->session_idx++; pppol2tp_next_session()
1580 if (pd->session == NULL) { pppol2tp_next_session()
1581 pd->session_idx = 0; pppol2tp_next_session()
1582 pppol2tp_next_tunnel(net, pd); pppol2tp_next_session()
1588 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; pppol2tp_seq_start() local
1596 pd = m->private; pppol2tp_seq_start()
1599 if (pd->tunnel == NULL) pppol2tp_seq_start()
1600 pppol2tp_next_tunnel(net, pd); pppol2tp_seq_start()
1602 pppol2tp_next_session(net, pd); pppol2tp_seq_start()
1605 if ((pd->tunnel == NULL) && (pd->session == NULL)) pppol2tp_seq_start()
1606 pd = NULL; pppol2tp_seq_start()
1609 return pd; pppol2tp_seq_start()
1688 struct pppol2tp_seq_data *pd = v; pppol2tp_seq_show() local
1704 if (pd->session == NULL) pppol2tp_seq_show()
1705 pppol2tp_seq_tunnel_show(m, pd->tunnel); pppol2tp_seq_show()
1707 pppol2tp_seq_session_show(m, pd->session); pppol2tp_seq_show()
/linux-4.1.27/drivers/dma/
H A Dpch_dma.c144 #define dma_readl(pd, name) \
145 readl((pd)->membase + PCH_DMA_##name)
146 #define dma_writel(pd, name, val) \
147 writel((val), (pd)->membase + PCH_DMA_##name)
191 struct pch_dma *pd = to_pd(chan->device); pdc_enable_irq() local
200 val = dma_readl(pd, CTL2); pdc_enable_irq()
207 dma_writel(pd, CTL2, val); pdc_enable_irq()
216 struct pch_dma *pd = to_pd(chan->device); pdc_set_dir() local
222 val = dma_readl(pd, CTL0); pdc_set_dir()
237 dma_writel(pd, CTL0, val); pdc_set_dir()
240 val = dma_readl(pd, CTL3); pdc_set_dir()
254 dma_writel(pd, CTL3, val); pdc_set_dir()
263 struct pch_dma *pd = to_pd(chan->device); pdc_set_mode() local
273 val = dma_readl(pd, CTL0); pdc_set_mode()
277 dma_writel(pd, CTL0, val); pdc_set_mode()
284 val = dma_readl(pd, CTL3); pdc_set_mode()
288 dma_writel(pd, CTL3, val); pdc_set_mode()
297 struct pch_dma *pd = to_pd(pd_chan->chan.device); pdc_get_status0() local
300 val = dma_readl(pd, STS0); pdc_get_status0()
307 struct pch_dma *pd = to_pd(pd_chan->chan.device); pdc_get_status2() local
310 val = dma_readl(pd, STS2); pdc_get_status2()
444 struct pch_dma *pd = to_pd(chan->device); pdc_alloc_desc() local
447 desc = pci_pool_alloc(pd->pool, flags, &addr); pdc_alloc_desc()
546 struct pch_dma *pd = to_pd(chan->device); pd_free_chan_resources() local
560 pci_pool_free(pd->pool, desc, desc->txd.phys); pd_free_chan_resources()
710 struct pch_dma *pd = (struct pch_dma *)devid; pd_irq() local
718 sts0 = dma_readl(pd, STS0); pd_irq()
719 sts2 = dma_readl(pd, STS2); pd_irq()
721 dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); pd_irq()
723 for (i = 0; i < pd->dma.chancnt; i++) { pd_irq()
724 pd_chan = &pd->channels[i]; pd_irq()
747 dma_writel(pd, STS0, sts0); pd_irq()
749 dma_writel(pd, STS2, sts2); pd_irq()
755 static void pch_dma_save_regs(struct pch_dma *pd) pch_dma_save_regs() argument
761 pd->regs.dma_ctl0 = dma_readl(pd, CTL0); pch_dma_save_regs()
762 pd->regs.dma_ctl1 = dma_readl(pd, CTL1); pch_dma_save_regs()
763 pd->regs.dma_ctl2 = dma_readl(pd, CTL2); pch_dma_save_regs()
764 pd->regs.dma_ctl3 = dma_readl(pd, CTL3); pch_dma_save_regs()
766 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { pch_dma_save_regs()
769 pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR); pch_dma_save_regs()
770 pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR); pch_dma_save_regs()
771 pd->ch_regs[i].size = channel_readl(pd_chan, SIZE); pch_dma_save_regs()
772 pd->ch_regs[i].next = channel_readl(pd_chan, NEXT); pch_dma_save_regs()
778 static void pch_dma_restore_regs(struct pch_dma *pd) pch_dma_restore_regs() argument
784 dma_writel(pd, CTL0, pd->regs.dma_ctl0); pch_dma_restore_regs()
785 dma_writel(pd, CTL1, pd->regs.dma_ctl1); pch_dma_restore_regs()
786 dma_writel(pd, CTL2, pd->regs.dma_ctl2); pch_dma_restore_regs()
787 dma_writel(pd, CTL3, pd->regs.dma_ctl3); pch_dma_restore_regs()
789 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { pch_dma_restore_regs()
792 channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr); pch_dma_restore_regs()
793 channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr); pch_dma_restore_regs()
794 channel_writel(pd_chan, SIZE, pd->ch_regs[i].size); pch_dma_restore_regs()
795 channel_writel(pd_chan, NEXT, pd->ch_regs[i].next); pch_dma_restore_regs()
803 struct pch_dma *pd = pci_get_drvdata(pdev); pch_dma_suspend() local
805 if (pd) pch_dma_suspend()
806 pch_dma_save_regs(pd); pch_dma_suspend()
817 struct pch_dma *pd = pci_get_drvdata(pdev); pch_dma_resume() local
829 if (pd) pch_dma_resume()
830 pch_dma_restore_regs(pd); pch_dma_resume()
839 struct pch_dma *pd; pch_dma_probe() local
846 pd = kzalloc(sizeof(*pd), GFP_KERNEL); pch_dma_probe()
847 if (!pd) pch_dma_probe()
850 pci_set_drvdata(pdev, pd); pch_dma_probe()
876 regs = pd->membase = pci_iomap(pdev, 1, 0); pch_dma_probe()
877 if (!pd->membase) { pch_dma_probe()
885 err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); pch_dma_probe()
891 pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, pch_dma_probe()
893 if (!pd->pool) { pch_dma_probe()
899 pd->dma.dev = &pdev->dev; pch_dma_probe()
901 INIT_LIST_HEAD(&pd->dma.channels); pch_dma_probe()
904 struct pch_dma_chan *pd_chan = &pd->channels[i]; pch_dma_probe()
906 pd_chan->chan.device = &pd->dma; pch_dma_probe()
919 list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels); pch_dma_probe()
922 dma_cap_zero(pd->dma.cap_mask); pch_dma_probe()
923 dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask); pch_dma_probe()
924 dma_cap_set(DMA_SLAVE, pd->dma.cap_mask); pch_dma_probe()
926 pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources; pch_dma_probe()
927 pd->dma.device_free_chan_resources = pd_free_chan_resources; pch_dma_probe()
928 pd->dma.device_tx_status = pd_tx_status; pch_dma_probe()
929 pd->dma.device_issue_pending = pd_issue_pending; pch_dma_probe()
930 pd->dma.device_prep_slave_sg = pd_prep_slave_sg; pch_dma_probe()
931 pd->dma.device_terminate_all = pd_device_terminate_all; pch_dma_probe()
933 err = dma_async_device_register(&pd->dma); pch_dma_probe()
942 pci_pool_destroy(pd->pool); pch_dma_probe()
944 free_irq(pdev->irq, pd); pch_dma_probe()
946 pci_iounmap(pdev, pd->membase); pch_dma_probe()
952 kfree(pd); pch_dma_probe()
958 struct pch_dma *pd = pci_get_drvdata(pdev); pch_dma_remove() local
962 if (pd) { pch_dma_remove()
963 dma_async_device_unregister(&pd->dma); pch_dma_remove()
965 free_irq(pdev->irq, pd); pch_dma_remove()
967 list_for_each_entry_safe(chan, _c, &pd->dma.channels, pch_dma_remove()
974 pci_pool_destroy(pd->pool); pch_dma_remove()
975 pci_iounmap(pdev, pd->membase); pch_dma_remove()
978 kfree(pd); pch_dma_remove()
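dma_readl()/dma_writel() above are the usual token-pasting register accessors: PCH_DMA_##name turns a symbolic register name into an offset from the device's iomapped base. A userspace model of the idiom; plain memory and made-up offsets stand in for the ioread32/iowrite32 MMIO the driver really performs:

#include <stdint.h>
#include <stdio.h>

/* illustrative offsets only */
enum { PCH_DMA_CTL0 = 0x00, PCH_DMA_STS0 = 0x10 };

struct pch_dma_model { uint32_t regs[8]; };	/* fake register file */

#define dma_readl(pd, name) \
	((pd)->regs[PCH_DMA_##name / 4])
#define dma_writel(pd, name, val) \
	((pd)->regs[PCH_DMA_##name / 4] = (val))

int main(void)
{
	struct pch_dma_model dev = { { 0 } };

	dma_writel(&dev, CTL0, 0xdeadbeef);	/* pastes to PCH_DMA_CTL0 */
	printf("CTL0 = %08x\n", (unsigned)dma_readl(&dev, CTL0));
	return 0;
}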
/linux-4.1.27/drivers/media/tuners/
H A Dtda18271-maps.c25 u8 pd; /* post div */ member in struct:tda18271_pll_map
37 { .lomax = 32000, .pd = 0x5f, .d = 0xf0 },
38 { .lomax = 35000, .pd = 0x5e, .d = 0xe0 },
39 { .lomax = 37000, .pd = 0x5d, .d = 0xd0 },
40 { .lomax = 41000, .pd = 0x5c, .d = 0xc0 },
41 { .lomax = 44000, .pd = 0x5b, .d = 0xb0 },
42 { .lomax = 49000, .pd = 0x5a, .d = 0xa0 },
43 { .lomax = 54000, .pd = 0x59, .d = 0x90 },
44 { .lomax = 61000, .pd = 0x58, .d = 0x80 },
45 { .lomax = 65000, .pd = 0x4f, .d = 0x78 },
46 { .lomax = 70000, .pd = 0x4e, .d = 0x70 },
47 { .lomax = 75000, .pd = 0x4d, .d = 0x68 },
48 { .lomax = 82000, .pd = 0x4c, .d = 0x60 },
49 { .lomax = 89000, .pd = 0x4b, .d = 0x58 },
50 { .lomax = 98000, .pd = 0x4a, .d = 0x50 },
51 { .lomax = 109000, .pd = 0x49, .d = 0x48 },
52 { .lomax = 123000, .pd = 0x48, .d = 0x40 },
53 { .lomax = 131000, .pd = 0x3f, .d = 0x3c },
54 { .lomax = 141000, .pd = 0x3e, .d = 0x38 },
55 { .lomax = 151000, .pd = 0x3d, .d = 0x34 },
56 { .lomax = 164000, .pd = 0x3c, .d = 0x30 },
57 { .lomax = 179000, .pd = 0x3b, .d = 0x2c },
58 { .lomax = 197000, .pd = 0x3a, .d = 0x28 },
59 { .lomax = 219000, .pd = 0x39, .d = 0x24 },
60 { .lomax = 246000, .pd = 0x38, .d = 0x20 },
61 { .lomax = 263000, .pd = 0x2f, .d = 0x1e },
62 { .lomax = 282000, .pd = 0x2e, .d = 0x1c },
63 { .lomax = 303000, .pd = 0x2d, .d = 0x1a },
64 { .lomax = 329000, .pd = 0x2c, .d = 0x18 },
65 { .lomax = 359000, .pd = 0x2b, .d = 0x16 },
66 { .lomax = 395000, .pd = 0x2a, .d = 0x14 },
67 { .lomax = 438000, .pd = 0x29, .d = 0x12 },
68 { .lomax = 493000, .pd = 0x28, .d = 0x10 },
69 { .lomax = 526000, .pd = 0x1f, .d = 0x0f },
70 { .lomax = 564000, .pd = 0x1e, .d = 0x0e },
71 { .lomax = 607000, .pd = 0x1d, .d = 0x0d },
72 { .lomax = 658000, .pd = 0x1c, .d = 0x0c },
73 { .lomax = 718000, .pd = 0x1b, .d = 0x0b },
74 { .lomax = 790000, .pd = 0x1a, .d = 0x0a },
75 { .lomax = 877000, .pd = 0x19, .d = 0x09 },
76 { .lomax = 987000, .pd = 0x18, .d = 0x08 },
77 { .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
81 { .lomax = 33125, .pd = 0x57, .d = 0xf0 },
82 { .lomax = 35500, .pd = 0x56, .d = 0xe0 },
83 { .lomax = 38188, .pd = 0x55, .d = 0xd0 },
84 { .lomax = 41375, .pd = 0x54, .d = 0xc0 },
85 { .lomax = 45125, .pd = 0x53, .d = 0xb0 },
86 { .lomax = 49688, .pd = 0x52, .d = 0xa0 },
87 { .lomax = 55188, .pd = 0x51, .d = 0x90 },
88 { .lomax = 62125, .pd = 0x50, .d = 0x80 },
89 { .lomax = 66250, .pd = 0x47, .d = 0x78 },
90 { .lomax = 71000, .pd = 0x46, .d = 0x70 },
91 { .lomax = 76375, .pd = 0x45, .d = 0x68 },
92 { .lomax = 82750, .pd = 0x44, .d = 0x60 },
93 { .lomax = 90250, .pd = 0x43, .d = 0x58 },
94 { .lomax = 99375, .pd = 0x42, .d = 0x50 },
95 { .lomax = 110375, .pd = 0x41, .d = 0x48 },
96 { .lomax = 124250, .pd = 0x40, .d = 0x40 },
97 { .lomax = 132500, .pd = 0x37, .d = 0x3c },
98 { .lomax = 142000, .pd = 0x36, .d = 0x38 },
99 { .lomax = 152750, .pd = 0x35, .d = 0x34 },
100 { .lomax = 165500, .pd = 0x34, .d = 0x30 },
101 { .lomax = 180500, .pd = 0x33, .d = 0x2c },
102 { .lomax = 198750, .pd = 0x32, .d = 0x28 },
103 { .lomax = 220750, .pd = 0x31, .d = 0x24 },
104 { .lomax = 248500, .pd = 0x30, .d = 0x20 },
105 { .lomax = 265000, .pd = 0x27, .d = 0x1e },
106 { .lomax = 284000, .pd = 0x26, .d = 0x1c },
107 { .lomax = 305500, .pd = 0x25, .d = 0x1a },
108 { .lomax = 331000, .pd = 0x24, .d = 0x18 },
109 { .lomax = 361000, .pd = 0x23, .d = 0x16 },
110 { .lomax = 397500, .pd = 0x22, .d = 0x14 },
111 { .lomax = 441500, .pd = 0x21, .d = 0x12 },
112 { .lomax = 497000, .pd = 0x20, .d = 0x10 },
113 { .lomax = 530000, .pd = 0x17, .d = 0x0f },
114 { .lomax = 568000, .pd = 0x16, .d = 0x0e },
115 { .lomax = 611000, .pd = 0x15, .d = 0x0d },
116 { .lomax = 662000, .pd = 0x14, .d = 0x0c },
117 { .lomax = 722000, .pd = 0x13, .d = 0x0b },
118 { .lomax = 795000, .pd = 0x12, .d = 0x0a },
119 { .lomax = 883000, .pd = 0x11, .d = 0x09 },
120 { .lomax = 994000, .pd = 0x10, .d = 0x08 },
121 { .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
125 { .lomax = 33000, .pd = 0xdd, .d = 0xd0 },
126 { .lomax = 36000, .pd = 0xdc, .d = 0xc0 },
127 { .lomax = 40000, .pd = 0xdb, .d = 0xb0 },
128 { .lomax = 44000, .pd = 0xda, .d = 0xa0 },
129 { .lomax = 49000, .pd = 0xd9, .d = 0x90 },
130 { .lomax = 55000, .pd = 0xd8, .d = 0x80 },
131 { .lomax = 63000, .pd = 0xd3, .d = 0x70 },
132 { .lomax = 67000, .pd = 0xcd, .d = 0x68 },
133 { .lomax = 73000, .pd = 0xcc, .d = 0x60 },
134 { .lomax = 80000, .pd = 0xcb, .d = 0x58 },
135 { .lomax = 88000, .pd = 0xca, .d = 0x50 },
136 { .lomax = 98000, .pd = 0xc9, .d = 0x48 },
137 { .lomax = 110000, .pd = 0xc8, .d = 0x40 },
138 { .lomax = 126000, .pd = 0xc3, .d = 0x38 },
139 { .lomax = 135000, .pd = 0xbd, .d = 0x34 },
140 { .lomax = 147000, .pd = 0xbc, .d = 0x30 },
141 { .lomax = 160000, .pd = 0xbb, .d = 0x2c },
142 { .lomax = 176000, .pd = 0xba, .d = 0x28 },
143 { .lomax = 196000, .pd = 0xb9, .d = 0x24 },
144 { .lomax = 220000, .pd = 0xb8, .d = 0x20 },
145 { .lomax = 252000, .pd = 0xb3, .d = 0x1c },
146 { .lomax = 271000, .pd = 0xad, .d = 0x1a },
147 { .lomax = 294000, .pd = 0xac, .d = 0x18 },
148 { .lomax = 321000, .pd = 0xab, .d = 0x16 },
149 { .lomax = 353000, .pd = 0xaa, .d = 0x14 },
150 { .lomax = 392000, .pd = 0xa9, .d = 0x12 },
151 { .lomax = 441000, .pd = 0xa8, .d = 0x10 },
152 { .lomax = 505000, .pd = 0xa3, .d = 0x0e },
153 { .lomax = 543000, .pd = 0x9d, .d = 0x0d },
154 { .lomax = 589000, .pd = 0x9c, .d = 0x0c },
155 { .lomax = 642000, .pd = 0x9b, .d = 0x0b },
156 { .lomax = 707000, .pd = 0x9a, .d = 0x0a },
157 { .lomax = 785000, .pd = 0x99, .d = 0x09 },
158 { .lomax = 883000, .pd = 0x98, .d = 0x08 },
159 { .lomax = 1010000, .pd = 0x93, .d = 0x07 },
160 { .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
164 { .lomax = 33813, .pd = 0xdd, .d = 0xd0 },
165 { .lomax = 36625, .pd = 0xdc, .d = 0xc0 },
166 { .lomax = 39938, .pd = 0xdb, .d = 0xb0 },
167 { .lomax = 43938, .pd = 0xda, .d = 0xa0 },
168 { .lomax = 48813, .pd = 0xd9, .d = 0x90 },
169 { .lomax = 54938, .pd = 0xd8, .d = 0x80 },
170 { .lomax = 62813, .pd = 0xd3, .d = 0x70 },
171 { .lomax = 67625, .pd = 0xcd, .d = 0x68 },
172 { .lomax = 73250, .pd = 0xcc, .d = 0x60 },
173 { .lomax = 79875, .pd = 0xcb, .d = 0x58 },
174 { .lomax = 87875, .pd = 0xca, .d = 0x50 },
175 { .lomax = 97625, .pd = 0xc9, .d = 0x48 },
176 { .lomax = 109875, .pd = 0xc8, .d = 0x40 },
177 { .lomax = 125625, .pd = 0xc3, .d = 0x38 },
178 { .lomax = 135250, .pd = 0xbd, .d = 0x34 },
179 { .lomax = 146500, .pd = 0xbc, .d = 0x30 },
180 { .lomax = 159750, .pd = 0xbb, .d = 0x2c },
181 { .lomax = 175750, .pd = 0xba, .d = 0x28 },
182 { .lomax = 195250, .pd = 0xb9, .d = 0x24 },
183 { .lomax = 219750, .pd = 0xb8, .d = 0x20 },
184 { .lomax = 251250, .pd = 0xb3, .d = 0x1c },
185 { .lomax = 270500, .pd = 0xad, .d = 0x1a },
186 { .lomax = 293000, .pd = 0xac, .d = 0x18 },
187 { .lomax = 319500, .pd = 0xab, .d = 0x16 },
188 { .lomax = 351500, .pd = 0xaa, .d = 0x14 },
189 { .lomax = 390500, .pd = 0xa9, .d = 0x12 },
190 { .lomax = 439500, .pd = 0xa8, .d = 0x10 },
191 { .lomax = 502500, .pd = 0xa3, .d = 0x0e },
192 { .lomax = 541000, .pd = 0x9d, .d = 0x0d },
193 { .lomax = 586000, .pd = 0x9c, .d = 0x0c },
194 { .lomax = 639000, .pd = 0x9b, .d = 0x0b },
195 { .lomax = 703000, .pd = 0x9a, .d = 0x0a },
196 { .lomax = 781000, .pd = 0x99, .d = 0x09 },
197 { .lomax = 879000, .pd = 0x98, .d = 0x08 },
198 { .lomax = 0, .pd = 0x00, .d = 0x00 }, /* end */
1109 *post_div = map[i].pd; tda18271_lookup_pll_map()
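tda18271_lookup_pll_map() walks one of the tables above until the target LO frequency fits under an entry's lomax, then hands back that entry's .pd (post divider) and .d (divider) bytes; the all-zero row is the end marker. A sketch of the walk over a truncated copy of the first table, with lookup_pll() as our own name:

#include <stdint.h>
#include <stdio.h>

struct pll_map { uint32_t lomax; uint8_t pd, d; };

static const struct pll_map map[] = {
	{ 32000, 0x5f, 0xf0 },
	{ 35000, 0x5e, 0xe0 },
	{ 37000, 0x5d, 0xd0 },
	{ 0,     0x00, 0x00 },	/* end marker, as in the driver tables */
};

static int lookup_pll(uint32_t freq, uint8_t *post_div, uint8_t *div)
{
	int i;

	for (i = 0; map[i].lomax; i++) {
		if (freq <= map[i].lomax) {	/* first entry that fits */
			*post_div = map[i].pd;
			*div = map[i].d;
			return 0;
		}
	}
	return -1;	/* ran off the end of the table */
}

int main(void)
{
	uint8_t pd, d;

	if (!lookup_pll(34000, &pd, &d))
		printf("pd=0x%02x d=0x%02x\n", pd, d);	/* the 35000 row */
	return 0;
}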
/linux-4.1.27/drivers/ata/
H A Dpata_pxa.c62 struct pata_pxa_data *pd = qc->ap->private_data; pxa_load_dmac() local
73 pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr + pxa_load_dmac()
74 ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc)); pxa_load_dmac()
76 pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 | pxa_load_dmac()
80 pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr; pxa_load_dmac()
81 pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr; pxa_load_dmac()
82 pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR | pxa_load_dmac()
85 pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr; pxa_load_dmac()
86 pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr; pxa_load_dmac()
87 pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR | pxa_load_dmac()
93 pd->dma_desc_id++; pxa_load_dmac()
99 DALGN |= (1 << pd->dma_dreq); pxa_load_dmac()
107 struct pata_pxa_data *pd = qc->ap->private_data; pxa_qc_prep() local
114 pd->dma_desc_id = 0; pxa_qc_prep()
116 DCSR(pd->dma_channel) = 0; pxa_qc_prep()
117 DALGN &= ~(1 << pd->dma_dreq); pxa_qc_prep()
122 pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP; pxa_qc_prep()
125 pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN; pxa_qc_prep()
127 DDADR(pd->dma_channel) = pd->dma_desc_addr; pxa_qc_prep()
128 DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel; pxa_qc_prep()
146 struct pata_pxa_data *pd = qc->ap->private_data; pxa_bmdma_start() local
147 init_completion(&pd->dma_done); pxa_bmdma_start()
148 DCSR(pd->dma_channel) = DCSR_RUN; pxa_bmdma_start()
156 struct pata_pxa_data *pd = qc->ap->private_data; pxa_bmdma_stop() local
158 if ((DCSR(pd->dma_channel) & DCSR_RUN) && pxa_bmdma_stop()
159 wait_for_completion_timeout(&pd->dma_done, HZ)) pxa_bmdma_stop()
162 DCSR(pd->dma_channel) = 0; pxa_bmdma_stop()
171 struct pata_pxa_data *pd = ap->private_data; pxa_bmdma_status() local
174 if (pd->dma_dcsr & DCSR_BUSERR) pxa_bmdma_status()
222 struct pata_pxa_data *pd = ap->private_data; pxa_ata_dma_irq() local
224 pd->dma_dcsr = DCSR(dma); pxa_ata_dma_irq()
225 DCSR(dma) = pd->dma_dcsr; pxa_ata_dma_irq()
227 if (pd->dma_dcsr & DCSR_STOPSTATE) pxa_ata_dma_irq()
228 complete(&pd->dma_done); pxa_ata_dma_irq()
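pxa_qc_prep() and pxa_load_dmac() above build a linked DMA descriptor chain: each descriptor's ddadr holds the physical address of the next descriptor, the final one is terminated with DDADR_STOP, and DCMD_ENDIRQEN on that last descriptor requests the completion interrupt that pxa_ata_dma_irq() later acknowledges. A reduced sketch of just the chaining step; the struct layout mirrors the excerpt, the two bit values are placeholders, and the real code also programs burst size, address-increment flags and the DRCMR request mapping:

    #include <stdint.h>
    #include <stddef.h>

    #define DDADR_STOP      (1u << 0)       /* placeholder stop bit */
    #define DCMD_ENDIRQEN   (1u << 21)      /* placeholder end-of-chain IRQ bit */

    /* Hypothetical mirror of struct pxa_dma_desc. */
    struct dma_desc {
            uint32_t ddadr;         /* physical address of the next descriptor */
            uint32_t dsadr;         /* source address */
            uint32_t dtadr;         /* target address */
            uint32_t dcmd;          /* command/length word */
    };

    /* Link n (>= 1) descriptors whose dsadr/dtadr/dcmd are already set;
     * desc_phys is the DMA (physical) address of desc[0]. */
    static void chain_descriptors(struct dma_desc *desc, uint32_t desc_phys,
                                  size_t n)
    {
            for (size_t i = 0; i + 1 < n; i++)
                    desc[i].ddadr = desc_phys +
                            (uint32_t)((i + 1) * sizeof(struct dma_desc));

            desc[n - 1].ddadr = DDADR_STOP;         /* terminate the chain */
            desc[n - 1].dcmd |= DCMD_ENDIRQEN;      /* IRQ on the last one */
    }

    int main(void)
    {
            static struct dma_desc d[3];

            chain_descriptors(d, 0x1000, 3);
            return d[2].ddadr != DDADR_STOP;        /* exits 0 on success */
    }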
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_file_ops.c97 struct ipath_portdata *pd = port_fp(fp); ipath_get_base_info() local
100 struct ipath_devdata *dd = pd->port_dd; ipath_get_base_info()
105 subport_cnt = pd->port_subport_cnt; ipath_get_base_info()
133 ret = dd->ipath_f_get_base_info(pd, kinfo); ipath_get_base_info()
145 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; ipath_get_base_info()
146 kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk; ipath_get_base_info()
148 pd->port_rcvegrbuf_chunks; ipath_get_base_info()
177 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; ipath_get_base_info()
178 kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; ipath_get_base_info()
179 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; ipath_get_base_info()
185 kinfo->spi_piocnt = pd->port_piocnt; ipath_get_base_info()
186 kinfo->spi_piobufbase = (u64) pd->port_piobufs; ipath_get_base_info()
188 dd->ipath_ureg_align * pd->port_port; ipath_get_base_info()
190 kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) + ipath_get_base_info()
191 (pd->port_piocnt % subport_cnt); ipath_get_base_info()
193 kinfo->spi_piobufbase = (u64) pd->port_piobufs + ipath_get_base_info()
195 (pd->port_piocnt - kinfo->spi_piocnt); ipath_get_base_info()
199 kinfo->spi_piocnt = pd->port_piocnt / subport_cnt; ipath_get_base_info()
200 kinfo->spi_piobufbase = (u64) pd->port_piobufs + ipath_get_base_info()
206 dd->ipath_ureg_align * pd->port_port; ipath_get_base_info()
211 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase + ipath_get_base_info()
214 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + ipath_get_base_info()
215 pd->port_rcvhdrq_size * subport_fp(fp)); ipath_get_base_info()
217 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + ipath_get_base_info()
218 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * ipath_get_base_info()
222 cvt_kvaddr(pd->subport_uregbase); ipath_get_base_info()
224 cvt_kvaddr(pd->subport_rcvegrbuf); ipath_get_base_info()
226 cvt_kvaddr(pd->subport_rcvhdr_base); ipath_get_base_info()
251 kinfo->spi_port = pd->port_port; ipath_get_base_info()
271 * @pd: the port
295 static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, ipath_tid_update() argument
301 struct ipath_devdata *dd = pd->port_dd; ipath_tid_update()
324 porttid = pd->port_port * dd->ipath_rcvtidcnt; ipath_tid_update()
325 if (!pd->port_subport_cnt) { ipath_tid_update()
327 tid = pd->port_tidcursor; ipath_tid_update()
330 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + ipath_tid_update()
331 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); ipath_tid_update()
336 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; ipath_tid_update()
347 pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; ipath_tid_update()
358 pd->port_port, cnt, tid, tidbase); ipath_tid_update()
483 if (!pd->port_subport_cnt) ipath_tid_update()
484 pd->port_tidcursor = tid; ipath_tid_update()
498 * @pd: the port
513 static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, ipath_tid_free() argument
518 struct ipath_devdata *dd = pd->port_dd; ipath_tid_free()
533 porttid = pd->port_port * dd->ipath_rcvtidcnt; ipath_tid_free()
534 if (!pd->port_subport_cnt) ipath_tid_free()
537 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + ipath_tid_free()
538 (dd->ipath_rcvtidcnt % pd->port_subport_cnt); ipath_tid_free()
541 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; ipath_tid_free()
554 "set is %d, porttid %u\n", pd->port_port, ti->tidcnt, ipath_tid_free()
572 pid_nr(pd->port_pid), tid); ipath_tid_free()
596 * @pd: the port
610 static int ipath_set_part_key(struct ipath_portdata *pd, u16 key) ipath_set_part_key() argument
612 struct ipath_devdata *dd = pd->port_dd; ipath_set_part_key()
625 pd->port_port, key, dd->ipath_pkeys[0], ipath_set_part_key()
633 pd->port_port); ipath_set_part_key()
646 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { ipath_set_part_key()
647 if (!pd->port_pkeys[i] && pidx == -1) ipath_set_part_key()
649 if (pd->port_pkeys[i] == key) { ipath_set_part_key()
652 pd->port_port, key); ipath_set_part_key()
659 "can't set %x\n", pd->port_port, key); ipath_set_part_key()
672 pd->port_pkeys[pidx] = key; ipath_set_part_key()
675 pd->port_port, key, i, ipath_set_part_key()
702 "can't set %x\n", pd->port_port, key); ipath_set_part_key()
713 pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key; ipath_set_part_key()
721 pd->port_port, key, i, pidx, ipath_set_part_key()
731 "can't set %x\n", pd->port_port, key); ipath_set_part_key()
740 * @pd: the port
748 static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, ipath_manage_rcvq() argument
751 struct ipath_devdata *dd = pd->port_dd; ipath_manage_rcvq()
755 pd->port_port, subport); ipath_manage_rcvq()
772 if (pd->port_rcvhdrtail_kvaddr) ipath_manage_rcvq()
773 ipath_clear_rcvhdrtail(pd); ipath_manage_rcvq()
774 set_bit(dd->ipath_r_portenable_shift + pd->port_port, ipath_manage_rcvq()
777 clear_bit(dd->ipath_r_portenable_shift + pd->port_port, ipath_manage_rcvq()
791 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); ipath_manage_rcvq()
798 static void ipath_clean_part_key(struct ipath_portdata *pd, ipath_clean_part_key() argument
810 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { ipath_clean_part_key()
811 if (!pd->port_pkeys[i]) ipath_clean_part_key()
814 pd->port_pkeys[i]); ipath_clean_part_key()
818 (pd->port_pkeys[i] & 0x7fff)) ipath_clean_part_key()
823 pd->port_port, ipath_clean_part_key()
824 pd->port_pkeys[i], j); ipath_clean_part_key()
831 "but ref still %d\n", pd->port_port, ipath_clean_part_key()
832 pd->port_pkeys[i], j, ipath_clean_part_key()
836 pd->port_pkeys[i] = 0; ipath_clean_part_key()
844 "new pkey reg %llx\n", pd->port_port, ipath_clean_part_key()
858 static void init_user_egr_sizes(struct ipath_portdata *pd) init_user_egr_sizes() argument
860 struct ipath_devdata *dd = pd->port_dd; init_user_egr_sizes()
877 pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; init_user_egr_sizes()
878 pd->port_rcvegrbufs_perchunk = egrperchunk; init_user_egr_sizes()
879 pd->port_rcvegrbuf_size = size; init_user_egr_sizes()
884 * @pd: the port to allocate TID buffers for
894 static int ipath_create_user_egr(struct ipath_portdata *pd) ipath_create_user_egr() argument
896 struct ipath_devdata *dd = pd->port_dd; ipath_create_user_egr()
912 egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt; ipath_create_user_egr()
917 chunk = pd->port_rcvegrbuf_chunks; ipath_create_user_egr()
918 egrperchunk = pd->port_rcvegrbufs_perchunk; ipath_create_user_egr()
919 size = pd->port_rcvegrbuf_size; ipath_create_user_egr()
920 pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), ipath_create_user_egr()
922 if (!pd->port_rcvegrbuf) { ipath_create_user_egr()
926 pd->port_rcvegrbuf_phys = ipath_create_user_egr()
927 kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), ipath_create_user_egr()
929 if (!pd->port_rcvegrbuf_phys) { ipath_create_user_egr()
933 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { ipath_create_user_egr()
935 pd->port_rcvegrbuf[e] = dma_alloc_coherent( ipath_create_user_egr()
936 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e], ipath_create_user_egr()
939 if (!pd->port_rcvegrbuf[e]) { ipath_create_user_egr()
945 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; ipath_create_user_egr()
947 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { ipath_create_user_egr()
948 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; ipath_create_user_egr()
967 for (e = 0; e < pd->port_rcvegrbuf_chunks && ipath_create_user_egr()
968 pd->port_rcvegrbuf[e]; e++) { ipath_create_user_egr()
970 pd->port_rcvegrbuf[e], ipath_create_user_egr()
971 pd->port_rcvegrbuf_phys[e]); ipath_create_user_egr()
974 kfree(pd->port_rcvegrbuf_phys); ipath_create_user_egr()
975 pd->port_rcvegrbuf_phys = NULL; ipath_create_user_egr()
977 kfree(pd->port_rcvegrbuf); ipath_create_user_egr()
978 pd->port_rcvegrbuf = NULL; ipath_create_user_egr()
986 struct ipath_portdata *pd, unsigned len, int write_ok, ipath_mmap_mem()
989 struct ipath_devdata *dd = pd->port_dd; ipath_mmap_mem()
1018 "bytes r%c failed: %d\n", what, pd->port_port, ipath_mmap_mem()
1022 "r%c\n", what, pd->port_port, pfn, len, ipath_mmap_mem()
1058 struct ipath_portdata *pd, mmap_piobufs()
1102 struct ipath_portdata *pd) mmap_rcvegrbufs()
1104 struct ipath_devdata *dd = pd->port_dd; mmap_rcvegrbufs()
1110 size = pd->port_rcvegrbuf_size; mmap_rcvegrbufs()
1111 total_size = pd->port_rcvegrbuf_chunks * size; mmap_rcvegrbufs()
1132 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { mmap_rcvegrbufs()
1133 pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; mmap_rcvegrbufs()
1167 struct ipath_portdata *pd, unsigned subport) mmap_kvaddr()
1176 if (!pd->port_subport_cnt) mmap_kvaddr()
1179 dd = pd->port_dd; mmap_kvaddr()
1180 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; mmap_kvaddr()
1187 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { mmap_kvaddr()
1188 addr = pd->subport_uregbase; mmap_kvaddr()
1189 size = PAGE_SIZE * pd->port_subport_cnt; mmap_kvaddr()
1190 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { mmap_kvaddr()
1191 addr = pd->subport_rcvhdr_base; mmap_kvaddr()
1192 size = pd->port_rcvhdrq_size * pd->port_subport_cnt; mmap_kvaddr()
1193 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { mmap_kvaddr()
1194 addr = pd->subport_rcvegrbuf; mmap_kvaddr()
1195 size *= pd->port_subport_cnt; mmap_kvaddr()
1196 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + mmap_kvaddr()
1198 addr = pd->subport_uregbase + PAGE_SIZE * subport; mmap_kvaddr()
1200 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + mmap_kvaddr()
1201 pd->port_rcvhdrq_size * subport)) { mmap_kvaddr()
1202 addr = pd->subport_rcvhdr_base + mmap_kvaddr()
1203 pd->port_rcvhdrq_size * subport; mmap_kvaddr()
1204 size = pd->port_rcvhdrq_size; mmap_kvaddr()
1205 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + mmap_kvaddr()
1207 addr = pd->subport_rcvegrbuf + size * subport; mmap_kvaddr()
1252 struct ipath_portdata *pd; ipath_mmap() local
1258 pd = port_fp(fp); ipath_mmap()
1259 if (!pd) { ipath_mmap()
1263 dd = pd->port_dd; ipath_mmap()
1287 pd->port_port, subport_fp(fp)); ipath_mmap()
1294 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); ipath_mmap()
1301 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; ipath_mmap()
1302 if (!pd->port_subport_cnt) { ipath_mmap()
1304 piocnt = pd->port_piocnt; ipath_mmap()
1305 piobufs = pd->port_piobufs; ipath_mmap()
1308 piocnt = (pd->port_piocnt / pd->port_subport_cnt) + ipath_mmap()
1309 (pd->port_piocnt % pd->port_subport_cnt); ipath_mmap()
1310 piobufs = pd->port_piobufs + ipath_mmap()
1311 dd->ipath_palign * (pd->port_piocnt - piocnt); ipath_mmap()
1316 piocnt = pd->port_piocnt / pd->port_subport_cnt; ipath_mmap()
1317 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; ipath_mmap()
1323 ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); ipath_mmap()
1326 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, ipath_mmap()
1329 else if (pgaddr == pd->port_rcvegr_phys) ipath_mmap()
1330 ret = mmap_rcvegrbufs(vma, pd); ipath_mmap()
1331 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) ipath_mmap()
1337 ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, ipath_mmap()
1338 pd->port_rcvhdrq, ipath_mmap()
1340 else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) ipath_mmap()
1342 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, ipath_mmap()
1343 pd->port_rcvhdrtail_kvaddr, ipath_mmap()
1359 static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd) ipath_poll_hdrqfull() argument
1363 if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && ipath_poll_hdrqfull()
1364 pd->port_hdrqfull != pd->port_hdrqfull_poll) { ipath_poll_hdrqfull()
1366 pd->port_hdrqfull_poll = pd->port_hdrqfull; ipath_poll_hdrqfull()
1372 static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, ipath_poll_urgent() argument
1379 dd = pd->port_dd; ipath_poll_urgent()
1383 pollflag = ipath_poll_hdrqfull(pd); ipath_poll_urgent()
1385 if (pd->port_urgent != pd->port_urgent_poll) { ipath_poll_urgent()
1387 pd->port_urgent_poll = pd->port_urgent; ipath_poll_urgent()
1392 set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag); ipath_poll_urgent()
1395 poll_wait(fp, &pd->port_wait, pt); ipath_poll_urgent()
1401 static unsigned int ipath_poll_next(struct ipath_portdata *pd, ipath_poll_next() argument
1410 dd = pd->port_dd; ipath_poll_next()
1414 pollflag = ipath_poll_hdrqfull(pd); ipath_poll_next()
1416 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); ipath_poll_next()
1417 if (pd->port_rcvhdrtail_kvaddr) ipath_poll_next()
1418 tail = ipath_get_rcvhdrtail(pd); ipath_poll_next()
1420 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); ipath_poll_next()
1426 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); ipath_poll_next()
1430 set_bit(pd->port_port + dd->ipath_r_intravail_shift, ipath_poll_next()
1439 pd->port_port); ipath_poll_next()
1441 poll_wait(fp, &pd->port_wait, pt); ipath_poll_next()
1450 struct ipath_portdata *pd; ipath_poll() local
1453 pd = port_fp(fp); ipath_poll()
1454 if (!pd) ipath_poll()
1456 else if (pd->poll_type & IPATH_POLL_TYPE_URGENT) ipath_poll()
1457 pollflag = ipath_poll_urgent(pd, fp, pt); ipath_poll()
1459 pollflag = ipath_poll_next(pd, fp, pt); ipath_poll()
1497 struct ipath_portdata *pd, init_subports()
1540 pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports); init_subports()
1541 if (!pd->subport_uregbase) { init_subports()
1545 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ init_subports()
1548 pd->subport_rcvhdr_base = vzalloc(size); init_subports()
1549 if (!pd->subport_rcvhdr_base) { init_subports()
1554 pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks * init_subports()
1555 pd->port_rcvegrbuf_size * init_subports()
1557 if (!pd->subport_rcvegrbuf) { init_subports()
1562 pd->port_subport_cnt = uinfo->spu_subport_cnt; init_subports()
1563 pd->port_subport_id = uinfo->spu_subport_id; init_subports()
1564 pd->active_slaves = 1; init_subports()
1565 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); init_subports()
1569 vfree(pd->subport_rcvhdr_base); init_subports()
1571 vfree(pd->subport_uregbase); init_subports()
1572 pd->subport_uregbase = NULL; init_subports()
1581 struct ipath_portdata *pd; try_alloc_port() local
1584 if (!(pd = dd->ipath_pd[port])) { try_alloc_port()
1587 pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); try_alloc_port()
1597 if (!pd || !ptmp) { try_alloc_port()
1601 kfree(pd); try_alloc_port()
1605 dd->ipath_pd[port] = pd; try_alloc_port()
1611 if (!pd->port_cnt) { try_alloc_port()
1612 pd->userversion = uinfo->spu_userversion; try_alloc_port()
1613 init_user_egr_sizes(pd); try_alloc_port()
1614 if ((ret = init_subports(dd, pd, uinfo)) != 0) try_alloc_port()
1619 pd->port_cnt = 1; try_alloc_port()
1620 port_fp(fp) = pd; try_alloc_port()
1621 pd->port_pid = get_pid(task_pid(current)); try_alloc_port()
1622 strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); try_alloc_port()
1790 struct ipath_portdata *pd = dd->ipath_pd[i]; find_shared_port() local
1793 if (!pd || !pd->port_cnt) find_shared_port()
1796 if (pd->port_subport_id != uinfo->spu_subport_id) find_shared_port()
1799 if (pd->port_subport_cnt != uinfo->spu_subport_cnt || find_shared_port()
1800 pd->userversion != uinfo->spu_userversion || find_shared_port()
1801 pd->port_cnt >= pd->port_subport_cnt) { find_shared_port()
1805 port_fp(fp) = pd; find_shared_port()
1806 subport_fp(fp) = pd->port_cnt++; find_shared_port()
1807 pd->port_subpid[subport_fp(fp)] = find_shared_port()
1810 pd->active_slaves |= 1 << subport_fp(fp); find_shared_port()
1815 pd->port_comm, pid_nr(pd->port_pid), find_shared_port()
1816 dd->ipath_unit, pd->port_port); find_shared_port()
1884 const struct ipath_portdata *pd = fd->pd; ipath_assign_port() local
1885 const struct ipath_devdata *dd = pd->port_dd; ipath_assign_port()
1889 pd->port_port, ipath_assign_port()
1907 struct ipath_portdata *pd = port_fp(fp); ipath_do_user_init() local
1913 ret = wait_event_interruptible(pd->port_wait, ipath_do_user_init()
1914 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); ipath_do_user_init()
1918 dd = pd->port_dd; ipath_do_user_init()
1929 if (pd->port_port <= dd->ipath_ports_extrabuf) ipath_do_user_init()
1930 pd->port_piocnt = dd->ipath_pbufsport + 1; ipath_do_user_init()
1932 pd->port_piocnt = dd->ipath_pbufsport; ipath_do_user_init()
1935 if (pd->port_port <= dd->ipath_ports_extrabuf) ipath_do_user_init()
1936 pd->port_pio_base = (dd->ipath_pbufsport + 1) ipath_do_user_init()
1937 * (pd->port_port - 1); ipath_do_user_init()
1939 pd->port_pio_base = dd->ipath_ports_extrabuf + ipath_do_user_init()
1940 dd->ipath_pbufsport * (pd->port_port - 1); ipath_do_user_init()
1941 pd->port_piobufs = dd->ipath_piobufbase + ipath_do_user_init()
1942 pd->port_pio_base * dd->ipath_palign; ipath_do_user_init()
1944 " first pio %u\n", pd->port_port, pd->port_piobufs, ipath_do_user_init()
1945 pd->port_piocnt, pd->port_pio_base); ipath_do_user_init()
1946 ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0); ipath_do_user_init()
1950 * array for time being. If pd->port_port > chip-supported, ipath_do_user_init()
1954 ret = ipath_create_rcvhdrq(dd, pd); ipath_do_user_init()
1956 ret = ipath_create_user_egr(pd); ipath_do_user_init()
1965 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); ipath_do_user_init()
1966 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); ipath_do_user_init()
1967 pd->port_lastrcvhdrqtail = -1; ipath_do_user_init()
1969 pd->port_port, head32); ipath_do_user_init()
1970 pd->port_tidcursor = 0; /* start at beginning after open */ ipath_do_user_init()
1973 pd->port_urgent = 0; ipath_do_user_init()
1974 pd->port_urgent_poll = 0; ipath_do_user_init()
1975 pd->port_hdrqfull_poll = pd->port_hdrqfull; ipath_do_user_init()
1988 set_bit(dd->ipath_r_portenable_shift + pd->port_port, ipath_do_user_init()
1991 if (pd->port_rcvhdrtail_kvaddr) ipath_do_user_init()
1992 ipath_clear_rcvhdrtail(pd); ipath_do_user_init()
2000 if (pd->port_subport_cnt) { ipath_do_user_init()
2001 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); ipath_do_user_init()
2002 wake_up(&pd->port_wait); ipath_do_user_init()
2010 * @pd: port
2015 static void unlock_expected_tids(struct ipath_portdata *pd) unlock_expected_tids() argument
2017 struct ipath_devdata *dd = pd->port_dd; unlock_expected_tids()
2018 int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt; unlock_expected_tids()
2022 pd->port_port); unlock_expected_tids()
2038 pd->port_port, cnt); unlock_expected_tids()
2051 struct ipath_portdata *pd; ipath_close() local
2064 pd = fd->pd; ipath_close()
2065 if (!pd) { ipath_close()
2070 dd = pd->port_dd; ipath_close()
2076 if (--pd->port_cnt) { ipath_close()
2082 pd->active_slaves &= ~(1 << fd->subport); ipath_close()
2083 put_pid(pd->port_subpid[fd->subport]); ipath_close()
2084 pd->port_subpid[fd->subport] = NULL; ipath_close()
2090 port = pd->port_port; ipath_close()
2092 pid = pd->port_pid; ipath_close()
2093 pd->port_pid = NULL; ipath_close()
2096 if (pd->port_rcvwait_to || pd->port_piowait_to ipath_close()
2097 || pd->port_rcvnowait || pd->port_pionowait) { ipath_close()
2100 pd->port_port, pd->port_rcvwait_to, ipath_close()
2101 pd->port_piowait_to, pd->port_rcvnowait, ipath_close()
2102 pd->port_pionowait); ipath_close()
2103 pd->port_rcvwait_to = pd->port_piowait_to = ipath_close()
2104 pd->port_rcvnowait = pd->port_pionowait = 0; ipath_close()
2106 if (pd->port_flag) { ipath_close()
2108 pd->port_port, pd->port_flag); ipath_close()
2109 pd->port_flag = 0; ipath_close()
2116 clear_bit(pd->port_port + dd->ipath_r_intravail_shift, ipath_close()
2125 ipath_clean_part_key(pd, dd); ipath_close()
2139 pd->port_port, dd->ipath_dummy_hdrq_phys); ipath_close()
2141 ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt); ipath_close()
2142 ipath_chg_pioavailkernel(dd, pd->port_pio_base, ipath_close()
2143 pd->port_piocnt, 1); ipath_close()
2145 dd->ipath_f_clear_tids(dd, pd->port_port); ipath_close()
2148 unlock_expected_tids(pd); ipath_close()
2151 pd->port_comm, pid_nr(pid), ipath_close()
2157 ipath_free_pddata(dd, pd); /* after releasing the mutex */ ipath_close()
2164 static int ipath_port_info(struct ipath_portdata *pd, u16 subport, ipath_port_info() argument
2174 info.unit = pd->port_dd->ipath_unit; ipath_port_info()
2175 info.port = pd->port_port; ipath_port_info()
2178 if (ipath_supports_subports(pd->userversion >> 16, ipath_port_info()
2179 pd->userversion & 0xffff)) { ipath_port_info()
2181 info.num_ports = pd->port_dd->ipath_cfgports - 1; ipath_port_info()
2182 info.num_subports = pd->port_subport_cnt; ipath_port_info()
2197 static int ipath_get_slave_info(struct ipath_portdata *pd, ipath_get_slave_info() argument
2202 if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) ipath_get_slave_info()
2240 struct ipath_portdata *pd; ipath_write() local
2339 pd = port_fp(fp); ipath_write()
2340 if (!pd && cmd.type != __IPATH_CMD_USER_INIT && ipath_write()
2368 ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); ipath_write()
2371 ret = ipath_port_info(pd, subport_fp(fp), ipath_write()
2376 ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); ipath_write()
2379 ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); ipath_write()
2382 ret = ipath_set_part_key(pd, cmd.cmd.part_key); ipath_write()
2385 ret = ipath_get_slave_info(pd, ipath_write()
2390 ipath_force_pio_avail_update(pd->port_dd); ipath_write()
2393 pd->poll_type = cmd.cmd.poll_type; ipath_write()
2397 ipath_enable_armlaunch(pd->port_dd); ipath_write()
2399 ipath_disable_armlaunch(pd->port_dd); ipath_write()
2407 ret = ipath_sdma_get_complete(pd->port_dd, ipath_write()
2425 struct ipath_portdata *pd = port_fp(filp); ipath_write_iter() local
2431 return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs); ipath_write_iter()
985 ipath_mmap_mem(struct vm_area_struct *vma, struct ipath_portdata *pd, unsigned len, int write_ok, void *kvaddr, char *what) ipath_mmap_mem() argument
1056 mmap_piobufs(struct vm_area_struct *vma, struct ipath_devdata *dd, struct ipath_portdata *pd, unsigned piobufs, unsigned piocnt) mmap_piobufs() argument
1101 mmap_rcvegrbufs(struct vm_area_struct *vma, struct ipath_portdata *pd) mmap_rcvegrbufs() argument
1166 mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, struct ipath_portdata *pd, unsigned subport) mmap_kvaddr() argument
1496 init_subports(struct ipath_devdata *dd, struct ipath_portdata *pd, const struct ipath_user_info *uinfo) init_subports() argument
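A recurring calculation in ipath_get_base_info() and ipath_mmap() above is the subport split of a port's PIO buffers: the master (subport 0) receives an even share plus the remainder and, per the (port_piocnt - piocnt) offset, its buffers follow the slaves' shares in the buffer array; each slave receives an even share. A standalone check of that count arithmetic, with illustrative names:

    #include <assert.h>

    /* Per-subport PIO buffer count as computed in the excerpt: the
     * master (subport 0) gets an even share plus the remainder; each
     * slave gets an even share. */
    static unsigned subport_piocnt(unsigned piocnt, unsigned subport_cnt,
                                   unsigned subport)
    {
            unsigned share = piocnt / subport_cnt;

            return subport == 0 ? share + piocnt % subport_cnt : share;
    }

    int main(void)
    {
            unsigned piocnt = 130, cnt = 4, total = 0;

            for (unsigned s = 0; s < cnt; s++)
                    total += subport_piocnt(piocnt, cnt, s);
            assert(total == piocnt);        /* the shares tile exactly */
            return 0;
    }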
H A Dipath_mr.c56 * @pd: protection domain for this memory region
63 struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) ipath_get_dma_mr() argument
125 * @pd: protection domain for this memory region
132 struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd, ipath_reg_phys_mr() argument
140 mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); ipath_reg_phys_mr()
146 mr->mr.pd = pd; ipath_reg_phys_mr()
176 * @pd: protection domain for this memory region
185 struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ipath_reg_user_mr() argument
200 umem = ib_umem_get(pd->uobject->context, start, length, ipath_reg_user_mr()
206 mr = alloc_mr(n, &to_idev(pd->device)->lk_table); ipath_reg_user_mr()
213 mr->mr.pd = pd; ipath_reg_user_mr()
276 * @pd: the protection domain for this memory region
282 struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags, ipath_alloc_fmr() argument
308 if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) ipath_alloc_fmr()
315 fmr->mr.pd = pd; ipath_alloc_fmr()
H A Dipath_keys.c136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); ipath_lkey_ok() local
138 if (pd->user) { ipath_lkey_ok()
151 qp->ibqp.pd != mr->pd)) { ipath_lkey_ok()
216 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); ipath_rkey_ok() local
218 if (pd->user) { ipath_rkey_ok()
234 qp->ibqp.pd != mr->pd)) { ipath_rkey_ok()
H A Dipath_stats.c144 struct ipath_portdata *pd = dd->ipath_pd[0]; ipath_qcheck() local
150 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) { ipath_qcheck()
152 pd->port_hdrqfull - ipath_qcheck()
154 dd->ipath_p0_hdrqfull = pd->port_hdrqfull; ipath_qcheck()
186 hdrqtail = ipath_get_hdrqtail(pd); ipath_qcheck()
187 if (pd->port_head != hdrqtail) { ipath_qcheck()
193 pd->port_head, hdrqtail, ipath_qcheck()
197 dd->ipath_rhdrhead_intr_off, pd->port_port); ipath_qcheck()
337 struct ipath_portdata *pd = dd->ipath_pd[i]; ipath_get_faststats() local
339 if (pd && pd->port_lastrcvhdrqtail != -1) ipath_get_faststats()
340 pd->port_lastrcvhdrqtail = -1; ipath_get_faststats()
H A Dipath_driver.c728 struct ipath_portdata *pd = tmp[port]; cleanup_device() local
730 ipath_free_pddata(dd, pd); cleanup_device()
1141 * @pd: the infinipath port
1145 void ipath_kreceive(struct ipath_portdata *pd) ipath_kreceive() argument
1147 struct ipath_devdata *dd = pd->port_dd; ipath_kreceive()
1158 l = pd->port_head; ipath_kreceive()
1159 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset; ipath_kreceive()
1163 if (seq != pd->port_seq_cnt) ipath_kreceive()
1167 hdrqtail = ipath_get_rcvhdrtail(pd); ipath_kreceive()
1254 rhf_addr = (__le32 *) pd->port_rcvhdrq + ipath_kreceive()
1259 if (++pd->port_seq_cnt > 13) ipath_kreceive()
1260 pd->port_seq_cnt = 1; ipath_kreceive()
1261 if (seq != pd->port_seq_cnt) ipath_kreceive()
1277 pd->port_port); ipath_kreceive()
1280 etail, pd->port_port); ipath_kreceive()
1296 u32 hqtail = ipath_get_rcvhdrtail(pd); ipath_kreceive()
1306 pd->port_head = l; ipath_kreceive()
1756 * @pd: the port data
1763 struct ipath_portdata *pd) ipath_create_rcvhdrq()
1767 if (!pd->port_rcvhdrq) { ipath_create_rcvhdrq()
1773 pd->port_rcvhdrq = dma_alloc_coherent( ipath_create_rcvhdrq()
1774 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys, ipath_create_rcvhdrq()
1777 if (!pd->port_rcvhdrq) { ipath_create_rcvhdrq()
1780 amt, pd->port_port); ipath_create_rcvhdrq()
1786 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent( ipath_create_rcvhdrq()
1789 if (!pd->port_rcvhdrtail_kvaddr) { ipath_create_rcvhdrq()
1792 "failed\n", pd->port_port); ipath_create_rcvhdrq()
1795 pd->port_rcvhdrq, ipath_create_rcvhdrq()
1796 pd->port_rcvhdrq_phys); ipath_create_rcvhdrq()
1797 pd->port_rcvhdrq = NULL; ipath_create_rcvhdrq()
1800 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail; ipath_create_rcvhdrq()
1802 "physical\n", pd->port_port, ipath_create_rcvhdrq()
1806 pd->port_rcvhdrq_size = amt; ipath_create_rcvhdrq()
1810 amt >> PAGE_SHIFT, pd->port_rcvhdrq, ipath_create_rcvhdrq()
1811 (unsigned long) pd->port_rcvhdrq_phys, ipath_create_rcvhdrq()
1812 (unsigned long) pd->port_rcvhdrq_size, ipath_create_rcvhdrq()
1813 pd->port_port); ipath_create_rcvhdrq()
1818 pd->port_port, pd->port_rcvhdrq, ipath_create_rcvhdrq()
1819 (unsigned long long) pd->port_rcvhdrq_phys, ipath_create_rcvhdrq()
1820 pd->port_rcvhdrtail_kvaddr, (unsigned long long) ipath_create_rcvhdrq()
1821 pd->port_rcvhdrqtailaddr_phys); ipath_create_rcvhdrq()
1824 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size); ipath_create_rcvhdrq()
1825 if (pd->port_rcvhdrtail_kvaddr) ipath_create_rcvhdrq()
1826 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE); ipath_create_rcvhdrq()
1833 pd->port_port, pd->port_rcvhdrqtailaddr_phys); ipath_create_rcvhdrq()
1835 pd->port_port, pd->port_rcvhdrq_phys); ipath_create_rcvhdrq()
2420 * @pd: the portdata structure
2429 void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd) ipath_free_pddata() argument
2431 if (!pd) ipath_free_pddata()
2434 if (pd->port_rcvhdrq) { ipath_free_pddata()
2436 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq, ipath_free_pddata()
2437 (unsigned long) pd->port_rcvhdrq_size); ipath_free_pddata()
2438 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size, ipath_free_pddata()
2439 pd->port_rcvhdrq, pd->port_rcvhdrq_phys); ipath_free_pddata()
2440 pd->port_rcvhdrq = NULL; ipath_free_pddata()
2441 if (pd->port_rcvhdrtail_kvaddr) { ipath_free_pddata()
2443 pd->port_rcvhdrtail_kvaddr, ipath_free_pddata()
2444 pd->port_rcvhdrqtailaddr_phys); ipath_free_pddata()
2445 pd->port_rcvhdrtail_kvaddr = NULL; ipath_free_pddata()
2448 if (pd->port_port && pd->port_rcvegrbuf) { ipath_free_pddata()
2451 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { ipath_free_pddata()
2452 void *base = pd->port_rcvegrbuf[e]; ipath_free_pddata()
2453 size_t size = pd->port_rcvegrbuf_size; ipath_free_pddata()
2458 e, pd->port_rcvegrbuf_chunks); ipath_free_pddata()
2460 base, pd->port_rcvegrbuf_phys[e]); ipath_free_pddata()
2462 kfree(pd->port_rcvegrbuf); ipath_free_pddata()
2463 pd->port_rcvegrbuf = NULL; ipath_free_pddata()
2464 kfree(pd->port_rcvegrbuf_phys); ipath_free_pddata()
2465 pd->port_rcvegrbuf_phys = NULL; ipath_free_pddata()
2466 pd->port_rcvegrbuf_chunks = 0; ipath_free_pddata()
2467 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) { ipath_free_pddata()
2473 "ipath_port0_skbinfo @ %p\n", pd->port_port, ipath_free_pddata()
2484 kfree(pd->port_tid_pg_list); ipath_free_pddata()
2485 vfree(pd->subport_uregbase); ipath_free_pddata()
2486 vfree(pd->subport_rcvegrbuf); ipath_free_pddata()
2487 vfree(pd->subport_rcvhdr_base); ipath_free_pddata()
2488 kfree(pd); ipath_free_pddata()
1762 ipath_create_rcvhdrq(struct ipath_devdata *dd, struct ipath_portdata *pd) ipath_create_rcvhdrq() argument
H A Dipath_init_chip.c213 struct ipath_portdata *pd = NULL; create_portdata0() local
215 pd = kzalloc(sizeof(*pd), GFP_KERNEL); create_portdata0()
216 if (pd) { create_portdata0()
217 pd->port_dd = dd; create_portdata0()
218 pd->port_cnt = 1; create_portdata0()
220 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; create_portdata0()
221 pd->port_seq_cnt = 1; create_portdata0()
223 return pd; create_portdata0()
228 struct ipath_portdata *pd; init_chip_first() local
277 pd = create_portdata0(dd); init_chip_first()
278 if (!pd) { init_chip_first()
284 dd->ipath_pd[0] = pd; init_chip_first()
714 struct ipath_portdata *pd; ipath_init_chip() local
878 pd = dd->ipath_pd[0]; ipath_init_chip()
884 * Then free old pd. Could lead to fragmentation, but also ipath_init_chip()
889 ipath_free_pddata(dd, pd); ipath_init_chip()
891 pd = npd; ipath_init_chip()
899 ret = ipath_create_rcvhdrq(dd, pd); ipath_init_chip()
H A Dipath_intr.c593 struct ipath_portdata *pd = dd->ipath_pd[i]; handle_hdrq_full() local
601 if (pd->port_head != ipath_get_hdrqtail(pd)) handle_hdrq_full()
607 if (!pd || !pd->port_cnt) handle_hdrq_full()
614 tl = ipath_get_rcvhdrtail(pd); handle_hdrq_full()
615 if (tl == pd->port_lastrcvhdrqtail) handle_hdrq_full()
620 pd->port_lastrcvhdrqtail = tl; handle_hdrq_full()
621 pd->port_hdrqfull++; handle_hdrq_full()
624 wake_up_interruptible(&pd->port_wait); handle_hdrq_full()
814 struct ipath_portdata *pd = dd->ipath_pd[0]; handle_errors() local
823 if (pd->port_head != ipath_get_hdrqtail(pd)) handle_errors()
1039 struct ipath_portdata *pd = dd->ipath_pd[i]; handle_urcv() local
1041 if (portr & (1 << i) && pd && pd->port_cnt) { handle_urcv()
1043 &pd->port_flag)) { handle_urcv()
1046 wake_up_interruptible(&pd->port_wait); handle_urcv()
1049 &pd->port_flag)) { handle_urcv()
1050 pd->port_urgent++; handle_urcv()
1051 wake_up_interruptible(&pd->port_wait); handle_urcv()
/linux-4.1.27/arch/arm/mach-shmobile/
H A Dpm-rmobile.c231 struct device_node *pd; member in struct:special_pd
248 struct device_node *pd; add_special_pd() local
250 pd = of_parse_phandle(np, "power-domains", 0); add_special_pd()
251 if (!pd) add_special_pd()
255 if (pd == special_pds[i].pd && type == special_pds[i].type) { add_special_pd()
256 of_node_put(pd); add_special_pd()
262 of_node_put(pd); add_special_pd()
266 pr_debug("Special PM domain %s type %d for %s\n", pd->name, type, add_special_pd()
269 special_pds[num_special_pds].pd = pd; add_special_pd()
297 of_node_put(special_pds[i].pd); put_special_pds()
300 static enum pd_types __init pd_type(const struct device_node *pd) pd_type() argument
305 if (pd == special_pds[i].pd) pd_type()
312 struct rmobile_pm_domain *pd) rmobile_setup_pm_domain()
314 const char *name = pd->genpd.name; rmobile_setup_pm_domain()
323 pd->gov = &pm_domain_always_on_gov; rmobile_setup_pm_domain()
324 pd->suspend = rmobile_pd_suspend_busy; rmobile_setup_pm_domain()
329 pd->gov = &pm_domain_always_on_gov; rmobile_setup_pm_domain()
330 pd->suspend = rmobile_pd_suspend_console; rmobile_setup_pm_domain()
340 pd->gov = &pm_domain_always_on_gov; rmobile_setup_pm_domain()
341 pd->suspend = rmobile_pd_suspend_busy; rmobile_setup_pm_domain()
350 pd->gov = &pm_domain_always_on_gov; rmobile_setup_pm_domain()
351 pd->suspend = rmobile_pd_suspend_busy; rmobile_setup_pm_domain()
358 rmobile_init_pm_domain(pd); rmobile_setup_pm_domain()
368 struct rmobile_pm_domain *pd; for_each_child_of_node() local
375 pd = kzalloc(sizeof(*pd), GFP_KERNEL); for_each_child_of_node()
376 if (!pd) for_each_child_of_node()
379 pd->genpd.name = np->name; for_each_child_of_node()
380 pd->base = base; for_each_child_of_node()
381 pd->bit_shift = idx; for_each_child_of_node()
383 rmobile_setup_pm_domain(np, pd); for_each_child_of_node()
385 pm_genpd_add_subdomain(genpd_parent, &pd->genpd); for_each_child_of_node()
386 of_genpd_add_provider_simple(np, &pd->genpd); for_each_child_of_node()
388 rmobile_add_pm_domains(base, np, &pd->genpd); for_each_child_of_node()
311 rmobile_setup_pm_domain(struct device_node *np, struct rmobile_pm_domain *pd) rmobile_setup_pm_domain() argument
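rmobile_setup_pm_domain() above dispatches on pd_type(), which is a pointer-identity lookup: add_special_pd() records the device_node obtained from the "power-domains" phandle together with a type, returning early on duplicates, and pd_type() scans that table. The same registry pattern reduced to plain C; the node pointer and the type labels are stand-ins for the kernel types:

    enum pd_types { PD_NORMAL, PD_CPU, PD_CONSOLE, PD_DEBUG, PD_MEMCTL };

    struct special_pd_entry {
            const void *node;       /* stands in for struct device_node * */
            enum pd_types type;
    };

    #define MAX_SPECIAL_PDS 16

    static struct special_pd_entry special_pds[MAX_SPECIAL_PDS];
    static int num_special_pds;

    /* Record a node/type pair once; duplicates return early, as the
     * of_node_put() path does in add_special_pd() above. */
    static void add_special_pd(const void *node, enum pd_types type)
    {
            for (int i = 0; i < num_special_pds; i++)
                    if (special_pds[i].node == node &&
                        special_pds[i].type == type)
                            return;
            if (num_special_pds < MAX_SPECIAL_PDS) {
                    special_pds[num_special_pds].node = node;
                    special_pds[num_special_pds].type = type;
                    num_special_pds++;
            }
    }

    /* pd_type(): pointer-identity scan; unlisted nodes are "normal". */
    static enum pd_types pd_type(const void *node)
    {
            for (int i = 0; i < num_special_pds; i++)
                    if (special_pds[i].node == node)
                            return special_pds[i].type;
            return PD_NORMAL;
    }

    int main(void)
    {
            int console_node;       /* any unique address serves as a key */

            add_special_pd(&console_node, PD_CONSOLE);
            return pd_type(&console_node) != PD_CONSOLE;
    }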
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_pd.c39 int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd) mthca_pd_alloc() argument
43 pd->privileged = privileged; mthca_pd_alloc()
45 atomic_set(&pd->sqp_count, 0); mthca_pd_alloc()
46 pd->pd_num = mthca_alloc(&dev->pd_table.alloc); mthca_pd_alloc()
47 if (pd->pd_num == -1) mthca_pd_alloc()
51 err = mthca_mr_alloc_notrans(dev, pd->pd_num, mthca_pd_alloc()
54 &pd->ntmr); mthca_pd_alloc()
56 mthca_free(&dev->pd_table.alloc, pd->pd_num); mthca_pd_alloc()
62 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) mthca_pd_free() argument
64 if (pd->privileged) mthca_pd_free()
65 mthca_free_mr(dev, &pd->ntmr); mthca_pd_free()
66 mthca_free(&dev->pd_table.alloc, pd->pd_num); mthca_pd_free()
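mthca_pd_alloc()/mthca_pd_free() above follow the allocate-compose-or-unwind shape: take a PD number from the allocator, then, for privileged PDs (mirroring the conditional visible in mthca_pd_free()), allocate the backing no-translation MR, releasing the PD number again if that second step fails; teardown runs in reverse order. A generic sketch of the same two-resource pattern, with all names hypothetical and the sub-allocators stubbed:

    #include <errno.h>

    /* Trivial stubs for the two sub-allocators the excerpt uses. */
    static int next_num;
    static int alloc_number(void) { return next_num < 8 ? next_num++ : -1; }
    static void free_number(int n) { (void)n; }
    static int alloc_backing_mr(int n) { (void)n; return 0; }
    static void free_backing_mr(int n) { (void)n; }

    struct demo_pd {
            int num;
            int privileged;
    };

    /* Allocate-compose-or-unwind, as in mthca_pd_alloc(). */
    static int demo_pd_alloc(struct demo_pd *pd, int privileged)
    {
            int err;

            pd->privileged = privileged;
            pd->num = alloc_number();
            if (pd->num == -1)
                    return -ENOMEM;

            if (privileged) {
                    err = alloc_backing_mr(pd->num);
                    if (err) {
                            free_number(pd->num);   /* unwind step one */
                            return err;
                    }
            }
            return 0;
    }

    /* Teardown in reverse order, as in mthca_pd_free(). */
    static void demo_pd_free(struct demo_pd *pd)
    {
            if (pd->privileged)
                    free_backing_mr(pd->num);
            free_number(pd->num);
    }

    int main(void)
    {
            struct demo_pd pd;

            if (demo_pd_alloc(&pd, 1))
                    return 1;
            demo_pd_free(&pd);
            return 0;
    }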
H A Dmthca_provider.c375 struct mthca_pd *pd; mthca_alloc_pd() local
378 pd = kmalloc(sizeof *pd, GFP_KERNEL); mthca_alloc_pd()
379 if (!pd) mthca_alloc_pd()
382 err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); mthca_alloc_pd()
384 kfree(pd); mthca_alloc_pd()
389 if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_alloc_pd()
390 mthca_pd_free(to_mdev(ibdev), pd); mthca_alloc_pd() local
391 kfree(pd); mthca_alloc_pd()
396 return &pd->ibpd; mthca_alloc_pd()
399 static int mthca_dealloc_pd(struct ib_pd *pd) mthca_dealloc_pd() argument
401 mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); mthca_dealloc_pd()
402 kfree(pd); mthca_dealloc_pd()
407 static struct ib_ah *mthca_ah_create(struct ib_pd *pd, mthca_ah_create() argument
417 err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); mthca_ah_create()
434 static struct ib_srq *mthca_create_srq(struct ib_pd *pd, mthca_create_srq() argument
450 if (pd->uobject) { mthca_create_srq()
451 context = to_mucontext(pd->uobject->context); mthca_create_srq()
458 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq()
469 err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), mthca_create_srq()
472 if (err && pd->uobject) mthca_create_srq()
473 mthca_unmap_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq()
480 mthca_free_srq(to_mdev(pd->device), srq); mthca_create_srq()
510 static struct ib_qp *mthca_create_qp(struct ib_pd *pd, mthca_create_qp() argument
532 if (pd->uobject) { mthca_create_qp()
533 context = to_mucontext(pd->uobject->context); mthca_create_qp()
540 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp()
548 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp()
552 mthca_unmap_user_db(to_mdev(pd->device), mthca_create_qp()
565 err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), mthca_create_qp()
571 if (err && pd->uobject) { mthca_create_qp()
572 context = to_mucontext(pd->uobject->context); mthca_create_qp()
574 mthca_unmap_user_db(to_mdev(pd->device), mthca_create_qp()
578 mthca_unmap_user_db(to_mdev(pd->device), mthca_create_qp()
591 if (pd->uobject) mthca_create_qp()
600 err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), mthca_create_qp()
870 static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc) mthca_get_dma_mr() argument
879 err = mthca_mr_alloc_notrans(to_mdev(pd->device), mthca_get_dma_mr()
880 to_mpd(pd)->pd_num, mthca_get_dma_mr()
893 static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, mthca_reg_phys_mr() argument
951 mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) " mthca_reg_phys_mr()
955 to_mpd(pd)->pd_num, mthca_reg_phys_mr()
958 err = mthca_mr_alloc_phys(to_mdev(pd->device), mthca_reg_phys_mr()
959 to_mpd(pd)->pd_num, mthca_reg_phys_mr()
976 static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mthca_reg_user_mr() argument
979 struct mthca_dev *dev = to_mdev(pd->device); mthca_reg_user_mr()
990 if (!to_mucontext(pd->uobject->context)->reg_mr_warned) { mthca_reg_user_mr()
995 ++to_mucontext(pd->uobject->context)->reg_mr_warned; mthca_reg_user_mr()
1004 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, mthca_reg_user_mr()
1057 err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length, mthca_reg_user_mr()
1088 static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, mthca_alloc_fmr() argument
1099 err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, mthca_alloc_fmr()
H A Dmthca_srq.c96 struct mthca_pd *pd, mthca_tavor_init_srq_context()
103 context->state_pd = cpu_to_be32(pd->pd_num); mthca_tavor_init_srq_context()
106 if (pd->ibpd.uobject) mthca_tavor_init_srq_context()
108 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); mthca_tavor_init_srq_context()
114 struct mthca_pd *pd, mthca_arbel_init_srq_context()
132 if (pd->ibpd.uobject) mthca_arbel_init_srq_context()
134 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); mthca_arbel_init_srq_context()
137 context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); mthca_arbel_init_srq_context()
147 static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, mthca_alloc_srq_buf() argument
155 if (pd->ibpd.uobject) mthca_alloc_srq_buf()
164 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); mthca_alloc_srq_buf()
199 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, mthca_alloc_srq() argument
238 if (!pd->ibpd.uobject) { mthca_alloc_srq()
254 err = mthca_alloc_srq_buf(dev, pd, srq); mthca_alloc_srq()
264 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); mthca_alloc_srq()
266 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); mthca_alloc_srq()
300 if (!pd->ibpd.uobject) mthca_alloc_srq()
307 if (!pd->ibpd.uobject && mthca_is_memfree(dev)) mthca_alloc_srq()
95 mthca_tavor_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_tavor_srq_context *context) mthca_tavor_init_srq_context() argument
113 mthca_arbel_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_arbel_srq_context *context) mthca_arbel_init_srq_context() argument
H A Dmthca_dev.h425 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
462 int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd);
463 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
471 int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
473 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
475 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
481 int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
512 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
544 struct mthca_pd *pd,
552 struct mthca_pd *pd,
562 struct mthca_pd *pd,
H A Dmthca_mr.c54 __be32 pd; member in struct:mthca_mpt_entry
429 int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, mthca_mr_alloc() argument
468 mpt_entry->pd = cpu_to_be32(pd); mthca_mr_alloc()
512 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, mthca_mr_alloc_notrans() argument
516 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); mthca_mr_alloc_notrans()
519 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, mthca_mr_alloc_phys() argument
536 err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, mthca_mr_alloc_phys()
567 int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, mthca_fmr_alloc() argument
638 mpt_entry->pd = cpu_to_be32(pd); mthca_fmr_alloc()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
H A DMakefile4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
/linux-4.1.27/arch/mips/include/asm/mach-bcm63xx/
H A Dbcm63xx_dev_dsp.h11 int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd);
H A Dbcm63xx_dev_usb_usbd.h15 int bcm63xx_usbd_register(const struct bcm63xx_usbd_platform_data *pd);
H A Dbcm63xx_dev_enet.h98 const struct bcm63xx_enet_platform_data *pd);
100 int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd);
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_proto.c70 struct ip_vs_proto_data *pd = register_ip_vs_proto_netns() local
73 if (!pd) register_ip_vs_proto_netns()
76 pd->pp = pp; /* For speed issues */ register_ip_vs_proto_netns()
77 pd->next = ipvs->proto_data_table[hash]; register_ip_vs_proto_netns()
78 ipvs->proto_data_table[hash] = pd; register_ip_vs_proto_netns()
79 atomic_set(&pd->appcnt, 0); /* Init app counter */ register_ip_vs_proto_netns()
82 int ret = pp->init_netns(net, pd); register_ip_vs_proto_netns()
85 ipvs->proto_data_table[hash] = pd->next; register_ip_vs_proto_netns()
86 kfree(pd); register_ip_vs_proto_netns()
119 unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) unregister_ip_vs_proto_netns() argument
123 unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol); unregister_ip_vs_proto_netns()
127 if (*pd_p == pd) { unregister_ip_vs_proto_netns()
128 *pd_p = pd->next; unregister_ip_vs_proto_netns()
129 if (pd->pp->exit_netns != NULL) unregister_ip_vs_proto_netns()
130 pd->pp->exit_netns(net, pd); unregister_ip_vs_proto_netns()
131 kfree(pd); unregister_ip_vs_proto_netns()
162 struct ip_vs_proto_data *pd; __ipvs_proto_data_get() local
165 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { __ipvs_proto_data_get()
166 if (pd->pp->protocol == proto) __ipvs_proto_data_get()
167 return pd; __ipvs_proto_data_get()
187 struct ip_vs_proto_data *pd; ip_vs_protocol_timeout_change() local
191 for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) { ip_vs_protocol_timeout_change()
192 if (pd->pp->timeout_change) ip_vs_protocol_timeout_change()
193 pd->pp->timeout_change(pd, flags); ip_vs_protocol_timeout_change()
356 struct ip_vs_proto_data *pd; ip_vs_protocol_net_cleanup() local
361 while ((pd = ipvs->proto_data_table[i]) != NULL) ip_vs_protocol_net_cleanup()
362 unregister_ip_vs_proto_netns(net, pd); ip_vs_protocol_net_cleanup()
H A Dip_vs_proto_udp.c32 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, udp_conn_schedule() argument
67 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); udp_conn_schedule()
70 *verdict = ip_vs_leave(svc, skb, pd, iph); udp_conn_schedule()
358 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP); udp_register_app() local
369 atomic_inc(&pd->appcnt); udp_register_app()
379 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP); udp_unregister_app() local
381 atomic_dec(&pd->appcnt); udp_unregister_app()
449 struct ip_vs_proto_data *pd) udp_state_transition()
451 if (unlikely(!pd)) { udp_state_transition()
456 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; udp_state_transition()
459 static int __udp_init(struct net *net, struct ip_vs_proto_data *pd) __udp_init() argument
464 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, __udp_init()
466 if (!pd->timeout_table) __udp_init()
471 static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) __udp_exit() argument
473 kfree(pd->timeout_table); __udp_exit()
447 udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) udp_state_transition() argument
H A Dip_vs_proto_tcp.c35 tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, tcp_conn_schedule() argument
72 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); tcp_conn_schedule()
75 *verdict = ip_vs_leave(svc, skb, pd, iph); tcp_conn_schedule()
447 static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) tcp_timeout_change() argument
457 pd->tcp_state_table = (on ? tcp_states_dos : tcp_states); tcp_timeout_change()
474 set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, set_tcp_state() argument
498 pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; set_tcp_state()
506 pd->pp->name, set_tcp_state()
536 if (likely(pd)) set_tcp_state()
537 cp->timeout = pd->timeout_table[cp->state = new_state]; set_tcp_state()
548 struct ip_vs_proto_data *pd) tcp_state_transition()
563 set_tcp_state(pd, cp, direction, th); tcp_state_transition()
581 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); tcp_register_app() local
592 atomic_inc(&pd->appcnt); tcp_register_app()
602 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); tcp_unregister_app() local
604 atomic_dec(&pd->appcnt); tcp_unregister_app()
658 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); ip_vs_tcp_conn_listen() local
662 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] ip_vs_tcp_conn_listen()
671 static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) __ip_vs_tcp_init() argument
676 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, __ip_vs_tcp_init()
678 if (!pd->timeout_table) __ip_vs_tcp_init()
680 pd->tcp_state_table = tcp_states; __ip_vs_tcp_init()
684 static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) __ip_vs_tcp_exit() argument
686 kfree(pd->timeout_table); __ip_vs_tcp_exit()
546 tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) tcp_state_transition() argument
H A Dip_vs_proto_sctp.c12 sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, sctp_conn_schedule() argument
56 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); sctp_conn_schedule()
59 *verdict = ip_vs_leave(svc, skb, pd, iph); sctp_conn_schedule()
369 set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, set_sctp_state() argument
432 pd->pp->name, set_sctp_state()
456 if (likely(pd)) set_sctp_state()
457 cp->timeout = pd->timeout_table[cp->state = next_state]; set_sctp_state()
464 const struct sk_buff *skb, struct ip_vs_proto_data *pd) sctp_state_transition()
467 set_sctp_state(pd, cp, direction, skb); sctp_state_transition()
484 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP); sctp_register_app() local
495 atomic_inc(&pd->appcnt); sctp_register_app()
503 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP); sctp_unregister_app() local
505 atomic_dec(&pd->appcnt); sctp_unregister_app()
552 static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) __ip_vs_sctp_init() argument
557 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, __ip_vs_sctp_init()
559 if (!pd->timeout_table) __ip_vs_sctp_init()
564 static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) __ip_vs_sctp_exit() argument
566 kfree(pd->timeout_table); __ip_vs_sctp_exit()
463 sctp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) sctp_state_transition() argument
H A Dip_vs_core.c205 struct ip_vs_proto_data *pd) ip_vs_set_state()
207 if (likely(pd->pp->state_transition)) ip_vs_set_state()
208 pd->pp->state_transition(cp, direction, skb, pd); ip_vs_set_state()
414 struct ip_vs_proto_data *pd, int *ignored, ip_vs_schedule()
417 struct ip_vs_protocol *pp = pd->pp; ip_vs_schedule()
529 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph) ip_vs_leave()
583 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); ip_vs_leave()
586 ret = cp->packet_xmit(skb, cp, pd->pp, iph); ip_vs_leave()
1090 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, handle_response() argument
1094 struct ip_vs_protocol *pp = pd->pp; handle_response()
1136 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd); handle_response()
1163 struct ip_vs_proto_data *pd; ip_vs_out() local
1210 pd = ip_vs_proto_data_get(net, iph.protocol); ip_vs_out()
1211 if (unlikely(!pd)) ip_vs_out()
1213 pp = pd->pp; ip_vs_out()
1233 return handle_response(af, skb, pd, cp, &iph, hooknum); ip_vs_out()
1346 struct ip_vs_proto_data *pd; ip_vs_in_icmp() local
1405 pd = ip_vs_proto_data_get(net, cih->protocol); ip_vs_in_icmp()
1406 if (!pd) ip_vs_in_icmp()
1408 pp = pd->pp; ip_vs_in_icmp()
1519 struct ip_vs_proto_data *pd; ip_vs_in_icmp_v6() local
1563 pd = ip_vs_proto_data_get(net, ciph.protocol); ip_vs_in_icmp_v6()
1564 if (!pd) ip_vs_in_icmp_v6()
1566 pp = pd->pp; ip_vs_in_icmp_v6()
1618 struct ip_vs_proto_data *pd; ip_vs_in() local
1682 pd = ip_vs_proto_data_get(net, iph.protocol); ip_vs_in()
1683 if (unlikely(!pd)) ip_vs_in()
1685 pp = pd->pp; ip_vs_in()
1710 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph)) ip_vs_in()
1744 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); ip_vs_in()
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) ip_vs_set_state() argument
413 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *ignored, struct ip_vs_iphdr *iph) ip_vs_schedule() argument
528 ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph) ip_vs_leave() argument
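The register/lookup/unregister trio above (register_ip_vs_proto_netns(), __ipvs_proto_data_get(), unregister_ip_vs_proto_netns()) is a singly linked hash chain: registration pushes the new ip_vs_proto_data at the head of proto_data_table[hash], lookup walks the chain comparing pp->protocol, and unregistration unlinks through a pointer-to-pointer so the head needs no special case. A minimal user-space sketch with hypothetical names and an arbitrary table size:

    #include <stdlib.h>

    #define PROTO_TAB_SIZE 32
    #define proto_hash(proto) ((proto) & (PROTO_TAB_SIZE - 1))

    struct proto_data {
            int proto;
            struct proto_data *next;
    };

    static struct proto_data *proto_tab[PROTO_TAB_SIZE];

    /* Push at the head of the chain, as register_ip_vs_proto_netns() does. */
    static struct proto_data *proto_register(int proto)
    {
            struct proto_data *pd = calloc(1, sizeof(*pd));

            if (!pd)
                    return NULL;
            pd->proto = proto;
            pd->next = proto_tab[proto_hash(proto)];
            proto_tab[proto_hash(proto)] = pd;
            return pd;
    }

    /* Walk one chain, as __ipvs_proto_data_get() does. */
    static struct proto_data *proto_get(int proto)
    {
            struct proto_data *pd;

            for (pd = proto_tab[proto_hash(proto)]; pd; pd = pd->next)
                    if (pd->proto == proto)
                            return pd;
            return NULL;
    }

    /* Unlink via a pointer-to-pointer: head removal needs no special case. */
    static void proto_unregister(struct proto_data *pd)
    {
            struct proto_data **pp;

            for (pp = &proto_tab[proto_hash(pd->proto)]; *pp; pp = &(*pp)->next) {
                    if (*pp == pd) {
                            *pp = pd->next;
                            free(pd);
                            return;
                    }
            }
    }

    int main(void)
    {
            struct proto_data *tcp = proto_register(6);     /* IPPROTO_TCP */
            struct proto_data *udp = proto_register(17);    /* IPPROTO_UDP */

            if (!tcp || !udp || proto_get(6) != tcp)
                    return 1;
            proto_unregister(udp);
            proto_unregister(tcp);
            return proto_get(6) != NULL;    /* 0: chain is clean again */
    }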
/linux-4.1.27/kernel/
H A Dpadata.c36 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) padata_index_to_cpu() argument
40 target_cpu = cpumask_first(pd->cpumask.pcpu); padata_index_to_cpu()
42 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); padata_index_to_cpu()
47 static int padata_cpu_hash(struct parallel_data *pd) padata_cpu_hash() argument
57 seq_nr = atomic_inc_return(&pd->seq_nr); padata_cpu_hash()
58 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); padata_cpu_hash()
60 return padata_index_to_cpu(pd, cpu_index); padata_cpu_hash()
66 struct parallel_data *pd; padata_parallel_worker() local
73 pd = pqueue->pd; padata_parallel_worker()
74 pinst = pd->pinst; padata_parallel_worker()
111 struct parallel_data *pd; padata_do_parallel() local
115 pd = rcu_dereference_bh(pinst->pd); padata_do_parallel()
121 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) padata_do_parallel()
128 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) padata_do_parallel()
132 atomic_inc(&pd->refcnt); padata_do_parallel()
133 padata->pd = pd; padata_do_parallel()
136 target_cpu = padata_cpu_hash(pd); padata_do_parallel()
137 queue = per_cpu_ptr(pd->pqueue, target_cpu); padata_do_parallel()
169 static struct padata_priv *padata_get_next(struct parallel_data *pd) padata_get_next() argument
177 num_cpus = cpumask_weight(pd->cpumask.pcpu); padata_get_next()
183 next_nr = pd->processed; padata_get_next()
185 cpu = padata_index_to_cpu(pd, next_index); padata_get_next()
186 next_queue = per_cpu_ptr(pd->pqueue, cpu); padata_get_next()
198 atomic_dec(&pd->reorder_objects); padata_get_next()
201 pd->processed++; padata_get_next()
206 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { padata_get_next()
216 static void padata_reorder(struct parallel_data *pd) padata_reorder() argument
221 struct padata_instance *pinst = pd->pinst; padata_reorder()
233 if (!spin_trylock_bh(&pd->lock)) padata_reorder()
237 padata = padata_get_next(pd); padata_reorder()
254 del_timer(&pd->timer); padata_reorder()
255 spin_unlock_bh(&pd->lock); padata_reorder()
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu); padata_reorder()
269 spin_unlock_bh(&pd->lock); padata_reorder()
276 if (atomic_read(&pd->reorder_objects) padata_reorder()
278 mod_timer(&pd->timer, jiffies + HZ); padata_reorder()
280 del_timer(&pd->timer); padata_reorder()
287 struct parallel_data *pd = (struct parallel_data *)arg; padata_reorder_timer() local
289 padata_reorder(pd); padata_reorder_timer()
295 struct parallel_data *pd; padata_serial_worker() local
300 pd = squeue->pd; padata_serial_worker()
315 atomic_dec(&pd->refcnt); padata_serial_worker()
332 struct parallel_data *pd; padata_do_serial() local
334 pd = padata->pd; padata_do_serial()
337 pqueue = per_cpu_ptr(pd->pqueue, cpu); padata_do_serial()
340 atomic_inc(&pd->reorder_objects); padata_do_serial()
346 padata_reorder(pd); padata_do_serial()
350 static int padata_setup_cpumasks(struct parallel_data *pd, padata_setup_cpumasks() argument
354 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) padata_setup_cpumasks()
357 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); padata_setup_cpumasks()
358 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { padata_setup_cpumasks()
359 free_cpumask_var(pd->cpumask.cbcpu); padata_setup_cpumasks()
363 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); padata_setup_cpumasks()
374 static void padata_init_squeues(struct parallel_data *pd) padata_init_squeues() argument
379 for_each_cpu(cpu, pd->cpumask.cbcpu) { padata_init_squeues()
380 squeue = per_cpu_ptr(pd->squeue, cpu); padata_init_squeues()
381 squeue->pd = pd; padata_init_squeues()
388 static void padata_init_pqueues(struct parallel_data *pd) padata_init_pqueues() argument
394 for_each_cpu(cpu, pd->cpumask.pcpu) { padata_init_pqueues()
395 pqueue = per_cpu_ptr(pd->pqueue, cpu); padata_init_pqueues()
396 pqueue->pd = pd; padata_init_pqueues()
412 struct parallel_data *pd; padata_alloc_pd() local
414 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); padata_alloc_pd()
415 if (!pd) padata_alloc_pd()
418 pd->pqueue = alloc_percpu(struct padata_parallel_queue); padata_alloc_pd()
419 if (!pd->pqueue) padata_alloc_pd()
422 pd->squeue = alloc_percpu(struct padata_serial_queue); padata_alloc_pd()
423 if (!pd->squeue) padata_alloc_pd()
425 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) padata_alloc_pd()
428 padata_init_pqueues(pd); padata_alloc_pd()
429 padata_init_squeues(pd); padata_alloc_pd()
430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); padata_alloc_pd()
431 atomic_set(&pd->seq_nr, -1); padata_alloc_pd()
432 atomic_set(&pd->reorder_objects, 0); padata_alloc_pd()
433 atomic_set(&pd->refcnt, 0); padata_alloc_pd()
434 pd->pinst = pinst; padata_alloc_pd()
435 spin_lock_init(&pd->lock); padata_alloc_pd()
437 return pd; padata_alloc_pd()
440 free_percpu(pd->squeue); padata_alloc_pd()
442 free_percpu(pd->pqueue); padata_alloc_pd()
444 kfree(pd); padata_alloc_pd()
449 static void padata_free_pd(struct parallel_data *pd) padata_free_pd() argument
451 free_cpumask_var(pd->cpumask.pcpu); padata_free_pd()
452 free_cpumask_var(pd->cpumask.cbcpu); padata_free_pd()
453 free_percpu(pd->pqueue); padata_free_pd()
454 free_percpu(pd->squeue); padata_free_pd()
455 kfree(pd); padata_free_pd()
459 static void padata_flush_queues(struct parallel_data *pd) padata_flush_queues() argument
465 for_each_cpu(cpu, pd->cpumask.pcpu) { padata_flush_queues()
466 pqueue = per_cpu_ptr(pd->pqueue, cpu); padata_flush_queues()
470 del_timer_sync(&pd->timer); padata_flush_queues()
472 if (atomic_read(&pd->reorder_objects)) padata_flush_queues()
473 padata_reorder(pd); padata_flush_queues()
475 for_each_cpu(cpu, pd->cpumask.cbcpu) { padata_flush_queues()
476 squeue = per_cpu_ptr(pd->squeue, cpu); padata_flush_queues()
480 BUG_ON(atomic_read(&pd->refcnt) != 0); padata_flush_queues()
498 padata_flush_queues(pinst->pd); __padata_stop()
506 struct parallel_data *pd_old = pinst->pd; padata_replace()
511 rcu_assign_pointer(pinst->pd, pd_new); padata_replace()
581 struct parallel_data *pd; __padata_set_cpumasks() local
594 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); __padata_set_cpumasks()
595 if (!pd) __padata_set_cpumasks()
601 padata_replace(pinst, pd); __padata_set_cpumasks()
679 struct parallel_data *pd; __padata_add_cpu() local
682 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, __padata_add_cpu()
684 if (!pd) __padata_add_cpu()
687 padata_replace(pinst, pd); __padata_add_cpu()
735 struct parallel_data *pd = NULL; __padata_remove_cpu() local
743 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, __padata_remove_cpu()
745 if (!pd) __padata_remove_cpu()
748 padata_replace(pinst, pd); __padata_remove_cpu()
750 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); __padata_remove_cpu()
751 cpumask_clear_cpu(cpu, pd->cpumask.pcpu); __padata_remove_cpu()
884 padata_free_pd(pinst->pd); __padata_free()
1041 struct parallel_data *pd = NULL; padata_alloc() local
1058 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); padata_alloc()
1059 if (!pd) padata_alloc()
1062 rcu_assign_pointer(pinst->pd, pd); padata_alloc()
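
Note: the kernel/padata.c fragments above are the reorder machinery; the consumer side is small. A minimal sketch of a padata user, assuming the include/linux/padata.h API indexed later in these results (my_job and do_cpu_heavy_work are hypothetical):

	#include <linux/padata.h>

	struct my_job {
		struct padata_priv padata;	/* carries the pd backpointer */
		int payload;
	};

	static void my_parallel(struct padata_priv *padata)
	{
		struct my_job *job = container_of(padata, struct my_job, padata);

		do_cpu_heavy_work(job->payload);	/* hypothetical worker */
		padata_do_serial(padata);	/* feeds padata_reorder() above */
	}

	static void my_serial(struct padata_priv *padata)
	{
		/* invoked in submission order via pd->squeue */
	}

	/* submit: job->padata.parallel = my_parallel;
	 *         job->padata.serial   = my_serial;
	 *         padata_do_parallel(pinst, &job->padata, cb_cpu); */
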
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_pd.c51 struct ehca_pd *pd; ehca_alloc_pd() local
54 pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL); ehca_alloc_pd()
55 if (!pd) { ehca_alloc_pd()
62 INIT_LIST_HEAD(&pd->free[i]); ehca_alloc_pd()
63 INIT_LIST_HEAD(&pd->full[i]); ehca_alloc_pd()
65 mutex_init(&pd->lock); ehca_alloc_pd()
78 pd->fw_pd.value = shca->pd->fw_pd.value; ehca_alloc_pd()
80 pd->fw_pd.value = (u64)pd; ehca_alloc_pd()
82 return &pd->ib_pd; ehca_alloc_pd()
85 int ehca_dealloc_pd(struct ib_pd *pd) ehca_dealloc_pd() argument
87 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_dealloc_pd()
101 ehca_warn(pd->device, ehca_dealloc_pd()
H A Dipz_pt_fn.c127 static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) alloc_small_queue_page() argument
133 mutex_lock(&pd->lock); alloc_small_queue_page()
135 if (!list_empty(&pd->free[order])) alloc_small_queue_page()
136 page = list_entry(pd->free[order].next, alloc_small_queue_page()
149 list_add(&page->list, &pd->free[order]); alloc_small_queue_page()
157 list_move(&page->list, &pd->full[order]); alloc_small_queue_page()
159 mutex_unlock(&pd->lock); alloc_small_queue_page()
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page"); alloc_small_queue_page()
168 mutex_unlock(&pd->lock); alloc_small_queue_page()
172 static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) free_small_queue_page() argument
182 mutex_lock(&pd->lock); free_small_queue_page()
194 list_move_tail(&page->list, &pd->free[order]); free_small_queue_page()
196 mutex_unlock(&pd->lock); free_small_queue_page()
204 int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, ipz_queue_ctor() argument
237 if (!alloc_small_queue_page(queue, pd)) ipz_queue_ctor()
256 int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) ipz_queue_dtor() argument
266 free_small_queue_page(queue, pd); ipz_queue_dtor()
H A Dehca_av.c88 struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) ehca_create_ah() argument
92 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ehca_create_ah()
97 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", ehca_create_ah()
98 pd, ah_attr); ehca_create_ah()
132 rc = ehca_query_port(pd->device, ah_attr->port_num, ehca_create_ah()
136 ehca_err(pd->device, "Invalid port number " ehca_create_ah()
138 "pd=%p ah_attr=%p", rc, pd, ah_attr); ehca_create_ah()
142 rc = ehca_query_gid(pd->device, ehca_create_ah()
147 ehca_err(pd->device, "Failed to retrieve sgid " ehca_create_ah()
149 "pd=%p ah_attr=%p", rc, pd, ah_attr); ehca_create_ah()
172 struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca, ehca_modify_ah()
H A Dehca_iverbs.h67 int ehca_dealloc_pd(struct ib_pd *pd);
69 struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
77 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
79 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
84 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
90 struct ib_pd *pd,
98 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
105 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
143 struct ib_qp *ehca_create_qp(struct ib_pd *pd,
165 struct ib_srq *ehca_create_srq(struct ib_pd *pd,
H A Dehca_mrmw.c156 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) ehca_get_dma_mr() argument
161 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_get_dma_mr()
163 container_of(pd->device, struct ehca_shca, ib_device); ehca_get_dma_mr()
192 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x", ehca_get_dma_mr()
193 PTR_ERR(ib_mr), pd, mr_access_flags); ehca_get_dma_mr()
199 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, ehca_reg_phys_mr() argument
209 container_of(pd->device, struct ehca_shca, ib_device); ehca_reg_phys_mr()
210 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_reg_phys_mr()
215 ehca_err(pd->device, "bad input values: num_phys_buf=%x " ehca_reg_phys_mr()
228 ehca_err(pd->device, "bad input values: mr_access_flags=%x", ehca_reg_phys_mr()
243 ehca_err(pd->device, "bad input values: size=%llx iova_start=%p", ehca_reg_phys_mr()
251 ehca_err(pd->device, "out of memory"); ehca_reg_phys_mr()
304 ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p " ehca_reg_phys_mr()
306 PTR_ERR(ib_mr), pd, phys_buf_array, ehca_reg_phys_mr()
313 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ehca_reg_user_mr() argument
320 container_of(pd->device, struct ehca_shca, ib_device); ehca_reg_user_mr()
321 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_reg_user_mr()
328 if (!pd) { ehca_reg_user_mr()
329 ehca_gen_err("bad pd=%p", pd); ehca_reg_user_mr()
341 ehca_err(pd->device, "bad input values: mr_access_flags=%x", ehca_reg_user_mr()
348 ehca_err(pd->device, "bad input values: length=%llx " ehca_reg_user_mr()
356 ehca_err(pd->device, "out of memory"); ehca_reg_user_mr()
361 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, ehca_reg_user_mr()
369 ehca_err(pd->device, "page size not supported, " ehca_reg_user_mr()
408 ehca_warn(pd->device, "failed to register mr " ehca_reg_user_mr()
410 ehca_info(pd->device, "try to register mr with " ehca_reg_user_mr()
433 ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p", ehca_reg_user_mr()
434 PTR_ERR(ib_mr), pd, mr_access_flags, udata); ehca_reg_user_mr()
442 struct ib_pd *pd, ehca_rereg_phys_mr()
472 if (!pd) { ehca_rereg_phys_mr()
473 ehca_err(mr->device, "rereg with bad pd, pd=%p " ehca_rereg_phys_mr()
474 "mr_rereg_mask=%x", pd, mr_rereg_mask); ehca_rereg_phys_mr()
531 new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); ehca_rereg_phys_mr()
567 new_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_rereg_phys_mr()
576 mr->pd = pd; ehca_rereg_phys_mr()
584 ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p " ehca_rereg_phys_mr()
587 ret, mr, mr_rereg_mask, pd, phys_buf_array, ehca_rereg_phys_mr()
623 mr_attr->pd = mr->pd; ehca_query_mr()
688 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) ehca_alloc_mw() argument
693 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_alloc_mw()
695 container_of(pd->device, struct ehca_shca, ib_device); ehca_alloc_mw()
710 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli " ehca_alloc_mw()
725 ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd); ehca_alloc_mw()
765 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, ehca_alloc_fmr() argument
771 container_of(pd->device, struct ehca_shca, ib_device); ehca_alloc_fmr()
772 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); ehca_alloc_fmr()
788 ehca_err(pd->device, "bad input values: mr_access_flags=%x", ehca_alloc_fmr()
794 ehca_err(pd->device, "bad input values: mr_access_flags=%x", ehca_alloc_fmr()
800 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x " ehca_alloc_fmr()
810 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x", ehca_alloc_fmr()
864 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); ehca_map_phys_fmr()
1389 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); ehca_unmap_one_fmr()
1669 e_mr->ib.ib_mr.pd = &e_pd->ib_pd; ehca_reg_internal_maxmr()
1742 ib_pd = e_maxmr->ib.ib_mr.pd; ehca_dereg_internal_maxmr()
440 ehca_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) ehca_rereg_phys_mr() argument
H A Dehca_qp.c280 struct ehca_pd *pd, init_qp_queue()
299 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages, init_qp_queue()
304 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages, init_qp_queue()
360 ipz_queue_dtor(pd, queue); init_qp_queue()
453 struct ib_pd *pd, internal_create_qp()
459 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); internal_create_qp()
460 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, internal_create_qp()
473 ehca_err(pd->device, "Unable to create QP, max number of %i " internal_create_qp()
475 ehca_err(pd->device, "To increase the maximum number of QPs " internal_create_qp()
490 ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed", internal_create_qp()
510 ehca_err(pd->device, "UC with SRQ not supported"); internal_create_qp()
521 ehca_err(pd->device, "LLQPs can't have an SRQ"); internal_create_qp()
531 ehca_err(pd->device, "no more than three SGEs " internal_create_qp()
532 "supported for SRQ pd=%p max_sge=%x", internal_create_qp()
533 pd, init_attr->cap.max_recv_sge); internal_create_qp()
545 ehca_err(pd->device, "wrong QP Type=%x", qp_type); internal_create_qp()
555 ehca_err(pd->device, internal_create_qp()
566 ehca_err(pd->device, "UD LLQP not supported " internal_create_qp()
575 ehca_err(pd->device, internal_create_qp()
583 ehca_err(pd->device, internal_create_qp()
592 ehca_err(pd->device, "unsupported LL QP Type=%x", internal_create_qp()
603 ehca_err(pd->device, "Invalid number of SGEs requested " internal_create_qp()
614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); internal_create_qp()
619 if (pd->uobject && udata) { internal_create_qp()
621 context = pd->uobject->context; internal_create_qp()
651 ehca_err(pd->device, "Invalid number of qp"); internal_create_qp()
654 ehca_err(pd->device, "Can't allocate new idr entry."); internal_create_qp()
665 ehca_err(pd->device, "Invalid qp_type=%x", qp_type); internal_create_qp()
682 parms.pd = my_pd->fw_pd; internal_create_qp()
710 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", internal_create_qp()
765 ehca_err(pd->device, "Couldn't initialize squeue " internal_create_qp()
776 ehca_err(pd->device, "Couldn't allocate squeue " internal_create_qp()
791 ehca_err(pd->device, "Couldn't initialize rqueue " internal_create_qp()
801 ehca_err(pd->device, "Couldn't allocate rqueue " internal_create_qp()
818 my_qp->ib_srq.pd = &my_pd->ib_pd; internal_create_qp()
825 my_qp->ib_qp.pd = &my_pd->ib_pd; internal_create_qp()
857 ehca_err(pd->device, internal_create_qp()
881 ehca_err(pd->device, internal_create_qp()
907 ehca_err(pd->device, "Copy to udata failed"); internal_create_qp()
951 struct ib_qp *ehca_create_qp(struct ib_pd *pd, ehca_create_qp() argument
957 ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0); ehca_create_qp()
964 struct ib_srq *ehca_create_srq(struct ib_pd *pd, ehca_create_srq() argument
971 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ehca_create_srq()
990 my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1); ehca_create_srq()
1001 ehca_err(pd->device, "Could not get zeroed page for mqpcb " ehca_create_srq()
1016 ehca_err(pd->device, "Could not modify SRQ to INIT " ehca_create_srq()
1030 ehca_err(pd->device, "Could not enable SRQ " ehca_create_srq()
1044 ehca_err(pd->device, "Could not modify SRQ to RTR " ehca_create_srq()
1059 internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject); ehca_create_srq()
1247 container_of(ibqp->pd->device, struct ehca_shca, ib_device); internal_modify_qp()
2038 container_of(ibsrq->pd->device, struct ehca_shca, ib_device); ehca_modify_srq()
2135 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, internal_destroy_qp()
279 init_qp_queue(struct ehca_shca *shca, struct ehca_pd *pd, struct ehca_qp *my_qp, struct ipz_queue *queue, int q_type, u64 expected_hret, struct ehca_alloc_queue_parms *parms, int wqe_size) init_qp_queue() argument
452 internal_create_qp( struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata, int is_srq) internal_create_qp() argument
H A Dhcp_if.h202 const struct ipz_pd pd,
228 const struct ipz_pd pd,
238 const struct ipz_pd pd,
247 const struct ipz_pd pd,
H A Dhcp_if.c336 ((u64)parms->token << 32) | parms->pd.value, hipz_h_alloc_resource_qp()
736 const struct ipz_pd pd, hipz_h_alloc_resource_mr()
748 pd.value, /* r9 */ hipz_h_alloc_resource_mr()
830 const struct ipz_pd pd, hipz_h_reregister_pmr()
843 ((((u64)access_ctrl) << 32ULL) | pd.value), hipz_h_reregister_pmr()
858 const struct ipz_pd pd, hipz_h_register_smr()
869 pd.value, /* r8 */ hipz_h_register_smr()
880 const struct ipz_pd pd, hipz_h_alloc_resource_mw()
889 pd.value, /* r6 */ hipz_h_alloc_resource_mw()
731 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle, const struct ehca_mr *mr, const u64 vaddr, const u64 length, const u32 access_ctrl, const struct ipz_pd pd, struct ehca_mr_hipzout_parms *outparms) hipz_h_alloc_resource_mr() argument
825 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle, const struct ehca_mr *mr, const u64 vaddr_in, const u64 length, const u32 access_ctrl, const struct ipz_pd pd, const u64 mr_addr_cb, struct ehca_mr_hipzout_parms *outparms) hipz_h_reregister_pmr() argument
853 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle, const struct ehca_mr *mr, const struct ehca_mr *orig_mr, const u64 vaddr_in, const u32 access_ctrl, const struct ipz_pd pd, struct ehca_mr_hipzout_parms *outparms) hipz_h_register_smr() argument
878 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle, const struct ehca_mw *mw, const struct ipz_pd pd, struct ehca_mw_hipzout_parms *outparms) hipz_h_alloc_resource_mw() argument
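
Note on the recurring container_of() calls in the ehca results above: the driver embeds the core ib_pd inside its own ehca_pd, so any ib_pd handed in by the IB core maps back to driver state without a lookup. A sketch with the struct trimmed to the members visible above (the helper is illustrative; the driver open-codes container_of):

	struct ehca_pd {
		struct ib_pd	ib_pd;	/* core-visible part, &pd->ib_pd above */
		struct ipz_pd	fw_pd;	/* firmware handle, pd->fw_pd.value above */
		/* ... free/full page lists, lock ... */
	};

	static inline struct ehca_pd *to_ehca_pd(struct ib_pd *ibpd)
	{
		return container_of(ibpd, struct ehca_pd, ib_pd);
	}
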
/linux-4.1.27/arch/mips/bcm63xx/
H A Ddev-dsp.c39 int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd) bcm63xx_dsp_register() argument
45 val = bcm_mpi_readl(MPI_CSBASE_REG(pd->cs - 1)); bcm63xx_dsp_register()
49 voip_dsp_resources[1].start = pd->ext_irq; bcm63xx_dsp_register()
53 memcpy(dpd, pd, sizeof (*pd)); bcm63xx_dsp_register()
H A Ddev-usb-usbd.c36 int __init bcm63xx_usbd_register(const struct bcm63xx_usbd_platform_data *pd) bcm63xx_usbd_register() argument
62 platform_device_add_data(&bcm63xx_usbd_device, pd, sizeof(*pd)); bcm63xx_usbd_register()
H A Ddev-pcmcia.c57 static struct bcm63xx_pcmcia_platform_data pd; variable in typeref:struct:bcm63xx_pcmcia_platform_data
65 .platform_data = &pd,
114 pd.ready_gpio = 22; bcm63xx_pcmcia_register()
118 pd.ready_gpio = 18; bcm63xx_pcmcia_register()
H A Ddev-enet.c203 const struct bcm63xx_enet_platform_data *pd) bcm63xx_enet_register()
239 memcpy(dpd, pd, sizeof(*pd)); bcm63xx_enet_register()
275 bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd) bcm63xx_enetsw_register() argument
294 memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd)); bcm63xx_enetsw_register()
202 bcm63xx_enet_register(int unit, const struct bcm63xx_enet_platform_data *pd) bcm63xx_enet_register() argument
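
Note: the bcm63xx helpers above all copy the caller's pd (memcpy/platform_device_add_data), so board code can pass __initdata. A hedged board-side sketch (board name and field values are illustrative):

	static struct bcm63xx_enet_platform_data board_enet0_pd __initdata = {
		.has_phy		= 1,	/* illustrative configuration */
		.use_internal_phy	= 1,
	};

	static int __init board_register_devices(void)
	{
		return bcm63xx_enet_register(0, &board_enet0_pd);
	}
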
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_uiom.c198 struct usnic_uiom_pd *pd) usnic_uiom_unmap_sorted_intervals()
209 iommu_unmap(pd->domain, va, PAGE_SIZE); list_for_each_entry_safe()
216 static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, __usnic_uiom_reg_release() argument
230 spin_lock(&pd->lock); __usnic_uiom_reg_release()
231 usnic_uiom_remove_interval(&pd->rb_root, vpn_start, __usnic_uiom_reg_release()
233 usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); __usnic_uiom_reg_release()
243 spin_unlock(&pd->lock); __usnic_uiom_reg_release()
257 struct usnic_uiom_pd *pd = uiomr->pd; usnic_uiom_map_sorted_intervals() local
286 err = iommu_map(pd->domain, va_start, pa_start, list_for_each_entry()
303 err = iommu_map(pd->domain, va_start, pa_start, list_for_each_entry()
332 usnic_uiom_unmap_sorted_intervals(intervals, pd);
336 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, usnic_uiom_reg_get() argument
369 uiomr->pd = pd; usnic_uiom_reg_get()
379 spin_lock(&pd->lock); usnic_uiom_reg_get()
383 &pd->rb_root, usnic_uiom_reg_get()
399 err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, usnic_uiom_reg_get()
408 spin_unlock(&pd->lock); usnic_uiom_reg_get()
413 usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); usnic_uiom_reg_get()
418 spin_unlock(&pd->lock); usnic_uiom_reg_get()
429 __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); usnic_uiom_reg_release()
467 struct usnic_uiom_pd *pd; usnic_uiom_alloc_pd() local
470 pd = kzalloc(sizeof(*pd), GFP_KERNEL); usnic_uiom_alloc_pd()
471 if (!pd) usnic_uiom_alloc_pd()
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); usnic_uiom_alloc_pd()
477 PTR_ERR(pd->domain)); usnic_uiom_alloc_pd()
478 kfree(pd); usnic_uiom_alloc_pd()
482 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); usnic_uiom_alloc_pd()
484 spin_lock_init(&pd->lock); usnic_uiom_alloc_pd()
485 INIT_LIST_HEAD(&pd->devs); usnic_uiom_alloc_pd()
487 return pd; usnic_uiom_alloc_pd()
490 void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) usnic_uiom_dealloc_pd() argument
492 iommu_domain_free(pd->domain); usnic_uiom_dealloc_pd()
493 kfree(pd); usnic_uiom_dealloc_pd()
496 int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) usnic_uiom_attach_dev_to_pd() argument
506 err = iommu_attach_device(pd->domain, dev); usnic_uiom_attach_dev_to_pd()
517 spin_lock(&pd->lock); usnic_uiom_attach_dev_to_pd()
518 list_add_tail(&uiom_dev->link, &pd->devs); usnic_uiom_attach_dev_to_pd()
519 pd->dev_cnt++; usnic_uiom_attach_dev_to_pd()
520 spin_unlock(&pd->lock); usnic_uiom_attach_dev_to_pd()
525 iommu_detach_device(pd->domain, dev); usnic_uiom_attach_dev_to_pd()
531 void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) usnic_uiom_detach_dev_from_pd() argument
536 spin_lock(&pd->lock); usnic_uiom_detach_dev_from_pd()
537 list_for_each_entry(uiom_dev, &pd->devs, link) { usnic_uiom_detach_dev_from_pd()
547 spin_unlock(&pd->lock); usnic_uiom_detach_dev_from_pd()
552 pd->dev_cnt--; usnic_uiom_detach_dev_from_pd()
553 spin_unlock(&pd->lock); usnic_uiom_detach_dev_from_pd()
555 return iommu_detach_device(pd->domain, dev); usnic_uiom_detach_dev_from_pd()
558 struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) usnic_uiom_get_dev_list() argument
564 spin_lock(&pd->lock); usnic_uiom_get_dev_list()
565 devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); usnic_uiom_get_dev_list()
571 list_for_each_entry(uiom_dev, &pd->devs, link) { usnic_uiom_get_dev_list()
575 spin_unlock(&pd->lock); usnic_uiom_get_dev_list()
197 usnic_uiom_unmap_sorted_intervals(struct list_head *intervals, struct usnic_uiom_pd *pd) usnic_uiom_unmap_sorted_intervals() argument
H A Dusnic_uiom.h49 struct usnic_uiom_pd *pd; member in struct:usnic_uiom_reg
68 void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
69 int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
70 void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
72 struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd);
74 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
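
Note: taken together, the usnic_uiom.h declarations above give the whole pd lifecycle. A minimal sketch (error handling elided; pdev is a bound PCI function):

	struct usnic_uiom_pd *pd = usnic_uiom_alloc_pd();	/* allocates IOMMU domain */

	usnic_uiom_attach_dev_to_pd(pd, &pdev->dev);	/* device joins pd->domain */
	/* ... usnic_uiom_reg_get(pd, ...) pins and maps user memory ... */
	usnic_uiom_detach_dev_from_pd(pd, &pdev->dev);
	usnic_uiom_dealloc_pd(pd);			/* frees the domain */
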
H A Dusnic_ib_verbs.h40 int usnic_ib_dealloc_pd(struct ib_pd *pd);
41 struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
51 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
60 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
71 struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
H A Dusnic_ib_verbs.c131 struct usnic_ib_pd *pd, find_free_vf_and_create_qp_grp()
149 /* Try to find resources on a used vf which is in pd */ find_free_vf_and_create_qp_grp()
150 dev_list = usnic_uiom_get_dev_list(pd->umem_pd); find_free_vf_and_create_qp_grp()
190 qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec, find_free_vf_and_create_qp_grp()
422 struct usnic_ib_pd *pd; usnic_ib_alloc_pd() local
427 pd = kzalloc(sizeof(*pd), GFP_KERNEL); usnic_ib_alloc_pd()
428 if (!pd) usnic_ib_alloc_pd()
431 umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); usnic_ib_alloc_pd()
433 kfree(pd); usnic_ib_alloc_pd()
438 pd, context, ibdev->name); usnic_ib_alloc_pd()
439 return &pd->ibpd; usnic_ib_alloc_pd()
442 int usnic_ib_dealloc_pd(struct ib_pd *pd) usnic_ib_dealloc_pd() argument
444 usnic_info("freeing domain 0x%p\n", pd); usnic_ib_dealloc_pd()
446 usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); usnic_ib_dealloc_pd()
447 kfree(pd); usnic_ib_dealloc_pd()
451 struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, usnic_ib_create_qp() argument
466 ucontext = to_uucontext(pd->uobject->context); usnic_ib_create_qp()
467 us_ibdev = to_usdev(pd->device); usnic_ib_create_qp()
497 qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd), usnic_ib_create_qp()
594 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, usnic_ib_reg_mr() argument
608 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, usnic_ib_reg_mr()
629 usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); usnic_ib_dereg_mr()
719 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, usnic_ib_create_ah() argument
760 struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc) usnic_ib_get_dma_mr() argument
130 find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, struct usnic_ib_pd *pd, struct usnic_transport_spec *trans_spec, struct usnic_vnic_res_spec *res_spec) find_free_vf_and_create_qp_grp() argument
H A Dusnic_ib_qp_grp.c578 struct usnic_ib_pd *pd, qp_grp_and_vf_bind()
588 err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev); qp_grp_and_vf_bind()
594 vf->pd = pd; qp_grp_and_vf_bind()
598 WARN_ON(vf->pd != pd); qp_grp_and_vf_bind()
607 struct usnic_ib_pd *pd; qp_grp_and_vf_unbind() local
611 pd = qp_grp->vf->pd; qp_grp_and_vf_unbind()
614 qp_grp->vf->pd = NULL; qp_grp_and_vf_unbind()
615 usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev); qp_grp_and_vf_unbind()
661 struct usnic_ib_pd *pd, usnic_ib_qp_grp_create()
697 err = qp_grp_and_vf_bind(vf, pd, qp_grp); usnic_ib_qp_grp_create()
577 qp_grp_and_vf_bind(struct usnic_ib_vf *vf, struct usnic_ib_pd *pd, struct usnic_ib_qp_grp *qp_grp) qp_grp_and_vf_bind() argument
660 usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, struct usnic_ib_pd *pd, struct usnic_vnic_res_spec *res_spec, struct usnic_transport_spec *transport_spec) usnic_ib_qp_grp_create() argument
/linux-4.1.27/arch/mips/sgi-ip32/
H A Dip32-platform.c54 struct platform_device *pd; meth_devinit() local
57 pd = platform_device_alloc("meth", -1); meth_devinit()
58 if (!pd) meth_devinit()
61 ret = platform_device_add(pd); meth_devinit()
63 platform_device_put(pd); meth_devinit()
72 struct platform_device *pd; sgio2audio_devinit() local
75 pd = platform_device_alloc("sgio2audio", -1); sgio2audio_devinit()
76 if (!pd) sgio2audio_devinit()
79 ret = platform_device_add(pd); sgio2audio_devinit()
81 platform_device_put(pd); sgio2audio_devinit()
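
Note: both ip32 registrations above use the same three-call idiom, worth isolating once since it recurs in the cobalt and alchemy results further down (device name is illustrative):

	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("example-dev", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);	/* add failed: drop the lone reference */

	return ret;
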
/linux-4.1.27/drivers/infiniband/core/
H A Dverbs.c153 struct ib_pd *pd; ib_alloc_pd() local
155 pd = device->alloc_pd(device, NULL, NULL); ib_alloc_pd()
157 if (!IS_ERR(pd)) { ib_alloc_pd()
158 pd->device = device; ib_alloc_pd()
159 pd->uobject = NULL; ib_alloc_pd()
160 atomic_set(&pd->usecnt, 0); ib_alloc_pd()
163 return pd; ib_alloc_pd()
167 int ib_dealloc_pd(struct ib_pd *pd) ib_dealloc_pd() argument
169 if (atomic_read(&pd->usecnt)) ib_dealloc_pd()
172 return pd->device->dealloc_pd(pd); ib_dealloc_pd()
178 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) ib_create_ah() argument
182 ah = pd->device->create_ah(pd, ah_attr); ib_create_ah()
185 ah->device = pd->device; ib_create_ah()
186 ah->pd = pd; ib_create_ah()
188 atomic_inc(&pd->usecnt); ib_create_ah()
247 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, ib_create_ah_from_wc() argument
253 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); ib_create_ah_from_wc()
257 return ib_create_ah(pd, &ah_attr); ib_create_ah_from_wc()
279 struct ib_pd *pd; ib_destroy_ah() local
282 pd = ah->pd; ib_destroy_ah()
285 atomic_dec(&pd->usecnt); ib_destroy_ah()
293 struct ib_srq *ib_create_srq(struct ib_pd *pd, ib_create_srq() argument
298 if (!pd->device->create_srq) ib_create_srq()
301 srq = pd->device->create_srq(pd, srq_init_attr, NULL); ib_create_srq()
304 srq->device = pd->device; ib_create_srq()
305 srq->pd = pd; ib_create_srq()
316 atomic_inc(&pd->usecnt); ib_create_srq()
344 struct ib_pd *pd; ib_destroy_srq() local
353 pd = srq->pd; ib_destroy_srq()
362 atomic_dec(&pd->usecnt); ib_destroy_srq()
442 struct ib_qp *ib_create_qp(struct ib_pd *pd, ib_create_qp() argument
448 device = pd ? pd->device : qp_init_attr->xrcd->device; ib_create_qp()
449 qp = device->create_qp(pd, qp_init_attr, NULL); ib_create_qp()
461 qp->pd = NULL; ib_create_qp()
489 qp->pd = pd; ib_create_qp()
493 atomic_inc(&pd->usecnt); ib_create_qp()
978 struct ib_pd *pd; ib_destroy_qp() local
989 pd = qp->pd; ib_destroy_qp()
996 if (pd) ib_destroy_qp()
997 atomic_dec(&pd->usecnt); ib_destroy_qp()
1059 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) ib_get_dma_mr() argument
1068 mr = pd->device->get_dma_mr(pd, mr_access_flags); ib_get_dma_mr()
1071 mr->device = pd->device; ib_get_dma_mr()
1072 mr->pd = pd; ib_get_dma_mr()
1074 atomic_inc(&pd->usecnt); ib_get_dma_mr()
1082 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, ib_reg_phys_mr() argument
1095 if (!pd->device->reg_phys_mr) ib_reg_phys_mr()
1098 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, ib_reg_phys_mr()
1102 mr->device = pd->device; ib_reg_phys_mr()
1103 mr->pd = pd; ib_reg_phys_mr()
1105 atomic_inc(&pd->usecnt); ib_reg_phys_mr()
1115 struct ib_pd *pd, ib_rereg_phys_mr()
1134 old_pd = mr->pd; ib_rereg_phys_mr()
1136 ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, ib_rereg_phys_mr()
1142 atomic_inc(&pd->usecnt); ib_rereg_phys_mr()
1158 struct ib_pd *pd; ib_dereg_mr() local
1164 pd = mr->pd; ib_dereg_mr()
1167 atomic_dec(&pd->usecnt); ib_dereg_mr()
1173 struct ib_mr *ib_create_mr(struct ib_pd *pd, ib_create_mr() argument
1178 if (!pd->device->create_mr) ib_create_mr()
1181 mr = pd->device->create_mr(pd, mr_init_attr); ib_create_mr()
1184 mr->device = pd->device; ib_create_mr()
1185 mr->pd = pd; ib_create_mr()
1187 atomic_inc(&pd->usecnt); ib_create_mr()
1197 struct ib_pd *pd; ib_destroy_mr() local
1203 pd = mr->pd; ib_destroy_mr()
1206 atomic_dec(&pd->usecnt); ib_destroy_mr()
1212 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) ib_alloc_fast_reg_mr() argument
1216 if (!pd->device->alloc_fast_reg_mr) ib_alloc_fast_reg_mr()
1219 mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len); ib_alloc_fast_reg_mr()
1222 mr->device = pd->device; ib_alloc_fast_reg_mr()
1223 mr->pd = pd; ib_alloc_fast_reg_mr()
1225 atomic_inc(&pd->usecnt); ib_alloc_fast_reg_mr()
1260 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) ib_alloc_mw() argument
1264 if (!pd->device->alloc_mw) ib_alloc_mw()
1267 mw = pd->device->alloc_mw(pd, type); ib_alloc_mw()
1269 mw->device = pd->device; ib_alloc_mw()
1270 mw->pd = pd; ib_alloc_mw()
1273 atomic_inc(&pd->usecnt); ib_alloc_mw()
1282 struct ib_pd *pd; ib_dealloc_mw() local
1285 pd = mw->pd; ib_dealloc_mw()
1288 atomic_dec(&pd->usecnt); ib_dealloc_mw()
1296 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, ib_alloc_fmr() argument
1302 if (!pd->device->alloc_fmr) ib_alloc_fmr()
1305 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); ib_alloc_fmr()
1307 fmr->device = pd->device; ib_alloc_fmr()
1308 fmr->pd = pd; ib_alloc_fmr()
1309 atomic_inc(&pd->usecnt); ib_alloc_fmr()
1330 struct ib_pd *pd; ib_dealloc_fmr() local
1333 pd = fmr->pd; ib_dealloc_fmr()
1336 atomic_dec(&pd->usecnt); ib_dealloc_fmr()
1113 ib_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) ib_rereg_phys_mr() argument
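
Note: the verbs.c fragments above all enforce one contract: every object created on a pd increments pd->usecnt, and ib_dealloc_pd() refuses (returns -EBUSY) while usecnt is nonzero. A hedged sketch of that contract from a consumer's view (error handling elided):

	struct ib_pd *pd = ib_alloc_pd(device);
	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);

	/* ib_dealloc_pd(pd) here would fail: mr still holds pd->usecnt */

	ib_dereg_mr(mr);	/* atomic_dec(&pd->usecnt) */
	ib_dealloc_pd(pd);	/* usecnt == 0, underlying dealloc_pd() runs */
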
H A Duverbs_cmd.c214 static void put_pd_read(struct ib_pd *pd) put_pd_read() argument
216 put_uobj_read(pd->uobject); put_pd_read()
536 struct ib_pd *pd; ib_uverbs_alloc_pd() local
556 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, ib_uverbs_alloc_pd()
558 if (IS_ERR(pd)) { ib_uverbs_alloc_pd()
559 ret = PTR_ERR(pd); ib_uverbs_alloc_pd()
563 pd->device = file->device->ib_dev; ib_uverbs_alloc_pd()
564 pd->uobject = uobj; ib_uverbs_alloc_pd()
565 atomic_set(&pd->usecnt, 0); ib_uverbs_alloc_pd()
567 uobj->object = pd; ib_uverbs_alloc_pd()
595 ib_dealloc_pd(pd); ib_uverbs_alloc_pd()
944 struct ib_pd *pd; ib_uverbs_reg_mr() local
972 pd = idr_read_pd(cmd.pd_handle, file->ucontext); ib_uverbs_reg_mr()
973 if (!pd) { ib_uverbs_reg_mr()
981 ret = ib_query_device(pd->device, &attr); ib_uverbs_reg_mr()
990 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, ib_uverbs_reg_mr()
997 mr->device = pd->device; ib_uverbs_reg_mr()
998 mr->pd = pd; ib_uverbs_reg_mr()
1000 atomic_inc(&pd->usecnt); ib_uverbs_reg_mr()
1019 put_pd_read(pd); ib_uverbs_reg_mr()
1038 put_pd_read(pd); ib_uverbs_reg_mr()
1052 struct ib_pd *pd = NULL; ib_uverbs_rereg_mr() local
1091 pd = idr_read_pd(cmd.pd_handle, file->ucontext); ib_uverbs_rereg_mr()
1092 if (!pd) { ib_uverbs_rereg_mr()
1103 old_pd = mr->pd; ib_uverbs_rereg_mr()
1106 cmd.access_flags, pd, &udata); ib_uverbs_rereg_mr()
1109 atomic_inc(&pd->usecnt); ib_uverbs_rereg_mr()
1110 mr->pd = pd; ib_uverbs_rereg_mr()
1129 put_pd_read(pd); ib_uverbs_rereg_mr()
1183 struct ib_pd *pd; ib_uverbs_alloc_mw() local
1200 pd = idr_read_pd(cmd.pd_handle, file->ucontext); ib_uverbs_alloc_mw()
1201 if (!pd) { ib_uverbs_alloc_mw()
1206 mw = pd->device->alloc_mw(pd, cmd.mw_type); ib_uverbs_alloc_mw()
1212 mw->device = pd->device; ib_uverbs_alloc_mw()
1213 mw->pd = pd; ib_uverbs_alloc_mw()
1215 atomic_inc(&pd->usecnt); ib_uverbs_alloc_mw()
1232 put_pd_read(pd); ib_uverbs_alloc_mw()
1251 put_pd_read(pd); ib_uverbs_alloc_mw()
1631 struct ib_pd *pd = NULL; ib_uverbs_create_qp() local
1690 pd = idr_read_pd(cmd.pd_handle, file->ucontext); ib_uverbs_create_qp()
1691 if (!pd || !scq) { ib_uverbs_create_qp()
1696 device = pd->device; ib_uverbs_create_qp()
1720 qp = ib_create_qp(pd, &attr); ib_uverbs_create_qp()
1722 qp = device->create_qp(pd, &attr, &udata); ib_uverbs_create_qp()
1732 qp->pd = pd; ib_uverbs_create_qp()
1740 atomic_inc(&pd->usecnt); ib_uverbs_create_qp()
1776 if (pd) ib_uverbs_create_qp()
1777 put_pd_read(pd); ib_uverbs_create_qp()
1804 if (pd) ib_uverbs_create_qp()
1805 put_pd_read(pd); ib_uverbs_create_qp()
2536 struct ib_pd *pd; ib_uverbs_create_ah() local
2554 pd = idr_read_pd(cmd.pd_handle, file->ucontext); ib_uverbs_create_ah()
2555 if (!pd) { ib_uverbs_create_ah()
2574 ah = ib_create_ah(pd, &attr); ib_uverbs_create_ah()
2595 put_pd_read(pd); ib_uverbs_create_ah()
2614 put_pd_read(pd); ib_uverbs_create_ah()
2992 struct ib_pd *pd; __uverbs_create_xsrq() local
3022 pd = idr_read_pd(cmd->pd_handle, file->ucontext); __uverbs_create_xsrq()
3023 if (!pd) { __uverbs_create_xsrq()
3038 srq = pd->device->create_srq(pd, &attr, udata); __uverbs_create_xsrq()
3044 srq->device = pd->device; __uverbs_create_xsrq()
3045 srq->pd = pd; __uverbs_create_xsrq()
3058 atomic_inc(&pd->usecnt); __uverbs_create_xsrq()
3083 put_pd_read(pd); __uverbs_create_xsrq()
3102 put_pd_read(pd); __uverbs_create_xsrq()
/linux-4.1.27/lib/
H A Dproportions.c76 int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp) prop_descriptor_init() argument
83 pd->index = 0; prop_descriptor_init()
84 pd->pg[0].shift = shift; prop_descriptor_init()
85 mutex_init(&pd->mutex); prop_descriptor_init()
86 err = percpu_counter_init(&pd->pg[0].events, 0, gfp); prop_descriptor_init()
90 err = percpu_counter_init(&pd->pg[1].events, 0, gfp); prop_descriptor_init()
92 percpu_counter_destroy(&pd->pg[0].events); prop_descriptor_init()
105 void prop_change_shift(struct prop_descriptor *pd, int shift) prop_change_shift() argument
115 mutex_lock(&pd->mutex); prop_change_shift()
117 index = pd->index ^ 1; prop_change_shift()
118 offset = pd->pg[pd->index].shift - shift; prop_change_shift()
122 pd->pg[index].shift = shift; prop_change_shift()
125 events = percpu_counter_sum(&pd->pg[pd->index].events); prop_change_shift()
130 percpu_counter_set(&pd->pg[index].events, events); prop_change_shift()
136 pd->index = index; prop_change_shift()
142 mutex_unlock(&pd->mutex); prop_change_shift()
149 static struct prop_global *prop_get_global(struct prop_descriptor *pd) __acquires()
155 index = pd->index; __acquires()
160 return &pd->pg[index]; __acquires()
163 static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg) __releases()
256 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) __prop_inc_percpu() argument
258 struct prop_global *pg = prop_get_global(pd); __prop_inc_percpu()
263 prop_put_global(pd, pg); __prop_inc_percpu()
270 void __prop_inc_percpu_max(struct prop_descriptor *pd, __prop_inc_percpu_max() argument
273 struct prop_global *pg = prop_get_global(pd); __prop_inc_percpu_max()
295 prop_put_global(pd, pg); __prop_inc_percpu_max()
303 void prop_fraction_percpu(struct prop_descriptor *pd, prop_fraction_percpu() argument
307 struct prop_global *pg = prop_get_global(pd); prop_fraction_percpu()
318 prop_put_global(pd, pg); prop_fraction_percpu()
376 void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) __prop_inc_single() argument
378 struct prop_global *pg = prop_get_global(pd); __prop_inc_single()
383 prop_put_global(pd, pg); __prop_inc_single()
391 void prop_fraction_single(struct prop_descriptor *pd, prop_fraction_single() argument
395 struct prop_global *pg = prop_get_global(pd); prop_fraction_single()
406 prop_put_global(pd, pg); prop_fraction_single()
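
Note: the lib/proportions.c entries above form a complete API. A minimal sketch of one event source using it, with signatures as declared in include/linux/proportions.h below (the shift of 30 is an arbitrary example period):

	static struct prop_descriptor events;		/* the shared pd */
	static struct prop_local_percpu my_source;

	prop_descriptor_init(&events, 30, GFP_KERNEL);	/* period ~ 2^30 events */
	prop_local_init_percpu(&my_source, GFP_KERNEL);

	prop_inc_percpu(&events, &my_source);		/* count one event for us */

	long num, den;
	prop_fraction_percpu(&events, &my_source, &num, &den);
	/* num/den ~ this source's share of recent events */
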
/linux-4.1.27/arch/powerpc/platforms/ps3/
H A Dinterrupt.c108 struct ps3_private *pd = irq_data_get_irq_chip_data(d); ps3_chip_mask() local
112 pd->thread_id, d->irq); ps3_chip_mask()
115 clear_bit(63 - d->irq, &pd->bmp.mask); ps3_chip_mask()
116 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); ps3_chip_mask()
129 struct ps3_private *pd = irq_data_get_irq_chip_data(d); ps3_chip_unmask() local
133 pd->thread_id, d->irq); ps3_chip_unmask()
136 set_bit(63 - d->irq, &pd->bmp.mask); ps3_chip_unmask()
137 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); ps3_chip_unmask()
150 const struct ps3_private *pd = irq_data_get_irq_chip_data(d); ps3_chip_eoi() local
154 if (!test_bit(63 - d->irq, &pd->ipi_mask)) ps3_chip_eoi()
155 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); ps3_chip_eoi()
184 struct ps3_private *pd; ps3_virq_setup() local
191 pd = &per_cpu(ps3_private, cpu); ps3_virq_setup()
205 result = irq_set_chip_data(*virq, pd); ps3_virq_setup()
232 const struct ps3_private *pd = irq_get_chip_data(virq); ps3_virq_destroy() local
235 __LINE__, pd->ppe_id, pd->thread_id, virq); ps3_virq_destroy()
258 struct ps3_private *pd; ps3_irq_plug_setup() local
267 pd = irq_get_chip_data(*virq); ps3_irq_plug_setup()
271 result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq, ps3_irq_plug_setup()
302 const struct ps3_private *pd = irq_get_chip_data(virq); ps3_irq_plug_destroy() local
305 __LINE__, pd->ppe_id, pd->thread_id, virq); ps3_irq_plug_destroy()
309 result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); ps3_irq_plug_destroy()
646 static void _dump_bmp(struct ps3_private* pd, const char* func, int line) _dump_bmp() argument
650 spin_lock_irqsave(&pd->bmp_lock, flags); _dump_bmp()
651 _dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line); _dump_bmp()
652 _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line); _dump_bmp()
653 spin_unlock_irqrestore(&pd->bmp_lock, flags); _dump_bmp()
657 static void __maybe_unused _dump_mask(struct ps3_private *pd, _dump_mask() argument
662 spin_lock_irqsave(&pd->bmp_lock, flags); _dump_mask()
663 _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line); _dump_mask()
664 spin_unlock_irqrestore(&pd->bmp_lock, flags); _dump_mask()
667 static void dump_bmp(struct ps3_private* pd) {}; dump_bmp() argument
694 struct ps3_private *pd = &per_cpu(ps3_private, cpu); ps3_register_ipi_debug_brk() local
696 set_bit(63 - virq, &pd->ipi_debug_brk_mask); ps3_register_ipi_debug_brk()
699 cpu, virq, pd->ipi_debug_brk_mask); ps3_register_ipi_debug_brk()
704 struct ps3_private *pd = &per_cpu(ps3_private, cpu); ps3_register_ipi_irq() local
706 set_bit(63 - virq, &pd->ipi_mask); ps3_register_ipi_irq()
709 cpu, virq, pd->ipi_mask); ps3_register_ipi_irq()
714 struct ps3_private *pd = this_cpu_ptr(&ps3_private); ps3_get_irq() local
715 u64 x = (pd->bmp.status & pd->bmp.mask); ps3_get_irq()
720 if (x & pd->ipi_debug_brk_mask) ps3_get_irq()
721 x &= pd->ipi_debug_brk_mask; ps3_get_irq()
728 __LINE__, pd->thread_id); ps3_get_irq()
744 if (test_bit(63 - plug, &pd->ipi_mask)) ps3_get_irq()
745 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug); ps3_get_irq()
760 struct ps3_private *pd = &per_cpu(ps3_private, cpu); for_each_possible_cpu() local
762 lv1_get_logical_ppe_id(&pd->ppe_id); for_each_possible_cpu()
763 pd->thread_id = get_hard_smp_processor_id(cpu); for_each_possible_cpu()
764 spin_lock_init(&pd->bmp_lock); for_each_possible_cpu()
767 __func__, __LINE__, pd->ppe_id, pd->thread_id, for_each_possible_cpu()
768 ps3_mm_phys_to_lpar(__pa(&pd->bmp))); for_each_possible_cpu()
770 result = lv1_configure_irq_state_bitmap(pd->ppe_id, for_each_possible_cpu()
771 pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp))); for_each_possible_cpu()
/linux-4.1.27/include/linux/
H A Dproportions.h44 int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
45 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
67 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
68 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
72 void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) prop_inc_percpu() argument
77 __prop_inc_percpu(pd, pl); prop_inc_percpu()
94 void __prop_inc_percpu_max(struct prop_descriptor *pd,
123 void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
124 void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
128 void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) prop_inc_single() argument
133 __prop_inc_single(pd, pl); prop_inc_single()
H A Dpadata.h38 * @pd: Pointer to the internal control structure.
47 struct parallel_data *pd; member in struct:padata_priv
70 * @pd: Backpointer to the internal control structure.
75 struct parallel_data *pd; member in struct:padata_serial_queue
86 * @pd: Backpointer to the internal control structure.
94 struct parallel_data *pd; member in struct:padata_parallel_queue
144 * @pd: The internal control structure.
156 struct parallel_data *pd; member in struct:padata_instance
/linux-4.1.27/include/sound/
H A Dsh_dac_audio.h17 void (*start)(struct dac_audio_pdata *pd);
18 void (*stop)(struct dac_audio_pdata *pd);
/linux-4.1.27/arch/arm/plat-samsung/include/plat/
H A Dpwm-core.h17 extern void samsung_pwm_set_platdata(struct samsung_pwm_variant *pd);
19 static inline void samsung_pwm_set_platdata(struct samsung_pwm_variant *pd) { } argument
H A Dsdhci.h25 * @pd: The default platform data for this device.
28 extern void s3c_sdhci_set_platdata(struct s3c_sdhci_platdata *pd,
33 * @pd: Platform data to register to device.
39 extern void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd);
40 extern void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd);
41 extern void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd);
42 extern void s3c_sdhci3_set_platdata(struct s3c_sdhci_platdata *pd);
H A Dfb.h22 * @pd: The platform data to set. The data is copied from the passed structure
26 extern void s3c_fb_set_platdata(struct s3c_fb_platdata *pd);
H A Dkeypad.h20 * @pd: Platform data to register to device.
26 extern void samsung_keypad_set_platdata(struct samsung_keypad_platdata *pd);
H A Ddevs.h90 * @pd: The default platform data for this device.
97 extern void *s3c_set_platdata(void *pd, size_t pdsize,
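
Note: the plat-samsung setters above funnel into s3c_set_platdata(), which copies the caller's pd into the device, so boards can pass __initdata; e.g. (platform data contents omitted):

	static struct s3c_fb_platdata board_fb_pd __initdata = {
		/* ... board-specific window/timing setup ... */
	};

	s3c_fb_set_platdata(&board_fb_pd);	/* copies pd into s3c_device_fb */
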
/linux-4.1.27/arch/mips/cobalt/
H A Dbuttons.c33 struct platform_device *pd; cobalt_add_buttons() local
36 pd = platform_device_alloc("Cobalt buttons", -1); cobalt_add_buttons()
37 if (!pd) cobalt_add_buttons()
40 error = platform_device_add_resources(pd, &cobalt_buttons_resource, 1); cobalt_add_buttons()
44 error = platform_device_add(pd); cobalt_add_buttons()
51 platform_device_put(pd); cobalt_add_buttons()
/linux-4.1.27/arch/mips/alchemy/devboards/
H A Dplatform.c97 struct platform_device *pd; db1x_register_pcmcia_socket() local
109 pd = platform_device_alloc("db1xxx_pcmcia", id); db1x_register_pcmcia_socket()
110 if (!pd) { db1x_register_pcmcia_socket()
151 pd->resource = sr; db1x_register_pcmcia_socket()
152 pd->num_resources = cnt; db1x_register_pcmcia_socket()
154 ret = platform_device_add(pd); db1x_register_pcmcia_socket()
158 platform_device_put(pd); db1x_register_pcmcia_socket()
171 struct platform_device *pd; db1x_register_norflash() local
192 pd = platform_device_alloc("physmap-flash", 0); db1x_register_norflash()
193 if (!pd) db1x_register_norflash()
246 pd->dev.platform_data = pfd; db1x_register_norflash()
247 pd->resource = res; db1x_register_norflash()
248 pd->num_resources = 1; db1x_register_norflash()
250 ret = platform_device_add(pd); db1x_register_norflash()
254 platform_device_put(pd); db1x_register_norflash()
/linux-4.1.27/arch/arm/mach-s3c64xx/
H A Dpm.c42 struct generic_pm_domain pd; member in struct:s3c64xx_pm_domain
47 struct s3c64xx_pm_domain *pd; s3c64xx_pd_off() local
50 pd = container_of(domain, struct s3c64xx_pm_domain, pd); s3c64xx_pd_off()
53 val &= ~(pd->ena); s3c64xx_pd_off()
61 struct s3c64xx_pm_domain *pd; s3c64xx_pd_on() local
65 pd = container_of(domain, struct s3c64xx_pm_domain, pd); s3c64xx_pd_on()
68 val |= pd->ena; s3c64xx_pd_on()
72 if (pd->pwr_stat) { s3c64xx_pd_on()
75 if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat) s3c64xx_pd_on()
80 pr_err("Failed to start domain %s\n", pd->name); s3c64xx_pd_on()
91 .pd = {
101 .pd = {
111 .pd = {
121 .pd = {
131 .pd = {
141 .pd = {
150 .pd = {
160 .pd = {
319 pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd, s3c64xx_pm_init()
323 pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); s3c64xx_pm_init()
327 pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); s3c64xx_pm_init()
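
Note, for context on the pm.c entries above: each s3c64xx_pm_domain wraps a generic_pm_domain whose callbacks are the pd_on/pd_off pair shown, roughly as below (enable-bit macro recalled from memory, other fields elided):

	static struct s3c64xx_pm_domain s3c64xx_pm_f = {
		.ena	= S3C64XX_NORMALCFG_DOMAIN_F_ON,	/* domain enable bit */
		.pd	= {
			.name		= "F",
			.power_on	= s3c64xx_pd_on,
			.power_off	= s3c64xx_pd_off,
		},
	};
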
/linux-4.1.27/net/dsa/
H A Ddsa.c183 struct dsa_chip_data *pd = ds->pd; dsa_switch_setup_one() local
194 name = pd->port_names[i]; dsa_switch_setup_one()
292 ret = dsa_slave_create(ds, parent, i, pd->port_names[i]); dsa_switch_setup_one()
295 index, i, pd->port_names[i]); dsa_switch_setup_one()
338 struct dsa_chip_data *pd = dst->pd->chip + index; dsa_switch_setup() local
347 drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); dsa_switch_setup()
366 ds->pd = pd; dsa_switch_setup()
440 for (i = 0; i < dst->pd->nr_chips; i++) { dsa_link_poll_work()
514 static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, dsa_of_setup_routing_table() argument
538 if (link_sw_addr >= pd->nr_chips) dsa_of_setup_routing_table()
543 cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8), dsa_of_setup_routing_table()
549 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); dsa_of_setup_routing_table()
557 static void dsa_of_free_platform_data(struct dsa_platform_data *pd) dsa_of_free_platform_data() argument
562 for (i = 0; i < pd->nr_chips; i++) { dsa_of_free_platform_data()
565 kfree(pd->chip[i].port_names[port_index]); dsa_of_free_platform_data()
568 kfree(pd->chip[i].rtable); dsa_of_free_platform_data()
570 kfree(pd->chip); dsa_of_free_platform_data()
579 struct dsa_platform_data *pd; dsa_of_probe() local
603 pd = kzalloc(sizeof(*pd), GFP_KERNEL); dsa_of_probe()
604 if (!pd) dsa_of_probe()
607 dev->platform_data = pd; dsa_of_probe()
608 pd->of_netdev = ethernet_dev; dsa_of_probe()
609 pd->nr_chips = of_get_available_child_count(np); dsa_of_probe()
610 if (pd->nr_chips > DSA_MAX_SWITCHES) dsa_of_probe()
611 pd->nr_chips = DSA_MAX_SWITCHES; dsa_of_probe()
613 pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data), dsa_of_probe()
615 if (!pd->chip) { dsa_of_probe()
623 cd = &pd->chip[chip_index]; for_each_available_child_of_node()
662 pd->nr_chips > 1) { for_each_available_child_of_node()
663 ret = dsa_of_setup_routing_table(pd, cd, for_each_available_child_of_node()
677 dsa_of_free_platform_data(pd);
679 kfree(pd);
686 struct dsa_platform_data *pd = dev->platform_data; dsa_of_remove() local
691 dsa_of_free_platform_data(pd); dsa_of_remove()
692 kfree(pd); dsa_of_remove()
706 struct device *parent, struct dsa_platform_data *pd) dsa_setup_dst()
710 dst->pd = pd; dsa_setup_dst()
715 for (i = 0; i < pd->nr_chips; i++) { dsa_setup_dst()
718 ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev); dsa_setup_dst()
750 struct dsa_platform_data *pd = pdev->dev.platform_data; dsa_probe() local
763 pd = pdev->dev.platform_data; dsa_probe()
766 if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL)) dsa_probe()
769 if (pd->of_netdev) { dsa_probe()
770 dev = pd->of_netdev; dsa_probe()
773 dev = dev_to_net_device(pd->netdev); dsa_probe()
795 dsa_setup_dst(dst, dev, &pdev->dev, pd); dsa_probe()
814 for (i = 0; i < dst->pd->nr_chips; i++) { dsa_remove_dst()
865 for (i = 0; i < dst->pd->nr_chips; i++) { dsa_suspend()
881 for (i = 0; i < dst->pd->nr_chips; i++) { dsa_resume()
705 dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, struct device *parent, struct dsa_platform_data *pd) dsa_setup_dst() argument
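
Note: a hedged sketch of the pd that dsa_probe()/dsa_setup_dst() above consume when it arrives via board code rather than DT (addresses, devices and port names are illustrative):

	static struct dsa_chip_data board_switch = {
		.host_dev	= &board_mdio_device.dev,	/* hypothetical MDIO bus */
		.sw_addr	= 0x10,
		.port_names	= { "lan1", "lan2", "lan3", "lan4", "cpu" },
	};

	static struct dsa_platform_data board_dsa_pd = {
		.netdev		= &board_eth_device.dev,	/* hypothetical host MAC */
		.nr_chips	= 1,
		.chip		= &board_switch,
	};
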
/linux-4.1.27/arch/mips/netlogic/common/
H A Dirq.c90 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); xlp_pic_enable() local
92 BUG_ON(!pd); xlp_pic_enable()
93 spin_lock_irqsave(&pd->node->piclock, flags); xlp_pic_enable()
94 nlm_pic_enable_irt(pd->node->picbase, pd->irt); xlp_pic_enable()
95 spin_unlock_irqrestore(&pd->node->piclock, flags); xlp_pic_enable()
100 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); xlp_pic_disable() local
103 BUG_ON(!pd); xlp_pic_disable()
104 spin_lock_irqsave(&pd->node->piclock, flags); xlp_pic_disable()
105 nlm_pic_disable_irt(pd->node->picbase, pd->irt); xlp_pic_disable()
106 spin_unlock_irqrestore(&pd->node->piclock, flags); xlp_pic_disable()
111 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); xlp_pic_mask_ack() local
113 clear_c0_eimr(pd->picirq); xlp_pic_mask_ack()
114 ack_c0_eirr(pd->picirq); xlp_pic_mask_ack()
119 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); xlp_pic_unmask() local
121 BUG_ON(!pd); xlp_pic_unmask()
123 if (pd->extra_ack) xlp_pic_unmask()
124 pd->extra_ack(d); xlp_pic_unmask()
127 set_c0_eimr(pd->picirq); xlp_pic_unmask()
130 nlm_pic_ack(pd->node->picbase, pd->irt); xlp_pic_unmask()
/linux-4.1.27/drivers/regulator/
H A Dmax8952.c140 struct max8952_platform_data *pd; max8952_parse_dt() local
145 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); max8952_parse_dt()
146 if (!pd) max8952_parse_dt()
149 pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0); max8952_parse_dt()
150 pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1); max8952_parse_dt()
151 pd->gpio_en = of_get_named_gpio(np, "max8952,en-gpio", 0); max8952_parse_dt()
153 if (of_property_read_u32(np, "max8952,default-mode", &pd->default_mode)) max8952_parse_dt()
157 pd->dvs_mode, ARRAY_SIZE(pd->dvs_mode)); max8952_parse_dt()
163 for (i = 0; i < ARRAY_SIZE(pd->dvs_mode); ++i) { max8952_parse_dt()
164 if (pd->dvs_mode[i] < 770000 || pd->dvs_mode[i] > 1400000) { max8952_parse_dt()
168 pd->dvs_mode[i] = (pd->dvs_mode[i] - 770000) / 10000; max8952_parse_dt()
171 if (of_property_read_u32(np, "max8952,sync-freq", &pd->sync_freq)) max8952_parse_dt()
174 if (of_property_read_u32(np, "max8952,ramp-speed", &pd->ramp_speed)) max8952_parse_dt()
177 pd->reg_data = of_get_regulator_init_data(dev, np, &regulator); max8952_parse_dt()
178 if (!pd->reg_data) { max8952_parse_dt()
183 return pd; max8952_parse_dt()
/linux-4.1.27/sound/soc/samsung/
H A Ds3c24xx_simtec.c203 static int simtec_call_startup(struct s3c24xx_audio_simtec_pdata *pd) simtec_call_startup() argument
208 if (pd->startup) simtec_call_startup()
209 pd->startup(); simtec_call_startup()
221 * @pd: The platform data supplied by the board.
227 struct s3c24xx_audio_simtec_pdata *pd) attach_gpio_amp()
233 ret = gpio_request(pd->amp_gain[0], "gpio-amp-gain0"); attach_gpio_amp()
239 ret = gpio_request(pd->amp_gain[1], "gpio-amp-gain1"); attach_gpio_amp()
246 gpio_direction_output(pd->amp_gain[0], 0); attach_gpio_amp()
247 gpio_direction_output(pd->amp_gain[1], 0); attach_gpio_amp()
252 ret = gpio_request(pd->amp_gpio, "gpio-amp"); attach_gpio_amp()
255 pd->amp_gpio, ret); attach_gpio_amp()
266 if (pd->amp_gain[0] > 0) { attach_gpio_amp()
267 gpio_free(pd->amp_gain[0]); attach_gpio_amp()
268 gpio_free(pd->amp_gain[1]); attach_gpio_amp()
274 static void detach_gpio_amp(struct s3c24xx_audio_simtec_pdata *pd) detach_gpio_amp() argument
276 if (pd->amp_gain[0] > 0) { detach_gpio_amp()
277 gpio_free(pd->amp_gain[0]); detach_gpio_amp()
278 gpio_free(pd->amp_gain[1]); detach_gpio_amp()
281 if (pd->amp_gpio > 0) detach_gpio_amp()
282 gpio_free(pd->amp_gpio); detach_gpio_amp()
226 attach_gpio_amp(struct device *dev, struct s3c24xx_audio_simtec_pdata *pd) attach_gpio_amp() argument
H A Ds3c24xx_simtec_hermes.c94 static int simtec_audio_hermes_probe(struct platform_device *pd) simtec_audio_hermes_probe() argument
96 dev_info(&pd->dev, "probing....\n"); simtec_audio_hermes_probe()
97 return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic33); simtec_audio_hermes_probe()
H A Ds3c24xx_simtec_tlv320aic23.c83 static int simtec_audio_tlv320aic23_probe(struct platform_device *pd) simtec_audio_tlv320aic23_probe() argument
85 return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23); simtec_audio_tlv320aic23_probe()
/linux-4.1.27/drivers/hsi/controllers/
H A Domap_ssi.c294 static int __init ssi_get_iomem(struct platform_device *pd, ssi_get_iomem() argument
300 struct hsi_controller *ssi = platform_get_drvdata(pd); ssi_get_iomem()
302 mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); ssi_get_iomem()
304 dev_err(&pd->dev, "IO memory region missing (%s)\n", name); ssi_get_iomem()
308 resource_size(mem), dev_name(&pd->dev)); ssi_get_iomem()
310 dev_err(&pd->dev, "%s IO memory region request failed\n", ssi_get_iomem()
316 dev_err(&pd->dev, "%s IO remap failed\n", mem->name); ssi_get_iomem()
328 struct platform_device *pd) ssi_add_controller()
335 dev_err(&pd->dev, "not enough memory for omap ssi\n"); ssi_add_controller()
346 ssi->device.parent = &pd->dev; ssi_add_controller()
350 err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL); ssi_add_controller()
353 err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL); ssi_add_controller()
356 err = platform_get_irq_byname(pd, "gdd_mpu"); ssi_add_controller()
358 dev_err(&pd->dev, "GDD IRQ resource missing\n"); ssi_add_controller()
381 dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n", ssi_add_controller()
474 static int __init ssi_probe(struct platform_device *pd) ssi_probe() argument
477 struct device_node *np = pd->dev.of_node; ssi_probe()
484 dev_err(&pd->dev, "missing device tree data\n"); ssi_probe()
492 dev_err(&pd->dev, "No memory for controller\n"); ssi_probe()
496 platform_set_drvdata(pd, ssi); ssi_probe()
498 err = ssi_add_controller(ssi, pd); ssi_probe()
502 pm_runtime_irq_safe(&pd->dev); ssi_probe()
503 pm_runtime_enable(&pd->dev); ssi_probe()
518 childpdev = of_platform_device_create(child, NULL, &pd->dev); for_each_available_child_of_node()
521 dev_err(&pd->dev, "failed to create ssi controller port\n"); for_each_available_child_of_node()
526 dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n",
530 device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
534 platform_set_drvdata(pd, NULL);
535 pm_runtime_disable(&pd->dev);
540 static int __exit ssi_remove(struct platform_device *pd) ssi_remove() argument
542 struct hsi_controller *ssi = platform_get_drvdata(pd); ssi_remove()
548 platform_set_drvdata(pd, NULL); ssi_remove()
550 pm_runtime_disable(&pd->dev); ssi_remove()
553 device_for_each_child(&pd->dev, NULL, ssi_remove_ports); ssi_remove()
327 ssi_add_controller(struct hsi_controller *ssi, struct platform_device *pd) ssi_add_controller() argument
H A Domap_ssi_port.c1011 struct platform_device *pd) ssi_port_irq()
1016 err = platform_get_irq(pd, 0); ssi_port_irq()
1033 struct platform_device *pd) ssi_wake_irq()
1074 static int __init ssi_port_get_iomem(struct platform_device *pd, ssi_port_get_iomem() argument
1077 struct hsi_port *port = platform_get_drvdata(pd); ssi_port_get_iomem()
1082 mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); ssi_port_get_iomem()
1084 dev_err(&pd->dev, "IO memory region missing (%s)\n", name); ssi_port_get_iomem()
1088 resource_size(mem), dev_name(&pd->dev)); ssi_port_get_iomem()
1090 dev_err(&pd->dev, "%s IO memory region request failed\n", ssi_port_get_iomem()
1096 dev_err(&pd->dev, "%s IO remap failed\n", mem->name); ssi_port_get_iomem()
1107 static int __init ssi_port_probe(struct platform_device *pd) ssi_port_probe() argument
1109 struct device_node *np = pd->dev.of_node; ssi_port_probe()
1112 struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); ssi_port_probe()
1118 dev_dbg(&pd->dev, "init ssi port...\n"); ssi_port_probe()
1121 dev_err(&pd->dev, "could not increment parent module refcount\n"); ssi_port_probe()
1126 dev_err(&pd->dev, "ssi controller not initialized!\n"); ssi_port_probe()
1137 dev_err(&pd->dev, "port id out of range!\n"); ssi_port_probe()
1145 dev_err(&pd->dev, "missing device tree data\n"); ssi_port_probe()
1152 dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n", ssi_port_probe()
1161 dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n", ssi_port_probe()
1173 omap_port->pdev = &pd->dev; ssi_port_probe()
1186 platform_set_drvdata(pd, port); ssi_port_probe()
1188 err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, ssi_port_probe()
1192 err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, ssi_port_probe()
1197 err = ssi_port_irq(port, pd); ssi_port_probe()
1200 err = ssi_wake_irq(port, pd); ssi_port_probe()
1222 dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n", ssi_port_probe()
1231 static int __exit ssi_port_remove(struct platform_device *pd) ssi_port_remove() argument
1233 struct hsi_port *port = platform_get_drvdata(pd); ssi_port_remove()
1255 platform_set_drvdata(pd, NULL); ssi_port_remove()
1257 pm_runtime_disable(&pd->dev); ssi_port_remove()
1010 ssi_port_irq(struct hsi_port *port, struct platform_device *pd) ssi_port_irq() argument
1032 ssi_wake_irq(struct hsi_port *port, struct platform_device *pd) ssi_wake_irq() argument
/linux-4.1.27/drivers/input/misc/
H A Dsoc_button_array.c75 struct platform_device *pd; soc_button_device_create() local
117 pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO); soc_button_device_create()
118 if (!pd) { soc_button_device_create()
123 error = platform_device_add_data(pd, gpio_keys_pdata, soc_button_device_create()
128 error = platform_device_add(pd); soc_button_device_create()
132 return pd; soc_button_device_create()
135 platform_device_put(pd); soc_button_device_create()
160 struct platform_device *pd; soc_button_probe() local
177 pd = soc_button_device_create(pdev, button_info, i == 0); soc_button_probe()
178 if (IS_ERR(pd)) { soc_button_probe()
179 error = PTR_ERR(pd); soc_button_probe()
187 priv->children[i] = pd; soc_button_probe()
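soc_button_device_create() above is the canonical three-step child-device
idiom: allocate, attach a copy of the platform data, add; every failure path
unwinds with a single platform_device_put(). A sketch of that idiom under the
same "gpio-keys" name (the payload is whatever pdata struct the child expects):

	static struct platform_device *demo_create_child(const void *pdata,
							 size_t len)
	{
		struct platform_device *pd;
		int error;

		pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO);
		if (!pd)
			return ERR_PTR(-ENOMEM);

		/* copies @pdata, so a caller-local buffer is fine */
		error = platform_device_add_data(pd, pdata, len);
		if (error)
			goto err_put;

		error = platform_device_add(pd);
		if (error)
			goto err_put;

		return pd;

	err_put:
		platform_device_put(pd);	/* frees pd and the copied data */
		return ERR_PTR(error);
	}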
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
H A DMakefile4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
/linux-4.1.27/drivers/input/joystick/
H A Dturbografx.c77 struct pardevice *pd; member in struct:tgfx
102 parport_write_data(tgfx->pd->port, ~(1 << i)); tgfx_timer()
103 data1 = parport_read_status(tgfx->pd->port) ^ 0x7f; tgfx_timer()
104 data2 = parport_read_control(tgfx->pd->port) ^ 0x04; /* CAVEAT parport */ tgfx_timer()
131 parport_claim(tgfx->pd); tgfx_open()
132 parport_write_control(tgfx->pd->port, 0x04); tgfx_open()
147 parport_write_control(tgfx->pd->port, 0x00); tgfx_close()
148 parport_release(tgfx->pd); tgfx_close()
164 struct pardevice *pd; tgfx_probe() local
175 pd = parport_register_device(pp, "turbografx", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); tgfx_probe()
176 if (!pd) { tgfx_probe()
190 tgfx->pd = pd; tgfx_probe()
216 "%s/input%d", tgfx->pd->port->name, i); tgfx_probe()
260 parport_unregister_device(pd); tgfx_probe()
274 parport_unregister_device(tgfx->pd); tgfx_remove()
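turbografx.c (and gamecon.c/db9.c below) all drive the pads through the same
parport lifecycle: register a pardevice on the port, claim it around actual
I/O, release it afterwards, unregister on the way out. A compact sketch using
only the calls visible in the hits:

	#include <linux/parport.h>

	static int demo_poll_pad(struct parport *pp)
	{
		struct pardevice *pd;
		unsigned char status;

		pd = parport_register_device(pp, "demo", NULL, NULL, NULL,
					     PARPORT_DEV_EXCL, NULL);
		if (!pd)
			return -EBUSY;

		parport_claim(pd);		/* port is ours while claimed */
		parport_write_data(pd->port, 0xff);
		status = parport_read_status(pd->port) ^ 0x7f;
		parport_release(pd);

		parport_unregister_device(pd);
		return status;
	}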
H A Dgamecon.c90 struct pardevice *pd; member in struct:gc
151 struct parport *port = gc->pd->port; gc_n64_send_command()
164 struct parport *port = gc->pd->port; gc_n64_send_stop_bit()
206 parport_write_data(gc->pd->port, GC_N64_POWER_R); gc_n64_read_packet()
208 data[i] = parport_read_status(gc->pd->port); gc_n64_read_packet()
209 parport_write_data(gc->pd->port, GC_N64_POWER_R | GC_N64_CLOCK); gc_n64_read_packet()
360 parport_write_data(gc->pd->port, GC_NES_POWER | GC_NES_CLOCK | GC_NES_LATCH); gc_nes_read_packet()
362 parport_write_data(gc->pd->port, GC_NES_POWER | GC_NES_CLOCK); gc_nes_read_packet()
366 parport_write_data(gc->pd->port, GC_NES_POWER); gc_nes_read_packet()
367 data[i] = parport_read_status(gc->pd->port) ^ 0x7f; gc_nes_read_packet()
369 parport_write_data(gc->pd->port, GC_NES_POWER | GC_NES_CLOCK); gc_nes_read_packet()
480 parport_write_data(gc->pd->port, ~(1 << i)); gc_multi_read_packet()
481 data[i] = parport_read_status(gc->pd->port) ^ 0x7f; gc_multi_read_packet()
567 struct parport *port = gc->pd->port; gc_psx_command()
586 parport_write_data(gc->pd->port, cmd | GC_PSX_CLOCK | GC_PSX_POWER); gc_psx_command()
605 parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_SELECT | GC_PSX_POWER); gc_psx_read_packet()
608 parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_POWER); gc_psx_read_packet()
637 parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_SELECT | GC_PSX_POWER); gc_psx_read_packet()
792 parport_claim(gc->pd); gc_open()
793 parport_write_control(gc->pd->port, 0x04); gc_open()
808 parport_write_control(gc->pd->port, 0x00); gc_close()
809 parport_release(gc->pd); gc_close()
835 "%s/input%d", gc->pd->port->name, idx); gc_setup_pad()
933 struct pardevice *pd; gc_probe() local
945 pd = parport_register_device(pp, "gamecon", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); gc_probe()
946 if (!pd) { gc_probe()
960 gc->pd = pd; gc_probe()
990 parport_unregister_device(pd); gc_probe()
1004 parport_unregister_device(gc->pd); gc_remove()
H A Ddb9.c106 struct pardevice *pd; member in struct:db9
369 struct parport *port = db9->pd->port; db9_timer()
520 struct parport *port = db9->pd->port; db9_open()
528 parport_claim(db9->pd); db9_open()
544 struct parport *port = db9->pd->port; db9_close()
551 parport_release(db9->pd); db9_close()
561 struct pardevice *pd; db9_probe() local
587 pd = parport_register_device(pp, "db9", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); db9_probe()
588 if (!pd) { db9_probe()
602 db9->pd = pd; db9_probe()
618 "%s/input%d", db9->pd->port->name, i); db9_probe()
657 parport_unregister_device(pd); db9_probe()
670 parport_unregister_device(db9->pd); db9_remove()
/linux-4.1.27/block/
H A Dblk-cgroup.c55 kfree(blkg->pd[i]); blkg_free()
94 struct blkg_policy_data *pd; blkg_alloc() local
100 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); blkg_alloc()
101 if (!pd) blkg_alloc()
104 blkg->pd[i] = pd; blkg_alloc()
105 pd->blkg = blkg; blkg_alloc()
106 pd->plid = i; blkg_alloc()
217 if (blkg->pd[i] && pol->pd_init_fn) blkg_create()
231 if (blkg->pd[i] && pol->pd_online_fn) blkg_create()
325 if (blkg->pd[i] && pol->pd_offline_fn) blkg_destroy()
396 if (blkg->pd[i] && pol->pd_exit_fn) __blkg_release_rcu()
504 * This function invokes @prfill on each blkg of @blkcg if pd for the
526 total += prfill(sf, blkg->pd[pol->plid], data); blkcg_print_blkgs()
539 * @pd: policy private data of interest
542 * Print @v to @sf for the device associated with @pd. __blkg_prfill_u64()
544 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) __blkg_prfill_u64() argument
546 const char *dname = blkg_dev_name(pd->blkg); __blkg_prfill_u64()
559 * @pd: policy private data of interest
562 * Print @rwstat to @sf for the device associated with @pd. __blkg_prfill_rwstat()
564 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, __blkg_prfill_rwstat() argument
573 const char *dname = blkg_dev_name(pd->blkg); __blkg_prfill_rwstat()
593 * @pd: policy private data of interest
594 * @off: offset to the blkg_stat in @pd
598 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) blkg_prfill_stat() argument
600 return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off)); blkg_prfill_stat()
607 * @pd: policy private data of interest
608 * @off: offset to the blkg_rwstat in @pd
612 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, blkg_prfill_rwstat() argument
615 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off); blkg_prfill_rwstat()
617 return __blkg_prfill_rwstat(sf, pd, &rwstat); blkg_prfill_rwstat()
623 * @pd: policy private data of interest
624 * @off: offset to the blkg_stat in @pd
626 * Collect the blkg_stat specified by @off from @pd and all its online
630 u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off) blkg_stat_recursive_sum() argument
632 struct blkcg_policy *pol = blkcg_policy[pd->plid]; blkg_stat_recursive_sum()
637 lockdep_assert_held(pd->blkg->q->queue_lock); blkg_stat_recursive_sum()
640 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { blkg_for_each_descendant_pre()
655 * @pd: policy private data of interest
656 * @off: offset to the blkg_stat in @pd
658 * Collect the blkg_rwstat specified by @off from @pd and all its online
662 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, blkg_rwstat_recursive_sum() argument
665 struct blkcg_policy *pol = blkcg_policy[pd->plid]; blkg_rwstat_recursive_sum()
671 lockdep_assert_held(pd->blkg->q->queue_lock); blkg_rwstat_recursive_sum()
674 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { blkg_for_each_descendant_pre()
966 struct blkg_policy_data *pd, *n; blkcg_activate_policy() local
1012 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); blkcg_activate_policy()
1013 if (!pd) { blkcg_activate_policy()
1017 list_add_tail(&pd->alloc_node, &pds); blkcg_activate_policy()
1032 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); blkcg_activate_policy()
1033 list_del_init(&pd->alloc_node); blkcg_activate_policy()
1035 /* grab blkcg lock too while installing @pd on @blkg */ blkcg_activate_policy()
1038 blkg->pd[pol->plid] = pd; blkcg_activate_policy()
1039 pd->blkg = blkg; blkcg_activate_policy()
1040 pd->plid = pol->plid; blkcg_activate_policy()
1052 list_for_each_entry_safe(pd, n, &pds, alloc_node) blkcg_activate_policy()
1053 kfree(pd); blkcg_activate_policy()
1084 /* grab blkcg lock too while removing @pd from @blkg */ blkcg_deactivate_policy()
1092 kfree(blkg->pd[pol->plid]); blkcg_deactivate_policy()
1093 blkg->pd[pol->plid] = NULL; blkcg_deactivate_policy()
H A Dblk-cgroup.h78 * Such private data must embed struct blkg_policy_data (pd) at the
79 * beginning and pd_size can't be smaller than pd.
110 struct blkg_policy_data *pd[BLKCG_MAX_POLS]; member in struct:blkcg_gq
158 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
159 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
161 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
162 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
165 u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
166 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
218 return blkg ? blkg->pd[pol->plid] : NULL; blkg_to_pd()
223 * @pd: policy private data of interest
225 * @pd is policy private data. Determine the blkg it's associated with.
227 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) pd_to_blkg() argument
229 return pd ? pd->blkg : NULL; pd_to_blkg()
588 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } blkg_path() argument
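The comment at blk-cgroup.h:78 is the contract that makes the pd[] array above
work: a policy's per-blkg private struct embeds struct blkg_policy_data at its
start, so blkg_to_pd()/pd_to_blkg() can traffic in the generic type while the
policy up-casts. A sketch of the usual wrapper shape (struct names here are
hypothetical; blk-throttle and cfq follow the same pattern):

	struct demo_grp {
		struct blkg_policy_data pd;	/* must come first */
		u64 bytes_dispatched;
	};

	static inline struct demo_grp *pd_to_demo(struct blkg_policy_data *pd)
	{
		return pd ? container_of(pd, struct demo_grp, pd) : NULL;
	}

	static inline struct demo_grp *blkg_to_demo(struct blkcg_gq *blkg,
						    struct blkcg_policy *pol)
	{
		return pd_to_demo(blkg_to_pd(blkg, pol));
	}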
/linux-4.1.27/drivers/iommu/
H A Dtegra-smmu.c42 struct page *pd; member in struct:tegra_smmu_as
239 uint32_t *pd; tegra_smmu_domain_alloc() local
250 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); tegra_smmu_domain_alloc()
251 if (!as->pd) { tegra_smmu_domain_alloc()
258 __free_page(as->pd); tegra_smmu_domain_alloc()
264 pd = page_address(as->pd); tegra_smmu_domain_alloc()
265 SetPageReserved(as->pd); tegra_smmu_domain_alloc()
268 pd[i] = 0; tegra_smmu_domain_alloc()
271 pd = page_address(as->count); tegra_smmu_domain_alloc()
275 pd[i] = 0; tegra_smmu_domain_alloc()
290 ClearPageReserved(as->pd); tegra_smmu_domain_free()
382 smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD); tegra_smmu_as_prepare()
383 smmu_flush_ptc(smmu, as->pd, 0); tegra_smmu_as_prepare()
387 value = SMMU_PTB_DATA_VALUE(as->pd, as->attr); tegra_smmu_as_prepare()
470 u32 *pd = page_address(as->pd), *pt, *count; as_get_pte() local
477 if (pd[pde] == 0) { as_get_pte()
490 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); as_get_pte()
492 smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4); as_get_pte()
493 smmu_flush_ptc(smmu, as->pd, pde << 2); as_get_pte()
497 page = pfn_to_page(pd[pde] & smmu->pfn_mask); as_get_pte()
516 u32 *pd = page_address(as->pd), *pt; as_put_pte() local
519 page = pfn_to_page(pd[pde] & as->smmu->pfn_mask); as_put_pte()
530 pd[pde] = 0; as_put_pte()
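as_get_pte()/as_put_pte() above walk a classic two-level table: the pd page
holds PDEs, each naming a page of PTEs. Assuming the usual 32-bit split for
this SMMU — 4 KiB pages, 1024 entries per level; the shifts are an assumption,
not read out of the hits — the index math is:

	#define DEMO_PDE_SHIFT	22	/* iova bits 31..22 select the PDE */
	#define DEMO_PTE_SHIFT	12	/* iova bits 21..12 select the PTE */

	static unsigned int demo_iova_pd_index(unsigned long iova)
	{
		return iova >> DEMO_PDE_SHIFT;			/* 0..1023 */
	}

	static unsigned int demo_iova_pt_index(unsigned long iova)
	{
		return (iova >> DEMO_PTE_SHIFT) & 0x3ff;	/* 0..1023 */
	}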
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_keys.c55 struct qib_ibdev *dev = to_idev(mr->pd->device); qib_alloc_lkey()
122 struct qib_ibdev *dev = to_idev(mr->pd->device); qib_free_lkey()
143 * @pd: protection domain
155 int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, qib_lkey_ok() argument
168 struct qib_ibdev *dev = to_idev(pd->ibpd.device); qib_lkey_ok()
170 if (pd->user) qib_lkey_ok()
189 if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) qib_lkey_ok()
266 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_rkey_ok() local
267 struct qib_ibdev *dev = to_idev(pd->ibpd.device); qib_rkey_ok()
269 if (pd->user) qib_rkey_ok()
289 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) qib_rkey_ok()
344 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_fast_reg_mr() local
354 if (pd->user || rkey == 0) qib_fast_reg_mr()
360 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) qib_fast_reg_mr()
H A Dqib_mr.c50 static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, init_qib_mregion() argument
66 mr->pd = pd; init_qib_mregion()
89 * @pd: protection domain for this memory region
96 struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) qib_get_dma_mr() argument
102 if (to_ipd(pd)->user) { qib_get_dma_mr()
113 rval = init_qib_mregion(&mr->mr, pd, 0); qib_get_dma_mr()
138 static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) alloc_mr() argument
150 rval = init_qib_mregion(&mr->mr, pd, count); alloc_mr()
175 * @pd: protection domain for this memory region
182 struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, qib_reg_phys_mr() argument
190 mr = alloc_mr(num_phys_buf, pd); qib_reg_phys_mr()
221 * @pd: protection domain for this memory region
229 struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, qib_reg_user_mr() argument
244 umem = ib_umem_get(pd->uobject->context, start, length, qib_reg_user_mr()
251 mr = alloc_mr(n, pd); qib_reg_user_mr()
330 struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) qib_alloc_fast_reg_mr() argument
334 mr = alloc_mr(max_page_list_len, pd); qib_alloc_fast_reg_mr()
373 * @pd: the protection domain for this memory region
379 struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, qib_alloc_fmr() argument
393 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); qib_alloc_fmr()
/linux-4.1.27/arch/arm/mach-omap2/
H A Dpowerdomain.h151 * @pwrdm_set_next_pwrst: Set the target power state for a pd
152 * @pwrdm_read_next_pwrst: Read the target power state set for a pd
153 * @pwrdm_read_pwrst: Read the current power state of a pd
154 * @pwrdm_read_prev_pwrst: Read the prev power state entered by the pd
155 * @pwrdm_set_logic_retst: Set the logic state in RET for a pd
156 * @pwrdm_set_mem_onst: Set the Memory state in ON for a pd
157 * @pwrdm_set_mem_retst: Set the Memory state in RET for a pd
158 * @pwrdm_read_logic_pwrst: Read the current logic state of a pd
159 * @pwrdm_read_prev_logic_pwrst: Read the previous logic state entered by a pd
160 * @pwrdm_read_logic_retst: Read the logic state in RET for a pd
161 * @pwrdm_read_mem_pwrst: Read the current memory state of a pd
162 * @pwrdm_read_prev_mem_pwrst: Read the previous memory state entered by a pd
163 * @pwrdm_read_mem_retst: Read the memory state in RET for a pd
164 * @pwrdm_clear_all_prev_pwrst: Clear all previous power states logged for a pd
165 * @pwrdm_enable_hdwr_sar: Enable Hardware Save-Restore feature for the pd
166 * @pwrdm_disable_hdwr_sar: Disable Hardware Save-Restore feature for a pd
167 * @pwrdm_set_lowpwrstchange: Enable pd transitions from a shallow to deep sleep
168 * @pwrdm_wait_transition: Wait for a pd state transition to complete
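The kerneldoc above describes a per-SoC operations table: every pd
manipulation (target state, logic/memory retention, hardware save-restore,
transition wait) funnels through one struct of function pointers so OMAP2/3/4
can supply different register layouts. A hedged sketch of the shape — names
mirror the kerneldoc, but the exact signatures in powerdomain.h may differ:

	struct powerdomain;

	struct demo_pwrdm_ops {
		int (*pwrdm_set_next_pwrst)(struct powerdomain *pwrdm, u8 pwrst);
		int (*pwrdm_read_next_pwrst)(struct powerdomain *pwrdm);
		int (*pwrdm_read_pwrst)(struct powerdomain *pwrdm);
		int (*pwrdm_clear_all_prev_pwrst)(struct powerdomain *pwrdm);
		int (*pwrdm_enable_hdwr_sar)(struct powerdomain *pwrdm);
		int (*pwrdm_disable_hdwr_sar)(struct powerdomain *pwrdm);
		int (*pwrdm_wait_transition)(struct powerdomain *pwrdm);
	};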
/linux-4.1.27/drivers/leds/
H A Dleds-s3c24xx.c46 struct s3c24xx_led_platdata *pd = led->pdata; s3c24xx_led_set() local
47 int state = (value ? 1 : 0) ^ (pd->flags & S3C24XX_LEDF_ACTLOW); s3c24xx_led_set()
52 gpio_set_value(pd->gpio, state); s3c24xx_led_set()
54 if (pd->flags & S3C24XX_LEDF_TRISTATE) { s3c24xx_led_set()
56 gpio_direction_output(pd->gpio, state); s3c24xx_led_set()
58 gpio_direction_input(pd->gpio); s3c24xx_led_set()
/linux-4.1.27/drivers/video/fbdev/
H A Dsh7760fb.c37 struct sh7760fb_platdata *pd; /* display information */ member in struct:sh7760fb_par
76 struct sh7760fb_platdata *pd = par->pd; sh7760fb_blank() local
91 if (pd->blank) sh7760fb_blank()
92 pd->blank(blank); sh7760fb_blank()
176 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL); sh7760fb_check_var()
202 struct fb_videomode *vm = par->pd->def_mode; sh7760fb_set_par()
209 par->rot = par->pd->rotate; sh7760fb_set_par()
228 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, &gray); sh7760fb_set_par()
237 lddfr = par->pd->lddfr | (1 << 8); sh7760fb_set_par()
239 lddfr = par->pd->lddfr & ~(1 << 8); sh7760fb_set_par()
242 ldmtr = par->pd->ldmtr; sh7760fb_set_par()
252 iowrite16(par->pd->ldickr, par->base + LDICKR); /* pixclock */ sh7760fb_set_par()
256 iowrite16(par->pd->ldpmmr, par->base + LDPMMR); /* Power Management */ sh7760fb_set_par()
257 iowrite16(par->pd->ldpspr, par->base + LDPSPR); /* Power Supply Ctrl */ sh7760fb_set_par()
269 iowrite16(par->pd->ldaclnr, par->base + LDACLNR); sh7760fb_set_par()
337 (par->pd->ldickr >> 12) & 3, par->pd->ldickr & 0x1f); sh7760fb_set_par()
338 dev_dbg(info->dev, "ldpmmr: 0x%04x ldpspr: 0x%04x\n", par->pd->ldpmmr, sh7760fb_set_par()
339 par->pd->ldpspr); sh7760fb_set_par()
387 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL); sh7760fb_alloc_mem()
454 par->pd = pdev->dev.platform_data; sh7760fb_probe()
455 if (!par->pd) { sh7760fb_probe()
488 fb_videomode_to_var(&info->var, par->pd->def_mode); sh7760fb_probe()
H A Dau1200fb.c148 struct au1200fb_platdata *pd; member in struct:au1200fb_device
771 struct au1200fb_platdata *pd) au1200_setpanel()
804 if (pd->panel_shutdown) au1200_setpanel()
805 pd->panel_shutdown(); au1200_setpanel()
864 if (pd->panel_init) au1200_setpanel()
865 pd->panel_init(); au1200_setpanel()
1209 au1200_setpanel(panel, fbdev->pd); au1200fb_fb_blank()
1215 au1200_setpanel(NULL, fbdev->pd); au1200fb_fb_blank()
1462 au1200_setpanel(newpanel, fbdev->pd); au1200fb_ioctl()
1579 static int au1200fb_setup(struct au1200fb_platdata *pd) au1200fb_setup() argument
1605 panel_idx = pd->panel_index(); au1200fb_setup()
1648 struct au1200fb_platdata *pd; au1200fb_drv_probe() local
1655 pd = dev->dev.platform_data; au1200fb_drv_probe()
1656 if (!pd) au1200fb_drv_probe()
1660 if (au1200fb_setup(pd)) au1200fb_drv_probe()
1689 fbdev->pd = pd; au1200fb_drv_probe()
1751 platform_set_drvdata(dev, pd); au1200fb_drv_probe()
1754 au1200_setpanel(panel, pd); au1200fb_drv_probe()
1772 struct au1200fb_platdata *pd = platform_get_drvdata(dev); au1200fb_drv_remove() local
1778 au1200_setpanel(NULL, pd); au1200fb_drv_remove()
1802 struct au1200fb_platdata *pd = dev_get_drvdata(dev); au1200fb_drv_suspend() local
1803 au1200_setpanel(NULL, pd); au1200fb_drv_suspend()
1813 struct au1200fb_platdata *pd = dev_get_drvdata(dev); au1200fb_drv_resume() local
1818 au1200_setpanel(panel, pd); au1200fb_drv_resume()
770 au1200_setpanel(struct panel_settings *newpanel, struct au1200fb_platdata *pd) au1200_setpanel() argument
H A Dsimplefb.c146 struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev); simplefb_parse_pd() local
149 params->width = pd->width; simplefb_parse_pd()
150 params->height = pd->height; simplefb_parse_pd()
151 params->stride = pd->stride; simplefb_parse_pd()
155 if (strcmp(pd->format, simplefb_formats[i].name)) simplefb_parse_pd()
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
H A Deeprom.c698 struct ath5k_pdgain_info *pd = ath5k_eeprom_free_pcal_info() local
701 kfree(pd->pd_step); ath5k_eeprom_free_pcal_info()
702 kfree(pd->pd_pwr); ath5k_eeprom_free_pcal_info()
719 struct ath5k_pdgain_info *pd; ath5k_eeprom_convert_pcal_info_5111() local
751 pd = &chinfo[pier].pd_curves[idx]; ath5k_eeprom_convert_pcal_info_5111()
753 pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111; ath5k_eeprom_convert_pcal_info_5111()
755 /* Allocate pd points for this curve */ ath5k_eeprom_convert_pcal_info_5111()
756 pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, ath5k_eeprom_convert_pcal_info_5111()
758 if (!pd->pd_step) ath5k_eeprom_convert_pcal_info_5111()
761 pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, ath5k_eeprom_convert_pcal_info_5111()
763 if (!pd->pd_pwr) ath5k_eeprom_convert_pcal_info_5111()
769 for (point = 0; point < pd->pd_points; point++) { ath5k_eeprom_convert_pcal_info_5111()
772 pd->pd_pwr[point] = 2 * pcinfo->pwr[point]; ath5k_eeprom_convert_pcal_info_5111()
775 pd->pd_step[point] = pcinfo->pcdac[point]; ath5k_eeprom_convert_pcal_info_5111()
779 chinfo[pier].min_pwr = pd->pd_pwr[0]; ath5k_eeprom_convert_pcal_info_5111()
780 chinfo[pier].max_pwr = pd->pd_pwr[10]; ath5k_eeprom_convert_pcal_info_5111()
928 struct ath5k_pdgain_info *pd = ath5k_eeprom_convert_pcal_info_5112() local
934 pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS; ath5k_eeprom_convert_pcal_info_5112()
936 /* Allocate pd points for this curve */ ath5k_eeprom_convert_pcal_info_5112()
937 pd->pd_step = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_5112()
940 if (!pd->pd_step) ath5k_eeprom_convert_pcal_info_5112()
943 pd->pd_pwr = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_5112()
946 if (!pd->pd_pwr) ath5k_eeprom_convert_pcal_info_5112()
951 pd->pd_step[0] = pcinfo->pcdac_x0[0]; ath5k_eeprom_convert_pcal_info_5112()
952 pd->pd_pwr[0] = pcinfo->pwr_x0[0]; ath5k_eeprom_convert_pcal_info_5112()
954 for (point = 1; point < pd->pd_points; ath5k_eeprom_convert_pcal_info_5112()
957 pd->pd_pwr[point] = ath5k_eeprom_convert_pcal_info_5112()
961 pd->pd_step[point] = ath5k_eeprom_convert_pcal_info_5112()
962 pd->pd_step[point - 1] + ath5k_eeprom_convert_pcal_info_5112()
967 chinfo[pier].min_pwr = pd->pd_pwr[0]; ath5k_eeprom_convert_pcal_info_5112()
972 pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS; ath5k_eeprom_convert_pcal_info_5112()
974 /* Allocate pd points for this curve */ ath5k_eeprom_convert_pcal_info_5112()
975 pd->pd_step = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_5112()
978 if (!pd->pd_step) ath5k_eeprom_convert_pcal_info_5112()
981 pd->pd_pwr = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_5112()
984 if (!pd->pd_pwr) ath5k_eeprom_convert_pcal_info_5112()
989 for (point = 0; point < pd->pd_points; ath5k_eeprom_convert_pcal_info_5112()
992 pd->pd_pwr[point] = ath5k_eeprom_convert_pcal_info_5112()
996 pd->pd_step[point] = ath5k_eeprom_convert_pcal_info_5112()
1002 chinfo[pier].min_pwr = pd->pd_pwr[0]; ath5k_eeprom_convert_pcal_info_5112()
1132 * instead of a PCDAC and 4 pd gain curves for each calibrated channel.
1148 /* Return the size of each section based on the mode and the number of pd
1220 struct ath5k_pdgain_info *pd = ath5k_eeprom_convert_pcal_info_2413() local
1226 pd->pd_points = AR5K_EEPROM_N_PD_POINTS; ath5k_eeprom_convert_pcal_info_2413()
1228 pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1; ath5k_eeprom_convert_pcal_info_2413()
1230 /* Allocate pd points for this curve */ ath5k_eeprom_convert_pcal_info_2413()
1231 pd->pd_step = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_2413()
1234 if (!pd->pd_step) ath5k_eeprom_convert_pcal_info_2413()
1237 pd->pd_pwr = kcalloc(pd->pd_points, ath5k_eeprom_convert_pcal_info_2413()
1240 if (!pd->pd_pwr) ath5k_eeprom_convert_pcal_info_2413()
1246 pd->pd_step[0] = pcinfo->pddac_i[pdg]; ath5k_eeprom_convert_pcal_info_2413()
1247 pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg]; ath5k_eeprom_convert_pcal_info_2413()
1249 for (point = 1; point < pd->pd_points; point++) { ath5k_eeprom_convert_pcal_info_2413()
1251 pd->pd_pwr[point] = pd->pd_pwr[point - 1] + ath5k_eeprom_convert_pcal_info_2413()
1254 pd->pd_step[point] = pd->pd_step[point - 1] + ath5k_eeprom_convert_pcal_info_2413()
1261 chinfo[pier].min_pwr = pd->pd_pwr[0]; ath5k_eeprom_convert_pcal_info_2413()
1266 pd->pd_pwr[pd->pd_points - 1]; ath5k_eeprom_convert_pcal_info_2413()
1341 * 2 pd points (pwr, pddac) ath5k_eeprom_read_pcal_info_2413()
1362 * Pd gain 0 is not the last pd gain ath5k_eeprom_read_pcal_info_2413()
1363 * so it only has 2 pd points. ath5k_eeprom_read_pcal_info_2413()
1364 * Continue with pd gain 1. ath5k_eeprom_read_pcal_info_2413()
/linux-4.1.27/drivers/misc/eeprom/
H A Deeprom_93xx46.c247 struct eeprom_93xx46_platform_data *pd = edev->pdata; eeprom_93xx46_eral() local
281 if (pd->finish) eeprom_93xx46_eral()
282 pd->finish(edev); eeprom_93xx46_eral()
313 struct eeprom_93xx46_platform_data *pd; eeprom_93xx46_probe() local
317 pd = spi->dev.platform_data; eeprom_93xx46_probe()
318 if (!pd) { eeprom_93xx46_probe()
327 if (pd->flags & EE_ADDR8) eeprom_93xx46_probe()
329 else if (pd->flags & EE_ADDR16) eeprom_93xx46_probe()
340 edev->pdata = pd; eeprom_93xx46_probe()
347 if (!(pd->flags & EE_READONLY)) { eeprom_93xx46_probe()
357 (pd->flags & EE_ADDR8) ? 8 : 16, eeprom_93xx46_probe()
358 (pd->flags & EE_READONLY) ? "(readonly)" : ""); eeprom_93xx46_probe()
360 if (!(pd->flags & EE_READONLY)) { eeprom_93xx46_probe()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dmr.c57 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) mlx4_ib_get_dma_mr() argument
66 err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, mlx4_ib_get_dma_mr()
71 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); mlx4_ib_get_dma_mr()
81 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); mlx4_ib_get_dma_mr()
133 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mlx4_ib_reg_user_mr() argument
137 struct mlx4_ib_dev *dev = to_mdev(pd->device); mlx4_ib_reg_user_mr()
149 mr->umem = ib_umem_get(pd->uobject->context, start, length, mlx4_ib_reg_user_mr()
159 err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length, mlx4_ib_reg_user_mr()
177 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); mlx4_ib_reg_user_mr()
190 int mr_access_flags, struct ib_pd *pd, mlx4_ib_rereg_user_mr()
210 to_mpd(pd)->pdn); mlx4_ib_rereg_user_mr()
289 struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) mlx4_ib_alloc_mw() argument
291 struct mlx4_ib_dev *dev = to_mdev(pd->device); mlx4_ib_alloc_mw()
299 err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn, mlx4_ib_alloc_mw()
353 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, mlx4_ib_alloc_fast_reg_mr() argument
356 struct mlx4_ib_dev *dev = to_mdev(pd->device); mlx4_ib_alloc_fast_reg_mr()
364 err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0, mlx4_ib_alloc_fast_reg_mr()
434 struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc, mlx4_ib_fmr_alloc() argument
437 struct mlx4_ib_dev *dev = to_mdev(pd->device); mlx4_ib_fmr_alloc()
445 err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc), mlx4_ib_fmr_alloc()
451 err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr); mlx4_ib_fmr_alloc()
460 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); mlx4_ib_fmr_alloc()
188 mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata) mlx4_ib_rereg_user_mr() argument
H A Dah.c43 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, create_ib_ah() argument
46 struct mlx4_dev *dev = to_mdev(pd->device)->dev; create_ib_ah()
48 ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); create_ib_ah()
72 static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, create_iboe_ah() argument
75 struct mlx4_ib_dev *ibdev = to_mdev(pd->device); create_iboe_ah()
91 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); create_iboe_ah()
113 struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) mlx4_ib_create_ah() argument
122 if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) { mlx4_ib_create_ah()
134 ret = create_iboe_ah(pd, ah_attr, ah); mlx4_ib_create_ah()
142 return create_ib_ah(pd, ah_attr, ah); /* never fails */ mlx4_ib_create_ah()
H A Dsrq.c71 struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, mlx4_ib_create_srq() argument
75 struct mlx4_ib_dev *dev = to_mdev(pd->device); mlx4_ib_create_srq()
108 if (pd->uobject) { mlx4_ib_create_srq()
116 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, mlx4_ib_create_srq()
132 err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), mlx4_ib_create_srq()
185 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, mlx4_ib_create_srq()
193 if (pd->uobject) mlx4_ib_create_srq()
204 if (pd->uobject) mlx4_ib_create_srq()
205 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); mlx4_ib_create_srq()
213 if (pd->uobject) mlx4_ib_create_srq()
219 if (!pd->uobject) mlx4_ib_create_srq()
H A Dmlx4_ib.h88 struct ib_pd *pd; member in struct:mlx4_ib_xrcd
416 struct ib_pd *pd; member in struct:mlx4_ib_demux_pv_ctx
652 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
655 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
659 struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
663 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
680 struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
684 struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
695 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
717 struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
816 int mr_access_flags, struct ib_pd *pd,
/linux-4.1.27/arch/arm/mach-imx/
H A Dclk-pllv1.c44 unsigned int mfi, mfn, mfd, pd; clk_pllv1_recalc_rate() local
57 * pd + 1 clk_pllv1_recalc_rate()
63 pd = (reg >> 26) & 0xf; clk_pllv1_recalc_rate()
82 rate /= pd + 1; clk_pllv1_recalc_rate()
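The four fields decoded above plug into the PLL relation given in the driver's
comment (the "pd + 1" fragment above is its denominator):
fout = 2 * fref * (mfi + mfn / (mfd + 1)) / (pd + 1), with pd as the final
predivider — hence the closing "rate /= pd + 1". Worked through with assumed
register values fref = 26 MHz, mfi = 7, mfn = 35, mfd = 99, pd = 1:
fout = 2 * 26 MHz * (7 + 35/100) / 2 = 26 MHz * 7.35 = 191.1 MHz.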
/linux-4.1.27/drivers/ptp/
H A Dptp_chardev.c123 struct ptp_pin_desc pd; ptp_ioctl() local
217 if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { ptp_ioctl()
221 pin_index = pd.index; ptp_ioctl()
228 pd = ops->pin_config[pin_index]; ptp_ioctl()
230 if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd))) ptp_ioctl()
235 if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { ptp_ioctl()
239 pin_index = pd.index; ptp_ioctl()
246 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan); ptp_ioctl()
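ptp_ioctl() above services the pin-configuration pair: PTP_PIN_GETFUNC copies
a struct ptp_pin_desc out for the requested index, PTP_PIN_SETFUNC validates
one and applies it via ptp_set_pinfunc(). A minimal user-space sketch against
that interface (the device path is an assumption; error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_pin_desc pd;
		int fd = open("/dev/ptp0", O_RDWR);

		memset(&pd, 0, sizeof(pd));
		pd.index = 0;			/* first programmable pin */
		if (ioctl(fd, PTP_PIN_GETFUNC, &pd) == 0)
			printf("pin %u: func %u chan %u\n",
			       pd.index, pd.func, pd.chan);

		pd.func = PTP_PF_PEROUT;	/* repurpose as periodic output */
		pd.chan = 0;
		return ioctl(fd, PTP_PIN_SETFUNC, &pd);
	}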
/linux-4.1.27/arch/powerpc/sysdev/
H A Drtc_cmos_setup.c23 struct platform_device *pd; add_rtc() local
62 pd = platform_device_register_simple("rtc_cmos", -1, add_rtc()
65 return PTR_ERR_OR_ZERO(pd); add_rtc()
/linux-4.1.27/drivers/mmc/host/
H A Dsh_mmcif.c230 struct platform_device *pd; member in struct:sh_mmcif_host
277 dev_dbg(&host->pd->dev, "Command completed\n"); mmcif_dma_complete()
280 dev_name(&host->pd->dev))) mmcif_dma_complete()
310 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", sh_mmcif_start_dma_rx()
326 dev_warn(&host->pd->dev, sh_mmcif_start_dma_rx()
331 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, sh_mmcif_start_dma_rx()
359 dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", sh_mmcif_start_dma_tx()
375 dev_warn(&host->pd->dev, sh_mmcif_start_dma_tx()
380 dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__, sh_mmcif_start_dma_tx()
405 slave_data, &host->pd->dev, sh_mmcif_request_dma_one()
408 dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, sh_mmcif_request_dma_one()
414 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); sh_mmcif_request_dma_one()
443 } else if (!host->pd->dev.of_node) { sh_mmcif_request_dma()
479 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; sh_mmcif_clock_control()
524 dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); sh_mmcif_error_manage()
525 dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); sh_mmcif_error_manage()
537 dev_err(&host->pd->dev, sh_mmcif_error_manage()
542 dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); sh_mmcif_error_manage()
547 dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", sh_mmcif_error_manage()
551 dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", sh_mmcif_error_manage()
555 dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", sh_mmcif_error_manage()
602 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_read_block()
643 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_mread_block()
680 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_write_block()
721 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); sh_mmcif_mwrite_block()
778 dev_err(&host->pd->dev, "Unsupported response type.\n"); sh_mmcif_set_cmd()
806 dev_err(&host->pd->dev, "Unsupported bus width.\n"); sh_mmcif_set_cmd()
864 dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); sh_mmcif_data_trans()
929 dev_err(&host->pd->dev, "unsupported stop cmd\n"); sh_mmcif_stop_cmd()
944 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); sh_mmcif_request()
1005 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); sh_mmcif_set_ios()
1016 sh_mmcif_request_dma(host, host->pd->dev.platform_data); sh_mmcif_set_ios()
1030 pm_runtime_put_sync(&host->pd->dev); sh_mmcif_set_ios()
1043 pm_runtime_get_sync(&host->pd->dev); sh_mmcif_set_ios()
1058 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; sh_mmcif_get_cd()
1067 return p->get_cd(host->pd); sh_mmcif_get_cd()
1093 dev_dbg(&host->pd->dev, "CMD%d error %d\n", sh_mmcif_end_cmd()
1187 dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", sh_mmcif_irqt()
1225 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); sh_mmcif_irqt()
1235 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); sh_mmcif_irqt()
1289 dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", sh_mmcif_intr()
1294 dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state); sh_mmcif_intr()
1298 dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); sh_mmcif_intr()
1304 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); sh_mmcif_intr()
1327 dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", mmcif_timeout_work()
1364 struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; sh_mmcif_init_ocr() local
1369 if (!pd) sh_mmcif_init_ocr()
1373 mmc->ocr_avail = pd->ocr; sh_mmcif_init_ocr()
1374 else if (pd->ocr) sh_mmcif_init_ocr()
1383 struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; sh_mmcif_probe() local
1412 host->ccs_enable = !pd || !pd->ccs_unsupported; sh_mmcif_probe()
1413 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; sh_mmcif_probe()
1415 host->pd = pdev; sh_mmcif_probe()
1423 if (pd && pd->caps) sh_mmcif_probe()
1424 mmc->caps |= pd->caps; sh_mmcif_probe()
1472 if (pd && pd->use_cd_gpio) { sh_mmcif_probe()
1473 ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0); sh_mmcif_probe()
/linux-4.1.27/drivers/spi/
H A Dspi-butterfly.c68 struct pardevice *pd; member in struct:butterfly
195 struct pardevice *pd; butterfly_attach() local
232 pd = parport_register_device(p, "spi_butterfly", butterfly_attach()
235 if (!pd) { butterfly_attach()
239 pp->pd = pd; butterfly_attach()
241 status = parport_claim(pd); butterfly_attach()
296 parport_release(pp->pd); butterfly_attach()
298 parport_unregister_device(pd); butterfly_attach()
325 parport_release(pp->pd); butterfly_detach()
326 parport_unregister_device(pp->pd); butterfly_detach()
H A Dspi-lm70llp.c82 struct pardevice *pd; member in struct:spi_lm70llp
195 struct pardevice *pd; spi_lm70llp_attach() local
230 pd = parport_register_device(p, DRVNAME, spi_lm70llp_attach()
233 if (!pd) { spi_lm70llp_attach()
237 pp->pd = pd; spi_lm70llp_attach()
239 status = parport_claim(pd); spi_lm70llp_attach()
292 parport_release(pp->pd); spi_lm70llp_attach()
294 parport_unregister_device(pd); spi_lm70llp_attach()
314 parport_release(pp->pd); spi_lm70llp_detach()
315 parport_unregister_device(pp->pd); spi_lm70llp_detach()
/linux-4.1.27/drivers/infiniband/hw/amso1100/
H A Dc2_provider.c155 struct c2_pd *pd; c2_alloc_pd() local
160 pd = kmalloc(sizeof(*pd), GFP_KERNEL); c2_alloc_pd()
161 if (!pd) c2_alloc_pd()
164 err = c2_pd_alloc(to_c2dev(ibdev), !context, pd); c2_alloc_pd()
166 kfree(pd); c2_alloc_pd()
171 if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) { c2_alloc_pd()
172 c2_pd_free(to_c2dev(ibdev), pd); c2_alloc_pd() local
173 kfree(pd); c2_alloc_pd()
178 return &pd->ibpd; c2_alloc_pd()
181 static int c2_dealloc_pd(struct ib_pd *pd) c2_dealloc_pd() argument
184 c2_pd_free(to_c2dev(pd->device), to_c2pd(pd)); c2_dealloc_pd()
185 kfree(pd); c2_dealloc_pd()
190 static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) c2_ah_create() argument
232 static struct ib_qp *c2_create_qp(struct ib_pd *pd, c2_create_qp() argument
252 if (pd->uobject) { c2_create_qp()
256 err = c2_alloc_qp(to_c2dev(pd->device), c2_create_qp()
257 to_c2pd(pd), init_attr, qp); c2_create_qp()
259 if (err && pd->uobject) { c2_create_qp()
394 mr->pd = to_c2pd(ib_pd); c2_reg_phys_mr()
415 static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc) c2_get_dma_mr() argument
425 return c2_reg_phys_mr(pd, &bl, 1, acc, &kva); c2_get_dma_mr()
428 static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, c2_reg_user_mr() argument
437 struct c2_pd *c2pd = to_c2pd(pd); c2_reg_user_mr()
445 c2mr->pd = c2pd; c2_reg_user_mr()
447 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); c2_reg_user_mr()
474 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), c2_reg_user_mr()
H A Dc2_pd.c43 int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd) c2_pd_alloc() argument
55 pd->pd_id = obj; c2_pd_alloc()
66 void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd) c2_pd_free() argument
69 __clear_bit(pd->pd_id, c2dev->pd_table.table); c2_pd_free()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
H A Docrdma_verbs.c325 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) ocrdma_get_pd_num() argument
331 if (pd->dpp_enabled) { ocrdma_get_pd_num()
335 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx; ocrdma_get_pd_num()
336 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx; ocrdma_get_pd_num()
340 pd->id = dev->pd_mgr->pd_norm_start + pd_idx; ocrdma_get_pd_num()
341 pd->dpp_enabled = false; ocrdma_get_pd_num()
348 pd->id = dev->pd_mgr->pd_norm_start + pd_idx; ocrdma_get_pd_num()
361 struct ocrdma_pd *pd = NULL; _ocrdma_alloc_pd() local
364 pd = kzalloc(sizeof(*pd), GFP_KERNEL); _ocrdma_alloc_pd()
365 if (!pd) _ocrdma_alloc_pd()
369 pd->dpp_enabled = _ocrdma_alloc_pd()
371 pd->num_dpp_qp = _ocrdma_alloc_pd()
372 pd->dpp_enabled ? (dev->nic_info.db_page_size / _ocrdma_alloc_pd()
377 status = ocrdma_get_pd_num(dev, pd); _ocrdma_alloc_pd()
378 return (status == 0) ? pd : ERR_PTR(status); _ocrdma_alloc_pd()
382 status = ocrdma_mbx_alloc_pd(dev, pd); _ocrdma_alloc_pd()
384 if (pd->dpp_enabled) { _ocrdma_alloc_pd()
385 pd->dpp_enabled = false; _ocrdma_alloc_pd()
386 pd->num_dpp_qp = 0; _ocrdma_alloc_pd()
389 kfree(pd); _ocrdma_alloc_pd()
394 return pd; _ocrdma_alloc_pd()
398 struct ocrdma_pd *pd) is_ucontext_pd()
400 return (uctx->cntxt_pd == pd ? true : false); is_ucontext_pd()
404 struct ocrdma_pd *pd) _ocrdma_dealloc_pd()
409 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); _ocrdma_dealloc_pd()
411 status = ocrdma_mbx_dealloc_pd(dev, pd); _ocrdma_dealloc_pd()
413 kfree(pd); _ocrdma_dealloc_pd()
438 struct ocrdma_pd *pd = uctx->cntxt_pd; ocrdma_dealloc_ucontext_pd() local
439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); ocrdma_dealloc_ucontext_pd()
443 __func__, dev->id, pd->id); ocrdma_dealloc_ucontext_pd()
446 (void)_ocrdma_dealloc_pd(dev, pd); ocrdma_dealloc_ucontext_pd()
452 struct ocrdma_pd *pd = NULL; ocrdma_get_ucontext_pd() local
457 pd = uctx->cntxt_pd; ocrdma_get_ucontext_pd()
461 return pd; ocrdma_get_ucontext_pd()
597 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, ocrdma_copy_pd_uresp() argument
609 rsp.id = pd->id; ocrdma_copy_pd_uresp()
610 rsp.dpp_enabled = pd->dpp_enabled; ocrdma_copy_pd_uresp()
611 db_page_addr = ocrdma_get_db_addr(dev, pd->id); ocrdma_copy_pd_uresp()
618 if (pd->dpp_enabled) { ocrdma_copy_pd_uresp()
620 (pd->id * PAGE_SIZE); ocrdma_copy_pd_uresp()
633 pd->uctx = uctx; ocrdma_copy_pd_uresp()
637 if (pd->dpp_enabled) ocrdma_copy_pd_uresp()
638 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE); ocrdma_copy_pd_uresp()
640 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); ocrdma_copy_pd_uresp()
649 struct ocrdma_pd *pd; ocrdma_alloc_pd() local
656 pd = ocrdma_get_ucontext_pd(uctx); ocrdma_alloc_pd()
657 if (pd) { ocrdma_alloc_pd()
663 pd = _ocrdma_alloc_pd(dev, uctx, udata); ocrdma_alloc_pd()
664 if (IS_ERR(pd)) { ocrdma_alloc_pd()
665 status = PTR_ERR(pd); ocrdma_alloc_pd()
671 status = ocrdma_copy_pd_uresp(dev, pd, context, udata); ocrdma_alloc_pd()
675 return &pd->ibpd; ocrdma_alloc_pd()
681 status = _ocrdma_dealloc_pd(dev, pd); ocrdma_alloc_pd()
689 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_dealloc_pd() local
695 uctx = pd->uctx; ocrdma_dealloc_pd()
698 (pd->id * PAGE_SIZE); ocrdma_dealloc_pd()
699 if (pd->dpp_enabled) ocrdma_dealloc_pd()
700 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE); ocrdma_dealloc_pd()
701 usr_db = ocrdma_get_db_addr(dev, pd->id); ocrdma_dealloc_pd()
702 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); ocrdma_dealloc_pd()
704 if (is_ucontext_pd(uctx, pd)) { ocrdma_dealloc_pd()
709 status = _ocrdma_dealloc_pd(dev, pd); ocrdma_dealloc_pd()
741 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_get_dma_mr() local
753 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0, ocrdma_get_dma_mr()
894 struct ocrdma_pd *pd; ocrdma_reg_user_mr() local
897 pd = get_ocrdma_pd(ibpd); ocrdma_reg_user_mr()
928 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); ocrdma_reg_user_mr()
1221 struct ocrdma_pd *pd = qp->pd; ocrdma_copy_qp_uresp() local
1222 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); ocrdma_copy_qp_uresp()
1226 (pd->id * dev->nic_info.db_page_size); ocrdma_copy_qp_uresp()
1255 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], ocrdma_copy_qp_uresp()
1261 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0], ocrdma_copy_qp_uresp()
1268 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size); ocrdma_copy_qp_uresp()
1274 struct ocrdma_pd *pd) ocrdma_set_qp_db()
1278 (pd->id * dev->nic_info.db_page_size) + ocrdma_set_qp_db()
1281 (pd->id * dev->nic_info.db_page_size) + ocrdma_set_qp_db()
1285 (pd->id * dev->nic_info.db_page_size) + ocrdma_set_qp_db()
1288 (pd->id * dev->nic_info.db_page_size) + ocrdma_set_qp_db()
1309 struct ocrdma_pd *pd, ocrdma_set_qp_init_params()
1312 qp->pd = pd; ocrdma_set_qp_init_params()
1341 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_create_qp() local
1361 ocrdma_set_qp_init_params(qp, pd, attrs); ocrdma_create_qp()
1383 ocrdma_set_qp_db(dev, qp, pd); ocrdma_create_qp()
1719 struct ocrdma_pd *pd; ocrdma_destroy_qp() local
1729 pd = qp->pd; ocrdma_destroy_qp()
1758 if (!pd->uctx) { ocrdma_destroy_qp()
1764 if (pd->uctx) { ocrdma_destroy_qp()
1765 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, ocrdma_destroy_qp()
1768 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, ocrdma_destroy_qp()
1792 (srq->pd->id * dev->nic_info.db_page_size); ocrdma_copy_srq_uresp()
1806 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], ocrdma_copy_srq_uresp()
1818 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_create_srq() local
1832 srq->pd = pd; ocrdma_create_srq()
1833 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); ocrdma_create_srq()
1834 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd); ocrdma_create_srq()
1913 if (srq->pd->uctx) ocrdma_destroy_srq()
1914 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, ocrdma_destroy_srq()
2976 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_alloc_frmr() local
2998 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0); ocrdma_alloc_frmr()
3144 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_reg_kernel_mr() local
3180 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); ocrdma_reg_kernel_mr()
397 is_ucontext_pd(struct ocrdma_ucontext *uctx, struct ocrdma_pd *pd) is_ucontext_pd() argument
403 _ocrdma_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) _ocrdma_dealloc_pd() argument
1273 ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, struct ocrdma_pd *pd) ocrdma_set_qp_db() argument
1308 ocrdma_set_qp_init_params(struct ocrdma_qp *qp, struct ocrdma_pd *pd, struct ib_qp_init_attr *attrs) ocrdma_set_qp_init_params() argument
H A Docrdma_ah.c106 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ocrdma_create_ah() local
130 if ((pd->uctx) && ocrdma_create_ah()
142 status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan); ocrdma_create_ah()
146 /* if pd is for the user process, pass the ah_id to user space */ ocrdma_create_ah()
147 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { ocrdma_create_ah()
148 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; ocrdma_create_ah()
H A Docrdma_verbs.h57 int ocrdma_dealloc_pd(struct ib_pd *pd);
93 struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
/linux-4.1.27/sound/soc/codecs/
H A Duda134x.c86 struct uda134x_platform_data *pd = codec->control_data; uda134x_write() local
116 ret = l3_write(&pd->l3, uda134x_write()
126 ret = l3_write(&pd->l3, uda134x_write()
327 struct uda134x_platform_data *pd = codec->control_data; uda134x_set_bias_level() local
338 if (pd->power) { uda134x_set_bias_level()
339 pd->power(1); uda134x_set_bias_level()
349 if (pd->power) uda134x_set_bias_level()
350 pd->power(0); uda134x_set_bias_level()
482 struct uda134x_platform_data *pd = codec->component.card->dev->platform_data; uda134x_soc_probe() local
490 if (!pd) { uda134x_soc_probe()
496 switch (pd->model) { uda134x_soc_probe()
505 pd->model); uda134x_soc_probe()
514 codec->control_data = pd; uda134x_soc_probe()
516 if (pd->power) uda134x_soc_probe()
517 pd->power(1); uda134x_soc_probe()
521 if (pd->model == UDA134X_UDA1341) { uda134x_soc_probe()
537 switch (pd->model) { uda134x_soc_probe()
553 __func__, pd->model); uda134x_soc_probe()
H A Drl6231.c117 int i, pd[] = {1, 2, 3, 4, 6, 8, 12, 16}; rl6231_get_clk_info() local
123 for (i = 0; i < ARRAY_SIZE(pd); i++) rl6231_get_clk_info()
124 if (sclk == rate * pd[i]) rl6231_get_clk_info()
/linux-4.1.27/drivers/video/backlight/
H A Dams369fg06.c314 struct lcd_platform_data *pd; ams369fg06_power_on() local
317 pd = lcd->lcd_pd; ams369fg06_power_on()
320 if (pd->power_on) { ams369fg06_power_on()
321 pd->power_on(lcd->ld, 1); ams369fg06_power_on()
322 msleep(pd->power_on_delay); ams369fg06_power_on()
325 if (!pd->reset) { ams369fg06_power_on()
330 pd->reset(lcd->ld); ams369fg06_power_on()
331 msleep(pd->reset_delay); ams369fg06_power_on()
358 struct lcd_platform_data *pd; ams369fg06_power_off() local
360 pd = lcd->lcd_pd; ams369fg06_power_off()
368 msleep(pd->power_off_delay); ams369fg06_power_off()
370 if (pd->power_on) ams369fg06_power_off()
371 pd->power_on(lcd->ld, 0); ams369fg06_power_off()
H A Dlms501kf03.c228 struct lcd_platform_data *pd; lms501kf03_power_on() local
230 pd = lcd->lcd_pd; lms501kf03_power_on()
232 if (!pd->power_on) { lms501kf03_power_on()
237 pd->power_on(lcd->ld, 1); lms501kf03_power_on()
238 msleep(pd->power_on_delay); lms501kf03_power_on()
240 if (!pd->reset) { lms501kf03_power_on()
245 pd->reset(lcd->ld); lms501kf03_power_on()
246 msleep(pd->reset_delay); lms501kf03_power_on()
266 struct lcd_platform_data *pd; lms501kf03_power_off() local
268 pd = lcd->lcd_pd; lms501kf03_power_off()
276 msleep(pd->power_off_delay); lms501kf03_power_off()
278 pd->power_on(lcd->ld, 0); lms501kf03_power_off()
H A Dld9040.c60 struct lcd_platform_data *pd = NULL; ld9040_regulator_enable() local
62 pd = lcd->lcd_pd; ld9040_regulator_enable()
71 msleep(pd->power_on_delay); ld9040_regulator_enable()
559 struct lcd_platform_data *pd; ld9040_power_on() local
561 pd = lcd->lcd_pd; ld9040_power_on()
566 if (!pd->reset) { ld9040_power_on()
571 pd->reset(lcd->ld); ld9040_power_on()
572 msleep(pd->reset_delay); ld9040_power_on()
592 struct lcd_platform_data *pd; ld9040_power_off() local
594 pd = lcd->lcd_pd; ld9040_power_off()
602 msleep(pd->power_off_delay); ld9040_power_off()
H A Dlp855x_bl.c172 struct lp855x_platform_data *pd = lp->pdata; lp855x_configure() local
198 val = pd->initial_brightness; lp855x_configure()
203 val = pd->device_control; lp855x_configure()
208 if (pd->size_program > 0) { lp855x_configure()
209 for (i = 0; i < pd->size_program; i++) { lp855x_configure()
210 addr = pd->rom_data[i].addr; lp855x_configure()
211 val = pd->rom_data[i].val; lp855x_configure()
H A Ds6e63m0.c501 struct lcd_platform_data *pd; s6e63m0_power_on() local
504 pd = lcd->lcd_pd; s6e63m0_power_on()
507 if (!pd->power_on) { s6e63m0_power_on()
512 pd->power_on(lcd->ld, 1); s6e63m0_power_on()
513 msleep(pd->power_on_delay); s6e63m0_power_on()
515 if (!pd->reset) { s6e63m0_power_on()
520 pd->reset(lcd->ld); s6e63m0_power_on()
521 msleep(pd->reset_delay); s6e63m0_power_on()
548 struct lcd_platform_data *pd; s6e63m0_power_off() local
550 pd = lcd->lcd_pd; s6e63m0_power_off()
558 msleep(pd->power_off_delay); s6e63m0_power_off()
560 pd->power_on(lcd->ld, 0); s6e63m0_power_off()
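All four panel drivers above (ams369fg06, lms501kf03, ld9040, s6e63m0) consume
the same struct lcd_platform_data contract: optional power_on()/reset()
callbacks plus millisecond delay fields, all supplied by board code. A hedged
sketch of the board-side half (GPIO numbers and delays are made up):

	#include <linux/lcd.h>
	#include <linux/gpio.h>
	#include <linux/delay.h>

	#define DEMO_GPIO_LCD_POWER	101	/* hypothetical board GPIOs */
	#define DEMO_GPIO_LCD_RESET	102

	static int demo_lcd_power_on(struct lcd_device *ld, int enable)
	{
		gpio_set_value(DEMO_GPIO_LCD_POWER, enable);
		return 0;
	}

	static int demo_lcd_reset(struct lcd_device *ld)
	{
		gpio_set_value(DEMO_GPIO_LCD_RESET, 0);
		udelay(20);
		gpio_set_value(DEMO_GPIO_LCD_RESET, 1);
		return 0;
	}

	static struct lcd_platform_data demo_lcd_pd = {
		.power_on	 = demo_lcd_power_on,
		.reset		 = demo_lcd_reset,
		.power_on_delay	 = 50,	/* ms, consumed via msleep() above */
		.reset_delay	 = 100,
		.power_off_delay = 120,
	};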
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Dsrq.c76 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, create_srq_user() argument
80 struct mlx5_ib_dev *dev = to_mdev(pd->device); create_srq_user()
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, create_srq_user()
131 err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context), create_srq_user()
220 static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) destroy_srq_user() argument
222 mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); destroy_srq_user()
234 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, mlx5_ib_create_srq() argument
238 struct mlx5_ib_dev *dev = to_mdev(pd->device); mlx5_ib_create_srq()
279 if (pd->uobject) mlx5_ib_create_srq()
280 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); mlx5_ib_create_srq()
286 pd->uobject ? "user" : "kernel", err); mlx5_ib_create_srq()
304 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); mlx5_ib_create_srq()
318 if (pd->uobject) mlx5_ib_create_srq()
333 if (pd->uobject) mlx5_ib_create_srq()
334 destroy_srq_user(pd, srq); mlx5_ib_create_srq()
H A Dmain.c660 struct mlx5_ib_pd *pd; mlx5_ib_alloc_pd() local
663 pd = kmalloc(sizeof(*pd), GFP_KERNEL); mlx5_ib_alloc_pd()
664 if (!pd) mlx5_ib_alloc_pd()
667 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); mlx5_ib_alloc_pd()
669 kfree(pd); mlx5_ib_alloc_pd()
674 resp.pdn = pd->pdn; mlx5_ib_alloc_pd()
676 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); mlx5_ib_alloc_pd()
677 kfree(pd); mlx5_ib_alloc_pd()
681 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); mlx5_ib_alloc_pd()
683 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); mlx5_ib_alloc_pd()
684 kfree(pd); mlx5_ib_alloc_pd()
689 return &pd->ibpd; mlx5_ib_alloc_pd()
692 static int mlx5_ib_dealloc_pd(struct ib_pd *pd) mlx5_ib_dealloc_pd() argument
694 struct mlx5_ib_dev *mdev = to_mdev(pd->device); mlx5_ib_dealloc_pd()
695 struct mlx5_ib_pd *mpd = to_mpd(pd); mlx5_ib_dealloc_pd()
697 if (!pd->uobject) mlx5_ib_dealloc_pd()
959 ib_dealloc_pd(dev->umrc.pd); destroy_umrc_res()
970 struct ib_pd *pd; create_umr_res() local
983 pd = ib_alloc_pd(&dev->ib_dev); create_umr_res()
984 if (IS_ERR(pd)) { create_umr_res()
986 ret = PTR_ERR(pd); create_umr_res()
990 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); create_umr_res()
1013 qp = mlx5_ib_create_qp(pd, init_attr, NULL); create_umr_res()
1054 dev->umrc.pd = pd; create_umr_res()
1078 ib_dealloc_pd(pd); create_umr_res()
1150 devr->s0->pd = devr->p0; create_dev_resources()
H A Dmr.c626 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) mlx5_ib_get_dma_mr() argument
628 struct mlx5_ib_dev *dev = to_mdev(pd->device); mlx5_ib_get_dma_mr()
647 seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64); mlx5_ib_get_dma_mr()
687 static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, prep_umr_reg_wqe() argument
692 struct mlx5_ib_dev *dev = to_mdev(pd->device); prep_umr_reg_wqe()
716 umrwr->pd = pd; prep_umr_reg_wqe()
751 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, reg_umr() argument
755 struct mlx5_ib_dev *dev = to_mdev(pd->device); reg_umr()
807 prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift, reg_umr()
826 mr->mmr.pd = to_mpd(pd)->pdn; reg_umr()
968 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, reg_create() argument
973 struct mlx5_ib_dev *dev = to_mdev(pd->device); reg_create()
999 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); reg_create()
1032 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mlx5_ib_reg_user_mr() argument
1036 struct mlx5_ib_dev *dev = to_mdev(pd->device); mlx5_ib_reg_user_mr()
1047 umem = ib_umem_get(pd->uobject->context, start, length, access_flags, mlx5_ib_reg_user_mr()
1065 mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, mlx5_ib_reg_user_mr()
1078 mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, mlx5_ib_reg_user_mr()
1226 struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd, mlx5_ib_create_mr() argument
1229 struct mlx5_ib_dev *dev = to_mdev(pd->device); mlx5_ib_create_mr()
1248 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); mlx5_ib_create_mr()
1264 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, mlx5_ib_create_mr()
1342 struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, mlx5_ib_alloc_fast_reg_mr() argument
1345 struct mlx5_ib_dev *dev = to_mdev(pd->device); mlx5_ib_alloc_fast_reg_mr()
1364 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); mlx5_ib_alloc_fast_reg_mr()
H A Dmlx5_ib.h254 struct ib_pd *pd; member in struct:mlx5_umr_wr
349 struct ib_pd *pd; member in struct:umr_common
532 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
535 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
544 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
567 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
568 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
575 struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
577 struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
582 struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
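create_umr_res() in the main.c hits above shows the kernel-verbs pd lifecycle
this directory revolves around: allocate a pd against the device, hang
MRs/QPs/SRQs off it, and only deallocate it once its children are gone.
Reduced to the calls visible in the hits (v4.1-era signatures):

	#include <rdma/ib_verbs.h>

	static int demo_pd_roundtrip(struct ib_device *ibdev)
	{
		struct ib_pd *pd;
		struct ib_mr *mr;
		int err;

		pd = ib_alloc_pd(ibdev);
		if (IS_ERR(pd))
			return PTR_ERR(pd);

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return PTR_ERR(mr);
		}

		/* ... post work using mr/pd ... */

		err = ib_dereg_mr(mr);	/* children first, then the pd */
		if (!err)
			err = ib_dealloc_pd(pd);
		return err;
	}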
/linux-4.1.27/drivers/video/fbdev/omap/
H A Dlcd_mipid.c216 struct mipid_platform_data *pd = md->spi->dev.platform_data; mipid_set_bklight_level() local
218 if (pd->get_bklight_max == NULL || pd->set_bklight_level == NULL) mipid_set_bklight_level()
220 if (level > pd->get_bklight_max(pd)) mipid_set_bklight_level()
226 pd->set_bklight_level(pd, level); mipid_set_bklight_level()
234 struct mipid_platform_data *pd = md->spi->dev.platform_data; mipid_get_bklight_level() local
236 if (pd->get_bklight_level == NULL) mipid_get_bklight_level()
238 return pd->get_bklight_level(pd); mipid_get_bklight_level()
244 struct mipid_platform_data *pd = md->spi->dev.platform_data; mipid_get_bklight_max() local
246 if (pd->get_bklight_max == NULL) mipid_get_bklight_max()
249 return pd->get_bklight_max(pd); mipid_get_bklight_max()
/linux-4.1.27/include/linux/platform_data/
H A Dhwmon-s3c.h41 * @pd: Platform data to register to device.
47 extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
H A Ds3c-hsotg.h40 extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd);
H A Dusb-s3c2410_udc.h42 extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
/linux-4.1.27/arch/arm/mach-rockchip/
H A Dplatsmp.c46 static int pmu_power_domain_is_on(int pd) pmu_power_domain_is_on() argument
55 return !(val & BIT(pd)); pmu_power_domain_is_on()
72 static int pmu_set_power_domain(int pd, bool on) pmu_set_power_domain() argument
74 u32 val = (on) ? 0 : BIT(pd); pmu_set_power_domain()
75 struct reset_control *rstc = rockchip_get_core_reset(pd); pmu_set_power_domain()
80 __func__, pd); pmu_set_power_domain()
92 ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val); pmu_set_power_domain()
100 ret = pmu_power_domain_is_on(pd); pmu_set_power_domain()
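pmu_set_power_domain()/pmu_power_domain_is_on() above toggle and poll one
PMU_PWRDN_CON bit per domain, with a *set* bit meaning powered down — hence
the negation in the is_on check. The core of it as a regmap sketch (the 0x08
offset is an assumption from the rk3288 TRM, not from the hits):

	#include <linux/regmap.h>
	#include <linux/bitops.h>

	#define PMU_PWRDN_CON	0x08

	static int demo_pd_is_on(struct regmap *pmu, int pd)
	{
		u32 val;
		int ret = regmap_read(pmu, PMU_PWRDN_CON, &val);

		if (ret < 0)
			return ret;
		return !(val & BIT(pd));	/* bit set == powered down */
	}

	static int demo_pd_set(struct regmap *pmu, int pd, bool on)
	{
		/* set the bit to power down, clear it to power up */
		return regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd),
					  on ? 0 : BIT(pd));
	}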
/linux-4.1.27/fs/affs/
H A Dnamei.c195 pr_debug("%s(\"%pd\")\n", __func__, dentry); affs_find_entry()
223 pr_debug("%s(\"%pd\")\n", __func__, dentry); affs_lookup()
253 pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, affs_unlink()
266 pr_debug("%s(%lu,\"%pd\",0%ho)\n", affs_create()
296 pr_debug("%s(%lu,\"%pd\",0%ho)\n", affs_mkdir()
322 pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, affs_rmdir()
338 pr_debug("%s(%lu,\"%pd\" -> \"%s\")\n", affs_symlink()
408 pr_debug("%s(%lu, %lu, \"%pd\")\n", __func__, inode->i_ino, dir->i_ino, affs_link()
422 pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__, affs_rename()
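The affs hits are the other "pd" entirely: the %pd vsnprintf extension, which
prints a dentry's name (see Documentation/printk-formats.txt), so the unlink
and rename paths above can log file names without reaching into d_name by
hand. Typical use, mirroring the hits:

	pr_debug("%s(dir=%lu, \"%pd\")\n", __func__, dir->i_ino, dentry);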
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_gtt.c374 * @pd: The page directory which will have at least @count entries
386 static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count, alloc_pt_range() argument
402 WARN(pd->page_table[i], alloc_pt_range()
404 i, pd->page_table[i]); alloc_pt_range()
405 pd->page_table[i] = pt; alloc_pt_range()
412 unmap_and_free_pt(pd->page_table[i], dev); alloc_pt_range()
416 static void unmap_and_free_pd(struct i915_page_directory_entry *pd) unmap_and_free_pd() argument
418 if (pd->page) { unmap_and_free_pd()
419 __free_page(pd->page); unmap_and_free_pd()
420 kfree(pd); unmap_and_free_pd()
426 struct i915_page_directory_entry *pd; alloc_pd_single() local
428 pd = kzalloc(sizeof(*pd), GFP_KERNEL); alloc_pd_single()
429 if (!pd) alloc_pd_single()
432 pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO); alloc_pd_single()
433 if (!pd->page) { alloc_pd_single()
434 kfree(pd); alloc_pd_single()
438 return pd; alloc_pd_single()
469 /* bit of a hack to find the actual last used pd */ gen8_mm_switch()
500 struct i915_page_directory_entry *pd; gen8_ppgtt_clear_range() local
507 pd = ppgtt->pdp.page_directory[pdpe]; gen8_ppgtt_clear_range()
509 if (WARN_ON(!pd->page_table[pde])) gen8_ppgtt_clear_range()
512 pt = pd->page_table[pde]; gen8_ppgtt_clear_range()
562 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe]; gen8_ppgtt_insert_entries() local
563 struct i915_page_table_entry *pt = pd->page_table[pde]; gen8_ppgtt_insert_entries()
591 static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev) gen8_free_page_tables() argument
595 if (!pd->page) gen8_free_page_tables()
599 if (WARN_ON(!pd->page_table[i])) gen8_free_page_tables()
602 unmap_and_free_pt(pd->page_table[i], dev); gen8_free_page_tables()
603 pd->page_table[i] = NULL; gen8_free_page_tables()
635 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; gen8_ppgtt_unmap_pages() local
639 if (WARN_ON(!pd->page_table[j])) gen8_ppgtt_unmap_pages()
642 pt = pd->page_table[j]; gen8_ppgtt_unmap_pages()
727 const int pd) gen8_ppgtt_setup_page_directories()
733 ppgtt->pdp.page_directory[pd]->page, 0, gen8_ppgtt_setup_page_directories()
740 ppgtt->pdp.page_directory[pd]->daddr = pd_addr; gen8_ppgtt_setup_page_directories()
746 const int pd, gen8_ppgtt_setup_page_tables()
750 struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd]; gen8_ppgtt_setup_page_tables()
818 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; gen8_ppgtt_init() local
822 struct i915_page_table_entry *pt = pd->page_table[j]; gen8_ppgtt_init()
871 ppgtt->pd.pd_offset / sizeof(gen6_pte_t); gen6_dump_ppgtt()
874 ppgtt->pd.pd_offset, gen6_dump_ppgtt()
875 ppgtt->pd.pd_offset + ppgtt->num_pd_entries); gen6_dump_ppgtt()
879 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; gen6_dump_ppgtt()
890 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); gen6_dump_ppgtt()
916 /* Write pde (index) from the page directory @pd to the page table @pt */ gen6_write_pde()
917 static void gen6_write_pde(struct i915_page_directory_entry *pd, gen6_write_pde() argument
922 container_of(pd, struct i915_hw_ppgtt, pd); gen6_write_pde()
934 struct i915_page_directory_entry *pd, gen6_write_page_range()
940 gen6_for_each_pde(pt, pd, start, length, temp, pde) gen6_write_page_range()
941 gen6_write_pde(pd, pde, pt); gen6_write_page_range()
950 BUG_ON(ppgtt->pd.pd_offset & 0x3f); get_pd_offset()
952 return (ppgtt->pd.pd_offset / 64) << 16; get_pd_offset()
1115 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); gen6_ppgtt_clear_range()
1144 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); gen6_ppgtt_insert_entries()
1212 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { gen6_alloc_va_range()
1229 ppgtt->pd.page_table[pde] = pt; gen6_alloc_va_range()
1237 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { gen6_alloc_va_range()
1245 gen6_write_pde(&ppgtt->pd, pde, pt); gen6_alloc_va_range()
1266 struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde]; for_each_set_bit()
1268 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; for_each_set_bit()
1281 struct i915_page_table_entry *pt = ppgtt->pd.page_table[i]; gen6_ppgtt_free()
1284 unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); gen6_ppgtt_free()
1288 unmap_and_free_pd(&ppgtt->pd); gen6_ppgtt_free()
1364 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) gen6_scratch_va_range()
1365 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; gen6_scratch_va_range()
1393 ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, gen6_ppgtt_init()
1410 ppgtt->pd.pd_offset = gen6_ppgtt_init()
1414 ppgtt->pd.pd_offset / sizeof(gen6_pte_t); gen6_ppgtt_init()
1421 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); gen6_ppgtt_init()
1428 ppgtt->pd.pd_offset << 10); gen6_ppgtt_init()
1709 gen6_write_page_range(dev_priv, &ppgtt->pd, i915_gem_restore_gtt_mappings()
726 gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, const int pd) gen8_ppgtt_setup_page_directories() argument
745 gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, const int pd, const int pt) gen8_ppgtt_setup_page_tables() argument
933 gen6_write_page_range(struct drm_i915_private *dev_priv, struct i915_page_directory_entry *pd, uint32_t start, uint32_t length) gen6_write_page_range() argument
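alloc_pd_single and unmap_and_free_pd above are a standard two-step allocation with explicit unwind: kzalloc the descriptor, then alloc_page for the backing page, and free the descriptor again if the page allocation fails. A reduced sketch of just that unwind (struct names shortened; this is illustrative, not the i915 code):

    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>

    struct pd_entry {
            struct page *page;
    };

    static struct pd_entry *pd_alloc(void)
    {
            struct pd_entry *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

            if (!pd)
                    return ERR_PTR(-ENOMEM);

            pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!pd->page) {
                    kfree(pd); /* unwind the first allocation */
                    return ERR_PTR(-ENOMEM);
            }
            return pd;
    }

    static void pd_free(struct pd_entry *pd)
    {
            /* mirrors unmap_and_free_pd: only tear down fully-built entries */
            if (pd->page) {
                    __free_page(pd->page);
                    kfree(pd);
            }
    }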
/linux-4.1.27/drivers/scsi/
H A Dsgiwd93.c231 struct sgiwd93_platform_data *pd = pdev->dev.platform_data; sgiwd93_probe() local
232 unsigned char *wdregs = pd->wdregs; sgiwd93_probe()
233 struct hpc3_scsiregs *hregs = pd->hregs; sgiwd93_probe()
237 unsigned int unit = pd->unit; sgiwd93_probe()
238 unsigned int irq = pd->irq; sgiwd93_probe()
304 struct sgiwd93_platform_data *pd = pdev->dev.platform_data; sgiwd93_remove() local
307 free_irq(pd->irq, host); sgiwd93_remove()
/linux-4.1.27/drivers/crypto/amcc/
H A Dcrypto4xx_core.c540 struct ce_pd *pd, crypto4xx_copy_pkt_to_dst()
628 struct ce_pd *pd) crypto4xx_ablkcipher_done()
639 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes, crypto4xx_ablkcipher_done()
674 struct ce_pd *pd; crypto4xx_pd_done() local
677 pd = dev->pdr + sizeof(struct ce_pd)*idx; crypto4xx_pd_done()
681 return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); crypto4xx_pd_done()
733 u32 pd_entry, struct ce_pd *pd, crypto4xx_return_pd()
738 pd->pd_ctl.w = 0; crypto4xx_return_pd()
739 pd->pd_ctl_len.w = 0; crypto4xx_return_pd()
791 struct ce_pd *pd; crypto4xx_build_pd() local
856 pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry); crypto4xx_build_pd()
863 pd->sa = pd_uinfo->sa_pa; crypto4xx_build_pd()
877 pd->sa = ctx->sa_in_dma_addr; crypto4xx_build_pd()
880 pd->sa = ctx->sa_out_dma_addr; crypto4xx_build_pd()
884 pd->sa_len = ctx->sa_len; crypto4xx_build_pd()
891 pd->src = gd_dma; crypto4xx_build_pd()
913 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), crypto4xx_build_pd()
937 pd->dest = virt_to_phys((void *)dst); crypto4xx_build_pd()
939 pd->dest = (u32)dma_map_page(dev->core_dev->device, crypto4xx_build_pd()
952 pd->dest = sd_dma; crypto4xx_build_pd()
980 pd->pd_ctl.w = ctx->pd_ctl; crypto4xx_build_pd()
981 pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen; crypto4xx_build_pd()
984 /* write any value to push engine to read a pd */ crypto4xx_build_pd()
1086 struct ce_pd *pd; crypto4xx_bh_tasklet_cb() local
1093 pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail; crypto4xx_bh_tasklet_cb()
1095 pd->pd_ctl.bf.pe_done && crypto4xx_bh_tasklet_cb()
1096 !pd->pd_ctl.bf.host_ready) { crypto4xx_bh_tasklet_cb()
1097 pd->pd_ctl.bf.pe_done = 0; crypto4xx_bh_tasklet_cb()
539 crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, struct ce_pd *pd, struct pd_uinfo *pd_uinfo, u32 nbytes, struct scatterlist *dst) crypto4xx_copy_pkt_to_dst() argument
626 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo, struct ce_pd *pd) crypto4xx_ablkcipher_done() argument
732 crypto4xx_return_pd(struct crypto4xx_device *dev, u32 pd_entry, struct ce_pd *pd, struct pd_uinfo *pd_uinfo) crypto4xx_return_pd() argument
/linux-4.1.27/net/rds/
H A Diw.c89 rds_iwdev->pd = ib_alloc_pd(device); rds_iw_add_one()
90 if (IS_ERR(rds_iwdev->pd)) rds_iw_add_one()
94 rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, rds_iw_add_one()
121 ib_dealloc_pd(rds_iwdev->pd); rds_iw_add_one()
152 while (ib_dealloc_pd(rds_iwdev->pd)) { rds_iw_remove_one()
153 rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd); rds_iw_remove_one()
H A Dib.c104 if (rds_ibdev->pd) rds_ib_dev_free()
105 ib_dealloc_pd(rds_ibdev->pd); rds_ib_dev_free()
161 rds_ibdev->pd = ib_alloc_pd(device); rds_ib_add_one()
162 if (IS_ERR(rds_ibdev->pd)) { rds_ib_add_one()
163 rds_ibdev->pd = NULL; rds_ib_add_one()
167 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE); rds_ib_add_one()
/linux-4.1.27/drivers/firewire/
H A Dnet.c279 static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, fwnet_frag_overlap() argument
285 list_for_each_entry(fi, &pd->fi_list, fi_link) fwnet_frag_overlap()
294 struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) fwnet_frag_new()
299 list = &pd->fi_list; fwnet_frag_new()
300 list_for_each_entry(fi, &pd->fi_list, fi_link) { fwnet_frag_new()
396 struct fwnet_partial_datagram *pd; fwnet_pd_find() local
398 list_for_each_entry(pd, &peer->pd_list, pd_link) fwnet_pd_find()
399 if (pd->datagram_label == datagram_label) fwnet_pd_find()
400 return pd; fwnet_pd_find()
419 struct fwnet_partial_datagram *pd, void *frag_buf, fwnet_pd_update()
422 if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) fwnet_pd_update()
425 memcpy(pd->pbuf + frag_off, frag_buf, frag_len); fwnet_pd_update()
431 list_move_tail(&pd->pd_link, &peer->pd_list); fwnet_pd_update()
436 static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) fwnet_pd_is_complete() argument
440 fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); fwnet_pd_is_complete()
442 return fi->len == pd->datagram_size; fwnet_pd_is_complete()
574 struct fwnet_partial_datagram *pd; fwnet_incoming_packet() local
627 pd = fwnet_pd_find(peer, datagram_label); fwnet_incoming_packet()
628 if (pd == NULL) { fwnet_incoming_packet()
635 pd = fwnet_pd_new(net, peer, datagram_label, fwnet_incoming_packet()
637 if (pd == NULL) { fwnet_incoming_packet()
643 if (fwnet_frag_overlap(pd, fg_off, len) || fwnet_incoming_packet()
644 pd->datagram_size != dg_size) { fwnet_incoming_packet()
649 fwnet_pd_delete(pd); fwnet_incoming_packet()
650 pd = fwnet_pd_new(net, peer, datagram_label, fwnet_incoming_packet()
652 if (pd == NULL) { fwnet_incoming_packet()
658 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { fwnet_incoming_packet()
664 fwnet_pd_delete(pd); fwnet_incoming_packet()
673 pd->ether_type = ether_type; fwnet_incoming_packet()
675 if (fwnet_pd_is_complete(pd)) { fwnet_incoming_packet()
676 ether_type = pd->ether_type; fwnet_incoming_packet()
678 skb = skb_get(pd->skb); fwnet_incoming_packet()
679 fwnet_pd_delete(pd); fwnet_incoming_packet()
1537 struct fwnet_partial_datagram *pd, *pd_next; fwnet_remove_peer() local
1545 list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) fwnet_remove_peer()
1546 fwnet_pd_delete(pd); fwnet_remove_peer()
293 fwnet_frag_new( struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) fwnet_frag_new() argument
418 fwnet_pd_update(struct fwnet_peer *peer, struct fwnet_partial_datagram *pd, void *frag_buf, unsigned frag_off, unsigned frag_len) fwnet_pd_update() argument
H A Dohci.c2775 struct descriptor *pd; handle_ir_packet_per_buffer() local
2778 for (pd = d; pd <= last; pd++) handle_ir_packet_per_buffer()
2779 if (pd->transfer_status) handle_ir_packet_per_buffer()
2781 if (pd > last) handle_ir_packet_per_buffer()
2856 struct descriptor *pd) sync_it_packet_for_cpu()
2862 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) sync_it_packet_for_cpu()
2866 pd += 2; sync_it_packet_for_cpu()
2873 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == sync_it_packet_for_cpu()
2875 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) sync_it_packet_for_cpu()
2877 pd++; sync_it_packet_for_cpu()
2881 buffer_dma = le32_to_cpu(pd->data_address); sync_it_packet_for_cpu()
2885 le16_to_cpu(pd->req_count), sync_it_packet_for_cpu()
2887 control = pd->control; sync_it_packet_for_cpu()
2888 pd++; sync_it_packet_for_cpu()
2898 struct descriptor *pd; handle_it_packet() local
2901 for (pd = d; pd <= last; pd++) handle_it_packet()
2902 if (pd->transfer_status) handle_it_packet()
2904 if (pd > last) handle_it_packet()
2919 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | handle_it_packet()
2920 le16_to_cpu(pd->res_count)); handle_it_packet()
3206 struct descriptor *d, *last, *pd; queue_iso_transmit() local
3269 pd = d + z - payload_z; queue_iso_transmit()
3277 pd[i].req_count = cpu_to_le16(length); queue_iso_transmit()
3280 pd[i].data_address = cpu_to_le32(page_bus + offset); queue_iso_transmit()
3311 struct descriptor *d, *pd; queue_iso_packet_per_buffer() local
3348 pd = d; queue_iso_packet_per_buffer()
3350 pd++; queue_iso_packet_per_buffer()
3351 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | queue_iso_packet_per_buffer()
3358 pd->req_count = cpu_to_le16(length); queue_iso_packet_per_buffer()
3359 pd->res_count = pd->req_count; queue_iso_packet_per_buffer()
3360 pd->transfer_status = 0; queue_iso_packet_per_buffer()
3363 pd->data_address = cpu_to_le32(page_bus + offset); queue_iso_packet_per_buffer()
3374 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | queue_iso_packet_per_buffer()
3378 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); queue_iso_packet_per_buffer()
2855 sync_it_packet_for_cpu(struct context *context, struct descriptor *pd) sync_it_packet_for_cpu() argument
/linux-4.1.27/drivers/memory/
H A Demif.c1203 struct emif_platform_data *pd = emif->plat_data; get_default_timings() local
1205 pd->timings = lpddr2_jedec_timings; get_default_timings()
1206 pd->timings_arr_size = ARRAY_SIZE(lpddr2_jedec_timings); get_default_timings()
1349 struct emif_platform_data *pd = NULL; of_get_memory_device_details() local
1357 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); of_get_memory_device_details()
1360 if (!emif || !pd || !dev_info) { of_get_memory_device_details()
1366 emif->plat_data = pd; of_get_memory_device_details()
1367 pd->device_info = dev_info; of_get_memory_device_details()
1377 of_property_read_u32(np_emif, "phy-type", &pd->phy_type); of_get_memory_device_details()
1380 pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE; of_get_memory_device_details()
1383 if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density, of_get_memory_device_details()
1384 pd->device_info->io_width, pd->phy_type, pd->ip_rev, of_get_memory_device_details()
1433 struct emif_platform_data *pd; get_device_details() local
1437 pd = pdev->dev.platform_data; get_device_details()
1440 if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type, get_device_details()
1441 pd->device_info->density, pd->device_info->io_width, get_device_details()
1442 pd->phy_type, pd->ip_rev, dev))) { get_device_details()
1448 temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); get_device_details()
1451 if (!emif || !pd || !dev_info) { get_device_details()
1456 memcpy(temp, pd, sizeof(*pd)); get_device_details()
1457 pd = temp; get_device_details()
1458 memcpy(dev_info, pd->device_info, sizeof(*dev_info)); get_device_details()
1460 pd->device_info = dev_info; get_device_details()
1461 emif->plat_data = pd; get_device_details()
1476 pd->timings = NULL; get_device_details()
1477 pd->min_tck = NULL; get_device_details()
1488 cust_cfgs = pd->custom_configs; get_device_details()
1496 pd->custom_configs = temp; get_device_details()
1503 size = sizeof(struct lpddr2_timings) * pd->timings_arr_size; get_device_details()
1504 if (pd->timings) { get_device_details()
1507 memcpy(temp, pd->timings, size); get_device_details()
1508 pd->timings = temp; get_device_details()
1518 if (pd->min_tck) { get_device_details()
1519 temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL); get_device_details()
1521 memcpy(temp, pd->min_tck, sizeof(*pd->min_tck)); get_device_details()
1522 pd->min_tck = temp; get_device_details()
1526 pd->min_tck = &lpddr2_jedec_min_tck; get_device_details()
1529 pd->min_tck = &lpddr2_jedec_min_tck; get_device_details()
/linux-4.1.27/drivers/net/ethernet/broadcom/genet/
H A Dbcmmii.c483 struct bcmgenet_platform_data *pd = kdev->platform_data; bcmgenet_mii_pd_init() local
488 if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { bcmgenet_mii_pd_init()
492 if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) bcmgenet_mii_pd_init()
493 mdio->phy_mask = ~(1 << pd->phy_address); bcmgenet_mii_pd_init()
503 if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) bcmgenet_mii_pd_init()
504 phydev = mdio->phy_map[pd->phy_address]; bcmgenet_mii_pd_init()
520 .speed = pd->phy_speed, bcmgenet_mii_pd_init()
521 .duplex = pd->phy_duplex, bcmgenet_mii_pd_init()
541 priv->phy_interface = pd->phy_interface; bcmgenet_mii_pd_init()
/linux-4.1.27/drivers/platform/x86/
H A Dsony-laptop.c141 static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
143 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
146 static int sony_nc_battery_care_setup(struct platform_device *pd,
148 static void sony_nc_battery_care_cleanup(struct platform_device *pd);
150 static int sony_nc_thermal_setup(struct platform_device *pd);
151 static void sony_nc_thermal_cleanup(struct platform_device *pd);
153 static int sony_nc_lid_resume_setup(struct platform_device *pd,
155 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
157 static int sony_nc_gfx_switch_setup(struct platform_device *pd,
159 static void sony_nc_gfx_switch_cleanup(struct platform_device *pd);
162 static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
163 static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
165 static int sony_nc_lowbatt_setup(struct platform_device *pd);
166 static void sony_nc_lowbatt_cleanup(struct platform_device *pd);
168 static int sony_nc_fanspeed_setup(struct platform_device *pd);
169 static void sony_nc_fanspeed_cleanup(struct platform_device *pd);
171 static int sony_nc_usb_charge_setup(struct platform_device *pd);
172 static void sony_nc_usb_charge_cleanup(struct platform_device *pd);
174 static int sony_nc_panelid_setup(struct platform_device *pd);
175 static void sony_nc_panelid_cleanup(struct platform_device *pd);
177 static int sony_nc_smart_conn_setup(struct platform_device *pd);
178 static void sony_nc_smart_conn_cleanup(struct platform_device *pd);
180 static int sony_nc_touchpad_setup(struct platform_device *pd,
182 static void sony_nc_touchpad_cleanup(struct platform_device *pd);
851 static int sony_nc_handles_setup(struct platform_device *pd) sony_nc_handles_setup() argument
877 if (device_create_file(&pd->dev, &handles->devattr)) { sony_nc_handles_setup()
887 static int sony_nc_handles_cleanup(struct platform_device *pd) sony_nc_handles_cleanup() argument
891 device_remove_file(&pd->dev, &handles->devattr); sony_nc_handles_cleanup()
1442 static void sony_nc_function_cleanup(struct platform_device *pd) sony_nc_function_cleanup() argument
1461 sony_nc_touchpad_cleanup(pd); sony_nc_function_cleanup()
1466 sony_nc_battery_care_cleanup(pd); sony_nc_function_cleanup()
1470 sony_nc_lid_resume_cleanup(pd); sony_nc_function_cleanup()
1473 sony_nc_thermal_cleanup(pd); sony_nc_function_cleanup()
1478 sony_nc_gfx_switch_cleanup(pd); sony_nc_function_cleanup()
1481 sony_nc_highspeed_charging_cleanup(pd); sony_nc_function_cleanup()
1492 sony_nc_kbd_backlight_cleanup(pd, handle); sony_nc_function_cleanup()
1495 sony_nc_lowbatt_cleanup(pd); sony_nc_function_cleanup()
1498 sony_nc_fanspeed_cleanup(pd); sony_nc_function_cleanup()
1501 sony_nc_usb_charge_cleanup(pd); sony_nc_function_cleanup()
1504 sony_nc_panelid_cleanup(pd); sony_nc_function_cleanup()
1507 sony_nc_smart_conn_cleanup(pd); sony_nc_function_cleanup()
1515 sony_nc_handles_cleanup(pd); sony_nc_function_cleanup()
1874 static int sony_nc_kbd_backlight_setup(struct platform_device *pd, sony_nc_kbd_backlight_setup() argument
1924 ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr); sony_nc_kbd_backlight_setup()
1928 ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr); sony_nc_kbd_backlight_setup()
1938 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); sony_nc_kbd_backlight_setup()
1945 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd, sony_nc_kbd_backlight_cleanup() argument
1949 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); sony_nc_kbd_backlight_cleanup()
1950 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); sony_nc_kbd_backlight_cleanup()
2060 static int sony_nc_battery_care_setup(struct platform_device *pd, sony_nc_battery_care_setup() argument
2077 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]); sony_nc_battery_care_setup()
2090 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]); sony_nc_battery_care_setup()
2097 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]); sony_nc_battery_care_setup()
2106 static void sony_nc_battery_care_cleanup(struct platform_device *pd) sony_nc_battery_care_cleanup() argument
2109 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]); sony_nc_battery_care_cleanup()
2111 device_remove_file(&pd->dev, &bcare_ctl->attrs[1]); sony_nc_battery_care_cleanup()
2218 static int sony_nc_thermal_setup(struct platform_device *pd) sony_nc_thermal_setup() argument
2249 ret = device_create_file(&pd->dev, &th_handle->profiles_attr); sony_nc_thermal_setup()
2253 ret = device_create_file(&pd->dev, &th_handle->mode_attr); sony_nc_thermal_setup()
2260 device_remove_file(&pd->dev, &th_handle->profiles_attr); sony_nc_thermal_setup()
2267 static void sony_nc_thermal_cleanup(struct platform_device *pd) sony_nc_thermal_cleanup() argument
2270 device_remove_file(&pd->dev, &th_handle->profiles_attr); sony_nc_thermal_cleanup()
2271 device_remove_file(&pd->dev, &th_handle->mode_attr); sony_nc_thermal_cleanup()
2355 static int sony_nc_lid_resume_setup(struct platform_device *pd, sony_nc_lid_resume_setup() argument
2392 result = device_create_file(&pd->dev, &lid_ctl->attrs[i]); sony_nc_lid_resume_setup()
2401 device_remove_file(&pd->dev, &lid_ctl->attrs[i]); sony_nc_lid_resume_setup()
2409 static void sony_nc_lid_resume_cleanup(struct platform_device *pd) sony_nc_lid_resume_cleanup() argument
2418 device_remove_file(&pd->dev, &lid_ctl->attrs[i]); sony_nc_lid_resume_cleanup()
2489 static int sony_nc_gfx_switch_setup(struct platform_device *pd, sony_nc_gfx_switch_setup() argument
2505 result = device_create_file(&pd->dev, &gfxs_ctl->attr); sony_nc_gfx_switch_setup()
2518 static void sony_nc_gfx_switch_cleanup(struct platform_device *pd) sony_nc_gfx_switch_cleanup() argument
2521 device_remove_file(&pd->dev, &gfxs_ctl->attr); sony_nc_gfx_switch_cleanup()
2561 static int sony_nc_highspeed_charging_setup(struct platform_device *pd) sony_nc_highspeed_charging_setup() argument
2583 result = device_create_file(&pd->dev, hsc_handle); sony_nc_highspeed_charging_setup()
2593 static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd) sony_nc_highspeed_charging_cleanup() argument
2596 device_remove_file(&pd->dev, hsc_handle); sony_nc_highspeed_charging_cleanup()
2635 static int sony_nc_lowbatt_setup(struct platform_device *pd) sony_nc_lowbatt_setup() argument
2649 result = device_create_file(&pd->dev, lowbatt_handle); sony_nc_lowbatt_setup()
2659 static void sony_nc_lowbatt_cleanup(struct platform_device *pd) sony_nc_lowbatt_cleanup() argument
2662 device_remove_file(&pd->dev, lowbatt_handle); sony_nc_lowbatt_cleanup()
2712 static int sony_nc_fanspeed_setup(struct platform_device *pd) sony_nc_fanspeed_setup() argument
2738 result = device_create_file(&pd->dev, fan_handle); sony_nc_fanspeed_setup()
2742 result = device_create_file(&pd->dev, hsf_handle); sony_nc_fanspeed_setup()
2749 device_remove_file(&pd->dev, fan_handle); sony_nc_fanspeed_setup()
2761 static void sony_nc_fanspeed_cleanup(struct platform_device *pd) sony_nc_fanspeed_cleanup() argument
2764 device_remove_file(&pd->dev, fan_handle); sony_nc_fanspeed_cleanup()
2769 device_remove_file(&pd->dev, hsf_handle); sony_nc_fanspeed_cleanup()
2808 static int sony_nc_usb_charge_setup(struct platform_device *pd) sony_nc_usb_charge_setup() argument
2830 result = device_create_file(&pd->dev, uc_handle); sony_nc_usb_charge_setup()
2840 static void sony_nc_usb_charge_cleanup(struct platform_device *pd) sony_nc_usb_charge_cleanup() argument
2843 device_remove_file(&pd->dev, uc_handle); sony_nc_usb_charge_cleanup()
2863 static int sony_nc_panelid_setup(struct platform_device *pd) sony_nc_panelid_setup() argument
2877 result = device_create_file(&pd->dev, panel_handle); sony_nc_panelid_setup()
2887 static void sony_nc_panelid_cleanup(struct platform_device *pd) sony_nc_panelid_cleanup() argument
2890 device_remove_file(&pd->dev, panel_handle); sony_nc_panelid_cleanup()
2918 static int sony_nc_smart_conn_setup(struct platform_device *pd) sony_nc_smart_conn_setup() argument
2932 result = device_create_file(&pd->dev, sc_handle); sony_nc_smart_conn_setup()
2942 static void sony_nc_smart_conn_cleanup(struct platform_device *pd) sony_nc_smart_conn_cleanup() argument
2945 device_remove_file(&pd->dev, sc_handle); sony_nc_smart_conn_cleanup()
2991 static int sony_nc_touchpad_setup(struct platform_device *pd, sony_nc_touchpad_setup() argument
3008 ret = device_create_file(&pd->dev, &tp_ctl->attr); sony_nc_touchpad_setup()
3017 static void sony_nc_touchpad_cleanup(struct platform_device *pd) sony_nc_touchpad_cleanup() argument
3020 device_remove_file(&pd->dev, &tp_ctl->attr); sony_nc_touchpad_cleanup()
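Nearly every sony_nc_*_setup/cleanup pair above reduces to the same shape: device_create_file on the platform device for each attribute, unwinding the earlier ones when a later one fails, with the cleanup function removing them all. A generic sketch of that unwind (the attribute array is a placeholder, assumed to be initialized DEVICE_ATTR-style elsewhere):

    #include <linux/device.h>
    #include <linux/platform_device.h>

    static struct device_attribute attrs[2]; /* assumed initialized elsewhere */

    static int feature_setup(struct platform_device *pd)
    {
            int ret;

            ret = device_create_file(&pd->dev, &attrs[0]);
            if (ret)
                    return ret;

            ret = device_create_file(&pd->dev, &attrs[1]);
            if (ret)
                    device_remove_file(&pd->dev, &attrs[0]); /* unwind */
            return ret;
    }

    static void feature_cleanup(struct platform_device *pd)
    {
            device_remove_file(&pd->dev, &attrs[0]);
            device_remove_file(&pd->dev, &attrs[1]);
    }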
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Ddfs.c133 struct dfs_pattern_detector *pd = sc->dfs_detector; ath9k_dfs_process_radar_pulse() local
135 if (pd == NULL) ath9k_dfs_process_radar_pulse()
137 if (!pd->add_pulse(pd, pe)) ath9k_dfs_process_radar_pulse()
/linux-4.1.27/drivers/pcmcia/
H A Dbcm63xx_pcmcia.h25 struct bcm63xx_pcmcia_platform_data *pd; member in struct:bcm63xx_pcmcia_socket
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
H A Dgddr5.c37 int pd, lf, xd, vh, vr, vo, l3; nvkm_gddr5_calc() local
43 pd = ram->next->bios.ramcfg_11_01_80; nvkm_gddr5_calc()
103 pd = 1; /* binary driver does this.. bug? */ nvkm_gddr5_calc()
106 ram->mr[6] |= (pd & 0x01) << 0; nvkm_gddr5_calc()
/linux-4.1.27/arch/mips/cavium-octeon/
H A Docteon-platform.c35 struct platform_device *pd; octeon_rng_device_init() local
50 pd = platform_device_alloc("octeon_rng", -1); octeon_rng_device_init()
51 if (!pd) { octeon_rng_device_init()
56 ret = platform_device_add_resources(pd, rng_resources, octeon_rng_device_init()
61 ret = platform_device_add(pd); octeon_rng_device_init()
67 platform_device_put(pd); octeon_rng_device_init()
343 struct platform_device *pd; octeon_ehci_device_init() local
351 pd = of_find_device_by_node(ehci_node); octeon_ehci_device_init()
352 if (!pd) octeon_ehci_device_init()
355 pd->dev.platform_data = &octeon_ehci_pdata; octeon_ehci_device_init()
356 octeon_ehci_hw_start(&pd->dev); octeon_ehci_device_init()
405 struct platform_device *pd; octeon_ohci_device_init() local
413 pd = of_find_device_by_node(ohci_node); octeon_ohci_device_init()
414 if (!pd) octeon_ohci_device_init()
417 pd->dev.platform_data = &octeon_ohci_pdata; octeon_ohci_device_init()
418 octeon_ohci_hw_start(&pd->dev); octeon_ohci_device_init()
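octeon_rng_device_init above is a textbook use of the long-form platform-device registration: platform_device_alloc, then platform_device_add_resources, then platform_device_add, with platform_device_put on any failure so the half-built device is released. A condensed sketch (the device name is illustrative):

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    static int register_example_dev(struct resource *res, unsigned int nres)
    {
            struct platform_device *pd;
            int ret;

            pd = platform_device_alloc("example-dev", -1);
            if (!pd)
                    return -ENOMEM;

            ret = platform_device_add_resources(pd, res, nres);
            if (ret)
                    goto err_put;

            ret = platform_device_add(pd);
            if (ret)
                    goto err_put;
            return 0;

    err_put:
            platform_device_put(pd); /* drops the half-initialized device */
            return ret;
    }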
/linux-4.1.27/arch/powerpc/include/asm/
H A Dhugetlb.h22 return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK); hugepd_page()
27 return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; hugepd_mmu_psize()
40 return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); hugepd_page()
45 return hpd.pd & HUGEPD_SHIFT_MASK; hugepd_shift()
/linux-4.1.27/sound/soc/dwc/
H A Ddesignware_i2s.c87 struct i2s_dma_data pd; member in union:dw_i2s_snd_dma_data
466 dev->play_dma_data.pd.data = pdata->play_dma_data; dw_configure_dai_by_pd()
467 dev->capture_dma_data.pd.data = pdata->capture_dma_data; dw_configure_dai_by_pd()
468 dev->play_dma_data.pd.addr = res->start + I2S_TXDMA; dw_configure_dai_by_pd()
469 dev->capture_dma_data.pd.addr = res->start + I2S_RXDMA; dw_configure_dai_by_pd()
470 dev->play_dma_data.pd.max_burst = 16; dw_configure_dai_by_pd()
471 dev->capture_dma_data.pd.max_burst = 16; dw_configure_dai_by_pd()
472 dev->play_dma_data.pd.addr_width = bus_widths[idx]; dw_configure_dai_by_pd()
473 dev->capture_dma_data.pd.addr_width = bus_widths[idx]; dw_configure_dai_by_pd()
474 dev->play_dma_data.pd.filter = pdata->filter; dw_configure_dai_by_pd()
475 dev->capture_dma_data.pd.filter = pdata->filter; dw_configure_dai_by_pd()
/linux-4.1.27/arch/arm/include/asm/
H A Dtrusted_foundations.h43 void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
49 struct trusted_foundations_platform_data *pd) register_trusted_foundations()
48 register_trusted_foundations( struct trusted_foundations_platform_data *pd) register_trusted_foundations() argument
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
H A Dipoib_verbs.c148 priv->pd = ib_alloc_pd(priv->ca); ipoib_transport_dev_init()
149 if (IS_ERR(priv->pd)) { ipoib_transport_dev_init()
154 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); ipoib_transport_dev_init()
213 priv->qp = ib_create_qp(priv->pd, &init_attr); ipoib_transport_dev_init()
257 ib_dealloc_pd(priv->pd); ipoib_transport_dev_init()
291 if (ib_dealloc_pd(priv->pd)) ipoib_transport_dev_cleanup()
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dfrwr_ops.c21 __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device, __frwr_init() argument
27 f->fr_mr = ib_alloc_fast_reg_mr(pd, depth); __frwr_init()
142 struct ib_pd *pd = r_xprt->rx_ia.ri_pd; frwr_op_init() local
159 rc = __frwr_init(r, pd, device, depth); frwr_op_init()
309 struct ib_pd *pd = r_xprt->rx_ia.ri_pd; frwr_op_reset() local
318 rc = __frwr_init(r, pd, device, depth); frwr_op_reset()
/linux-4.1.27/drivers/video/fbdev/omap2/dss/
H A Drfbi.c260 const u16 __iomem *pd = buf; rfbi_write_pixels() local
261 pd += start_offset; rfbi_write_pixels()
265 const u8 __iomem *b = (const u8 __iomem *)pd; rfbi_write_pixels()
268 ++pd; rfbi_write_pixels()
270 pd += horiz_offset; rfbi_write_pixels()
274 const u32 __iomem *pd = buf; rfbi_write_pixels() local
275 pd += start_offset; rfbi_write_pixels()
279 const u8 __iomem *b = (const u8 __iomem *)pd; rfbi_write_pixels()
283 ++pd; rfbi_write_pixels()
285 pd += horiz_offset; rfbi_write_pixels()
289 const u16 __iomem *pd = buf; rfbi_write_pixels() local
290 pd += start_offset; rfbi_write_pixels()
294 rfbi_write_reg(RFBI_PARAM, __raw_readw(pd)); rfbi_write_pixels()
295 ++pd; rfbi_write_pixels()
297 pd += horiz_offset; rfbi_write_pixels()
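The rfbi_write_pixels fragments walk a sub-rectangle of the source buffer: pd starts at start_offset, advances one pixel at a time across each scanline, then skips horiz_offset pixels to reach the next row. The same traversal in a self-contained form (plain C; the register write is replaced by a caller-supplied emit callback):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: emit a w x h sub-rectangle from a 16bpp buffer with row stride. */
    static void write_rect16(const uint16_t *buf, size_t start_offset,
                             size_t horiz_offset, int w, int h,
                             void (*emit)(uint16_t px))
    {
            const uint16_t *pd = buf + start_offset;
            int x, y;

            for (y = 0; y < h; ++y) {
                    for (x = 0; x < w; ++x)
                            emit(*pd++);
                    pd += horiz_offset; /* skip to the next scanline */
            }
    }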
/linux-4.1.27/drivers/mtd/nand/
H A Dau1550nd.c405 struct au1550nd_platdata *pd; au1550nd_probe() local
411 pd = dev_get_platdata(&pdev->dev); au1550nd_probe()
412 if (!pd) { au1550nd_probe()
461 if (pd->devwidth) au1550nd_probe()
464 this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte; au1550nd_probe()
465 ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte; au1550nd_probe()
467 this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; au1550nd_probe()
468 this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; au1550nd_probe()
476 mtd_device_register(&ctx->info, pd->parts, pd->num_parts); au1550nd_probe()
/linux-4.1.27/fs/autofs4/
H A Dexpire.c44 DPRINTK("dentry %p %pd", dentry, dentry); autofs4_mount_busy()
194 DPRINTK("top %p %pd", top, top); autofs4_direct_busy()
222 DPRINTK("top %p %pd", top, top); autofs4_tree_busy()
230 DPRINTK("dentry %p %pd", p, p); autofs4_tree_busy()
276 DPRINTK("parent %p %pd", parent, parent); autofs4_check_leaves()
280 DPRINTK("dentry %p %pd", p, p); autofs4_check_leaves()
365 DPRINTK("checking mountpoint %p %pd", dentry, dentry); should_expire()
378 DPRINTK("checking symlink %p %pd", dentry, dentry); should_expire()
474 DPRINTK("returning %p %pd", expired, expired); autofs4_expire_indirect()
506 DPRINTK("waiting for expire %p name=%pd", dentry, dentry); autofs4_expire_wait()
/linux-4.1.27/drivers/usb/gadget/function/
H A Df_uvc.c793 struct uvc_processing_unit_descriptor *pd; uvc_alloc_inst() local
820 pd = &opts->uvc_processing; uvc_alloc_inst()
821 pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(2); uvc_alloc_inst()
822 pd->bDescriptorType = USB_DT_CS_INTERFACE; uvc_alloc_inst()
823 pd->bDescriptorSubType = UVC_VC_PROCESSING_UNIT; uvc_alloc_inst()
824 pd->bUnitID = 2; uvc_alloc_inst()
825 pd->bSourceID = 1; uvc_alloc_inst()
826 pd->wMaxMultiplier = cpu_to_le16(16*1024); uvc_alloc_inst()
827 pd->bControlSize = 2; uvc_alloc_inst()
828 pd->bmControls[0] = 1; uvc_alloc_inst()
829 pd->bmControls[1] = 0; uvc_alloc_inst()
830 pd->iProcessing = 0; uvc_alloc_inst()
854 ctl_cls[2] = (struct uvc_descriptor_header *)pd; uvc_alloc_inst()
864 ctl_cls[2] = (struct uvc_descriptor_header *)pd; uvc_alloc_inst()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
H A Diwch_provider.c64 static struct ib_ah *iwch_ah_create(struct ib_pd *pd, iwch_ah_create() argument
402 static int iwch_deallocate_pd(struct ib_pd *pd) iwch_deallocate_pd() argument
407 php = to_iwch_pd(pd); iwch_deallocate_pd()
409 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); iwch_deallocate_pd()
472 static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, iwch_register_phys_mem() argument
487 PDBG("%s ib_pd %p\n", __func__, pd); iwch_register_phys_mem()
488 php = to_iwch_pd(pd); iwch_register_phys_mem()
551 struct ib_pd *pd, iwch_reregister_phys_mem()
566 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); iwch_reregister_phys_mem()
574 php = to_iwch_pd(mr->pd); iwch_reregister_phys_mem()
583 php = to_iwch_pd(pd); iwch_reregister_phys_mem()
616 static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, iwch_reg_user_mr() argument
628 PDBG("%s ib_pd %p\n", __func__, pd); iwch_reg_user_mr()
630 php = to_iwch_pd(pd); iwch_reg_user_mr()
638 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); iwch_reg_user_mr()
719 static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc) iwch_get_dma_mr() argument
725 PDBG("%s ib_pd %p\n", __func__, pd); iwch_get_dma_mr()
733 ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva); iwch_get_dma_mr()
737 static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) iwch_alloc_mw() argument
749 php = to_iwch_pd(pd); iwch_alloc_mw()
790 static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) iwch_alloc_fast_reg_mr() argument
799 php = to_iwch_pd(pd); iwch_alloc_fast_reg_mr()
888 static struct ib_qp *iwch_create_qp(struct ib_pd *pd, iwch_create_qp() argument
901 PDBG("%s ib_pd %p\n", __func__, pd); iwch_create_qp()
904 php = to_iwch_pd(pd); iwch_create_qp()
938 ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL; iwch_create_qp()
961 qhp->attr.pd = php->pdid; iwch_create_qp()
549 iwch_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 * iova_start) iwch_reregister_phys_mem() argument
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
H A Dbcdc.c113 struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; brcmf_proto_bcdc_msg()
144 struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; brcmf_proto_bcdc_cmplt()
162 struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; brcmf_proto_bcdc_query_dcmd()
217 struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; brcmf_proto_bcdc_set_dcmd()
379 drvr->proto->pd = bcdc; brcmf_proto_bcdc_attach()
393 kfree(drvr->proto->pd); brcmf_proto_bcdc_detach()
394 drvr->proto->pd = NULL; brcmf_proto_bcdc_detach()
/linux-4.1.27/drivers/parport/
H A Dshare.c946 struct pardevice *pd; parport_release() local
981 for (pd = port->waithead; pd; pd = pd->waitnext) { parport_release()
982 if (pd->waiting & 2) { /* sleeping in claim_or_block */ parport_release()
983 parport_claim(pd); parport_release()
984 if (waitqueue_active(&pd->wait_q)) parport_release()
985 wake_up_interruptible(&pd->wait_q); parport_release()
987 } else if (pd->wakeup) { parport_release()
988 pd->wakeup(pd->private); parport_release()
992 printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name); parport_release()
999 for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { parport_release()
1000 if (pd->wakeup && pd != dev) parport_release()
1001 pd->wakeup(pd->private); parport_release()
/linux-4.1.27/drivers/net/ethernet/broadcom/
H A Dbcm63xx_enet.c1733 struct bcm63xx_enet_platform_data *pd; bcm_enet_probe() local
1797 pd = dev_get_platdata(&pdev->dev); bcm_enet_probe()
1798 if (pd) { bcm_enet_probe()
1799 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); bcm_enet_probe()
1800 priv->has_phy = pd->has_phy; bcm_enet_probe()
1801 priv->phy_id = pd->phy_id; bcm_enet_probe()
1802 priv->has_phy_interrupt = pd->has_phy_interrupt; bcm_enet_probe()
1803 priv->phy_interrupt = pd->phy_interrupt; bcm_enet_probe()
1804 priv->use_external_mii = !pd->use_internal_phy; bcm_enet_probe()
1805 priv->pause_auto = pd->pause_auto; bcm_enet_probe()
1806 priv->pause_rx = pd->pause_rx; bcm_enet_probe()
1807 priv->pause_tx = pd->pause_tx; bcm_enet_probe()
1808 priv->force_duplex_full = pd->force_duplex_full; bcm_enet_probe()
1809 priv->force_speed_100 = pd->force_speed_100; bcm_enet_probe()
1810 priv->dma_chan_en_mask = pd->dma_chan_en_mask; bcm_enet_probe()
1811 priv->dma_chan_int_mask = pd->dma_chan_int_mask; bcm_enet_probe()
1812 priv->dma_chan_width = pd->dma_chan_width; bcm_enet_probe()
1813 priv->dma_has_sram = pd->dma_has_sram; bcm_enet_probe()
1814 priv->dma_desc_shift = pd->dma_desc_shift; bcm_enet_probe()
1873 if (pd->mii_config && bcm_enet_probe()
1874 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_probe()
1959 struct bcm63xx_enet_platform_data *pd; bcm_enet_remove() local
1961 pd = dev_get_platdata(&pdev->dev); bcm_enet_remove()
1962 if (pd && pd->mii_config) bcm_enet_remove()
1963 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, bcm_enet_remove()
2708 struct bcm63xx_enetsw_platform_data *pd; bcm_enetsw_probe() local
2739 pd = dev_get_platdata(&pdev->dev); bcm_enetsw_probe()
2740 if (pd) { bcm_enetsw_probe()
2741 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); bcm_enetsw_probe()
2742 memcpy(priv->used_ports, pd->used_ports, bcm_enetsw_probe()
2743 sizeof(pd->used_ports)); bcm_enetsw_probe()
2744 priv->num_ports = pd->num_ports; bcm_enetsw_probe()
2745 priv->dma_has_sram = pd->dma_has_sram; bcm_enetsw_probe()
2746 priv->dma_chan_en_mask = pd->dma_chan_en_mask; bcm_enetsw_probe()
2747 priv->dma_chan_int_mask = pd->dma_chan_int_mask; bcm_enetsw_probe()
2748 priv->dma_chan_width = pd->dma_chan_width; bcm_enetsw_probe()
/linux-4.1.27/drivers/iio/dac/
H A Dmcp4725.c287 u8 pd; mcp4725_probe() local
317 pd = (inbuf[0] >> 1) & 0x3; mcp4725_probe()
318 data->powerdown = pd > 0 ? true : false; mcp4725_probe()
319 data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */ mcp4725_probe()
/linux-4.1.27/drivers/video/console/
H A Dsticore.c442 if (sti->pd) { sti_init_glob_cfg()
455 newhpa = pci_resource_start (sti->pd, (offs - PCI_BASE_ADDRESS_0) / 4); sti_init_glob_cfg()
818 struct pci_dev *pd) sti_try_rom_generic()
858 sti->pd = pd; sti_try_rom_generic()
906 if (sti->pd) { sti_try_rom_generic()
908 rom_base = pci_resource_start(sti->pd, PCI_ROM_RESOURCE); sti_try_rom_generic()
909 pci_write_config_dword(sti->pd, PCI_ROM_ADDRESS, rom_base & ~PCI_ROM_ADDRESS_ENABLE); sti_try_rom_generic()
965 static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent) sticore_pci_init() argument
973 err = pci_enable_device(pd); sticore_pci_init()
975 dev_err(&pd->dev, "Cannot enable PCI device\n"); sticore_pci_init()
979 fb_base = pci_resource_start(pd, 0); sticore_pci_init()
980 fb_len = pci_resource_len(pd, 0); sticore_pci_init()
981 rom_base = pci_resource_start(pd, PCI_ROM_RESOURCE); sticore_pci_init()
982 rom_len = pci_resource_len(pd, PCI_ROM_RESOURCE); sticore_pci_init()
984 pci_write_config_dword(pd, PCI_ROM_ADDRESS, rom_base | PCI_ROM_ADDRESS_ENABLE); sticore_pci_init()
994 sti = sti_try_rom_generic(rom_base, fb_base, pd); sticore_pci_init()
997 print_pci_hwpath(pd, pa_path); sticore_pci_init()
1003 pci_name(pd)); sticore_pci_init()
1012 static void sticore_pci_remove(struct pci_dev *pd) sticore_pci_remove() argument
816 sti_try_rom_generic(unsigned long address, unsigned long hpa, struct pci_dev *pd) sti_try_rom_generic() argument
/linux-4.1.27/drivers/scsi/megaraid/
H A Dmegaraid_sas_fp.c137 return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); MR_ArPdGet()
145 u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) MR_PdDevHandleGet() argument
147 return map->raidMap.devHndlInfo[pd].curDevHdl; MR_PdDevHandleGet()
732 u32 pd, arRef; mr_spanset_get_phy_params() local
767 pd = MR_ArPdGet(arRef, physArm, map); mr_spanset_get_phy_params()
769 if (pd != MR_PD_INVALID) mr_spanset_get_phy_params()
770 *pDevHandle = MR_PdDevHandleGet(pd, map); mr_spanset_get_phy_params()
778 pd = MR_ArPdGet(arRef, physArm + 1, map); mr_spanset_get_phy_params()
779 if (pd != MR_PD_INVALID) mr_spanset_get_phy_params()
780 *pDevHandle = MR_PdDevHandleGet(pd, map); mr_spanset_get_phy_params()
814 u32 pd, arRef; MR_GetPhyParams() local
861 pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ MR_GetPhyParams()
863 if (pd != MR_PD_INVALID) MR_GetPhyParams()
865 *pDevHandle = MR_PdDevHandleGet(pd, map); MR_GetPhyParams()
874 pd = MR_ArPdGet(arRef, physArm + 1, map); MR_GetPhyParams()
875 if (pd != MR_PD_INVALID) MR_GetPhyParams()
877 *pDevHandle = MR_PdDevHandleGet(pd, map); MR_GetPhyParams()
1330 /* Update the last accessed block on the correct pd */ megasas_get_best_arm_pd()
/linux-4.1.27/fs/afs/
H A Ddir.c437 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); afs_do_lookup()
469 _enter("%d, %p{%pd}, {%x:%u}, %p", afs_try_auto_mntpt()
505 _enter("{%x:%u},%p{%pd},", afs_lookup()
592 _enter("{v={%x:%u} n=%pd fl=%lx},", afs_d_revalidate()
596 _enter("{neg n=%pd}", dentry); afs_d_revalidate()
611 _debug("%pd: parent dir deleted", dentry); afs_d_revalidate()
637 _debug("%pd: dirent changed [%u != %u]", afs_d_revalidate()
647 _debug("%pd: file deleted (uq %u -> %u I:%u)", afs_d_revalidate()
660 _debug("%pd: dirent not found", dentry); afs_d_revalidate()
666 _debug("failed to iterate dir %pd: %d", afs_d_revalidate()
701 _enter("%pd", dentry); afs_d_delete()
724 _enter("%pd", dentry); afs_d_release()
743 _enter("{%x:%u},{%pd},%ho", afs_mkdir()
804 _enter("{%x:%u},{%pd}", afs_rmdir()
846 _enter("{%x:%u},{%pd}", afs_unlink()
920 _enter("{%x:%u},{%pd},%ho,", afs_create()
983 _enter("{%x:%u},{%x:%u},{%pd}", afs_link()
1028 _enter("{%x:%u},{%pd},%s", afs_symlink()
1096 _enter("{%x:%u},{%x:%u},{%x:%u},{%pd}", afs_rename()
/linux-4.1.27/include/xen/interface/io/
H A Dfbif.h121 * Each directory page holds PAGE_SIZE / sizeof(*pd)
123 * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
130 unsigned long pd[256]; member in struct:xenfb_page
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_hmc.c77 /* allocate a 4K pd page or 2M backing page */ i40e_add_sd_table_entry()
121 * 1. Initializes the pd entry
126 * 1. The memory for pd should be pinned down, physically contiguous and
174 /* Add the backing page physical address in the pd entry */ i40e_add_pd_table_entry()
194 * 1. Marks the entry in pd tabe (for paged address mode) or in sd table
197 * 3. Decrement the ref count for the pd _entry
/linux-4.1.27/drivers/mfd/
H A Dmax8997.c148 struct max8997_platform_data *pd; max8997_i2c_parse_dt_pdata() local
150 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); max8997_i2c_parse_dt_pdata()
151 if (!pd) { max8997_i2c_parse_dt_pdata()
156 pd->ono = irq_of_parse_and_map(dev->of_node, 1); max8997_i2c_parse_dt_pdata()
164 return pd; max8997_i2c_parse_dt_pdata()
H A Dmax8998.c156 struct max8998_platform_data *pd; max8998_i2c_parse_dt_pdata() local
158 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); max8998_i2c_parse_dt_pdata()
159 if (!pd) max8998_i2c_parse_dt_pdata()
162 pd->ono = irq_of_parse_and_map(dev->of_node, 1); max8998_i2c_parse_dt_pdata()
169 return pd; max8998_i2c_parse_dt_pdata()
H A Dsec-core.c269 struct sec_platform_data *pd; sec_pmic_i2c_parse_dt_pdata() local
271 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); sec_pmic_i2c_parse_dt_pdata()
272 if (!pd) sec_pmic_i2c_parse_dt_pdata()
281 return pd; sec_pmic_i2c_parse_dt_pdata()
/linux-4.1.27/drivers/net/wireless/ath/
H A Ddfs_pattern_detector.c284 struct pri_detector *pd = cd->detectors[i]; dpd_add_pulse() local
285 struct pri_sequence *ps = pd->add_pulse(pd, event); dpd_add_pulse()
290 event->freq, pd->rs->type_id, dpd_add_pulse()
292 pd->reset(pd, dpd->last_pulse_ts); dpd_add_pulse()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
H A Dmem.c503 struct ib_pd *pd, struct ib_phys_buf *buffer_list, c4iw_reregister_phys_mem()
516 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); c4iw_reregister_phys_mem()
524 php = to_c4iw_pd(mr->pd); c4iw_reregister_phys_mem()
533 php = to_c4iw_pd(pd); c4iw_reregister_phys_mem()
572 struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd, c4iw_register_phys_mem() argument
585 PDBG("%s ib_pd %p\n", __func__, pd); c4iw_register_phys_mem()
586 php = to_c4iw_pd(pd); c4iw_register_phys_mem()
656 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) c4iw_get_dma_mr() argument
664 PDBG("%s ib_pd %p\n", __func__, pd); c4iw_get_dma_mr()
665 php = to_c4iw_pd(pd); c4iw_get_dma_mr()
700 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, c4iw_reg_user_mr() argument
712 PDBG("%s ib_pd %p\n", __func__, pd); c4iw_reg_user_mr()
720 php = to_c4iw_pd(pd); c4iw_reg_user_mr()
732 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); c4iw_reg_user_mr()
803 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) c4iw_alloc_mw() argument
815 php = to_c4iw_pd(pd); c4iw_alloc_mw()
856 struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) c4iw_alloc_fast_reg_mr() argument
865 php = to_c4iw_pd(pd); c4iw_alloc_fast_reg_mr()
502 c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) c4iw_reregister_phys_mem() argument
H A Dprovider.c61 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd, c4iw_ah_create() argument
231 static int c4iw_deallocate_pd(struct ib_pd *pd) c4iw_deallocate_pd() argument
236 php = to_c4iw_pd(pd); c4iw_deallocate_pd()
238 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); c4iw_deallocate_pd()
241 rhp->rdev.stats.pd.cur--; c4iw_deallocate_pd()
274 rhp->rdev.stats.pd.cur++; c4iw_allocate_pd()
275 if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max) c4iw_allocate_pd()
276 rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur; c4iw_allocate_pd()
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
H A Dlovsub_lock.c196 struct cl_lock_descr *pd; lov_sublock_modify() local
205 pd = &lov_env_info(env)->lti_ldescr; lov_sublock_modify()
207 pd->cld_obj = parent_descr->cld_obj; lov_sublock_modify()
208 pd->cld_mode = parent_descr->cld_mode; lov_sublock_modify()
209 pd->cld_gid = parent_descr->cld_gid; lov_sublock_modify()
210 lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd); lov_sublock_modify()
216 if (!cl_lock_ext_match(parent_descr, pd)) lov_sublock_modify()
217 result = cl_lock_modify(env, parent, pd); lov_sublock_modify()
/linux-4.1.27/drivers/block/paride/
H A Dpd.c2 pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
12 The behaviour of the pd driver can be altered by setting
72 (default "pd")
93 pd.drive0
94 pd.drive1
95 pd.drive2
96 pd.drive3
97 pd.cluster
98 pd.nice
100 In addition, you can use the parameter pd.disable to disable
119 #define PD_NAME "pd"
241 static struct pd_unit pd[PD_UNITS]; variable in typeref:struct:pd_unit
840 p->first_minor = (disk - pd) << PD_BITS; pd_probe_drive()
862 struct pd_unit *disk = pd + unit; pd_detect()
876 disk = pd; pd_detect()
885 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { pd_detect()
898 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { pd_detect()
944 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { pd_exit()
/linux-4.1.27/arch/mips/pci/
H A Dpci-alchemy.c363 struct alchemy_pci_platdata *pd = pdev->dev.platform_data; alchemy_pci_probe() local
372 if (!pd) { alchemy_pci_probe()
440 if (pd->board_map_irq) alchemy_pci_probe()
441 ctx->board_map_irq = pd->board_map_irq; alchemy_pci_probe()
443 if (pd->board_pci_idsel) alchemy_pci_probe()
444 ctx->board_pci_idsel = pd->board_pci_idsel; alchemy_pci_probe()
472 val &= ~pd->pci_cfg_clr; alchemy_pci_probe()
473 val |= pd->pci_cfg_set; alchemy_pci_probe()
/linux-4.1.27/fs/9p/
H A Dvfs_dentry.c52 p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", v9fs_cached_dentry_delete()
70 p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", v9fs_dentry_release()
/linux-4.1.27/include/rdma/
H A Dib_verbs.h1110 struct ib_pd *pd; member in struct:ib_mr_attr
1212 struct ib_pd *pd; member in struct:ib_ah
1230 struct ib_pd *pd; member in struct:ib_srq
1248 struct ib_pd *pd; member in struct:ib_qp
1267 struct ib_pd *pd; member in struct:ib_mr
1276 struct ib_pd *pd; member in struct:ib_mw
1284 struct ib_pd *pd; member in struct:ib_fmr
1525 int (*dealloc_pd)(struct ib_pd *pd);
1526 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1533 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1546 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1580 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1582 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1587 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1597 struct ib_pd *pd,
1603 struct ib_mr * (*create_mr)(struct ib_pd *pd,
1605 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1612 struct ib_pd *pd,
1617 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1623 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1777 * @pd: The protection domain to deallocate.
1779 int ib_dealloc_pd(struct ib_pd *pd);
1783 * @pd: The protection domain associated with the address handle.
1789 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1808 * @pd: The protection domain associated with the address handle.
1817 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1847 * @pd: The protection domain associated with the SRQ.
1857 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1908 * @pd: The protection domain associated with the QP.
1913 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2129 * @pd: The protection domain associated with the memory region.
2136 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2393 * @pd: The protection domain associated assigned to the registered region.
2400 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2414 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
2430 struct ib_pd *pd,
2456 * @pd: The protection domain associated with the region.
2459 struct ib_mr *ib_create_mr(struct ib_pd *pd,
2474 * @pd: The protection domain associated with the region.
2478 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2532 * @pd: The protection domain associated with the memory window.
2535 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2568 * @pd: The protection domain associated with the unmapped region.
2575 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
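Taken together, the ib_verbs.h hits sketch the PD-centric object model: AHs, SRQs, QPs, MRs, MWs and FMRs are all created against a protection domain and must be destroyed before the PD itself is deallocated. A hedged lifecycle sketch using the 4.1-era single-argument ib_alloc_pd and the DMA MR helper declared above:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int pd_mr_roundtrip(struct ib_device *device)
    {
            struct ib_pd *pd;
            struct ib_mr *mr;
            int ret;

            pd = ib_alloc_pd(device);
            if (IS_ERR(pd))
                    return PTR_ERR(pd);

            mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
            if (IS_ERR(mr)) {
                    ret = PTR_ERR(mr);
                    goto out_pd;
            }

            /* ... post work requests referencing mr->lkey ... */

            ret = ib_dereg_mr(mr);
    out_pd:
            ib_dealloc_pd(pd); /* only after all PD children are gone */
            return ret;
    }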
/linux-4.1.27/drivers/video/fbdev/nvidia/
H A Dnvidia.c1275 static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent) nvidiafb_probe() argument
1283 assert(pd != NULL); nvidiafb_probe()
1285 info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev); nvidiafb_probe()
1291 par->pci_dev = pd; nvidiafb_probe()
1297 if (pci_enable_device(pd)) { nvidiafb_probe()
1302 if (pci_request_regions(pd, "nvidiafb")) { nvidiafb_probe()
1318 pci_read_config_word(pd, PCI_COMMAND, &cmd); nvidiafb_probe()
1320 pci_write_config_word(pd, PCI_COMMAND, cmd); nvidiafb_probe()
1322 nvidiafb_fix.mmio_start = pci_resource_start(pd, 0); nvidiafb_probe()
1323 nvidiafb_fix.smem_start = pci_resource_start(pd, 1); nvidiafb_probe()
1324 nvidiafb_fix.mmio_len = pci_resource_len(pd, 0); nvidiafb_probe()
1341 sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4); nvidiafb_probe()
1400 pci_set_drvdata(pd, info); nvidiafb_probe()
1427 pci_release_regions(pd); nvidiafb_probe()
1436 static void nvidiafb_remove(struct pci_dev *pd) nvidiafb_remove() argument
1438 struct fb_info *info = pci_get_drvdata(pd); nvidiafb_remove()
1457 pci_release_regions(pd); nvidiafb_remove()
/linux-4.1.27/drivers/net/dsa/
H A Dmv88e6131.c81 if (ds->dst->pd->nr_chips > 1) mv88e6131_setup_global()
103 if (ds->pd->rtable && mv88e6131_setup_global()
104 i != ds->index && i < ds->dst->pd->nr_chips) mv88e6131_setup_global()
105 nexthop = ds->pd->rtable[i] & 0x1f; mv88e6131_setup_global()
/linux-4.1.27/arch/microblaze/mm/
H A Dpgtable.c140 pmd_t *pd; map_page() local
144 pd = pmd_offset(pgd_offset_k(va), va); map_page()
146 pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ map_page()
147 /* pg = pte_alloc_kernel(&init_mm, pd, va); */ map_page()
/linux-4.1.27/arch/s390/include/asm/
H A Dnmi.h18 __u32 pd : 1; /* 01 instruction-processing damage */ member in struct:mci
/linux-4.1.27/sound/isa/wavefront/
H A Dwavefront_fx.c174 unsigned short *pd; snd_wavefront_fx_ioctl() local
200 pd = (unsigned short *) &r.data[3]; snd_wavefront_fx_ioctl()
212 pd = page_data; snd_wavefront_fx_ioctl()
219 pd); snd_wavefront_fx_ioctl()
/linux-4.1.27/drivers/net/ethernet/marvell/
H A Dmv643xx_eth.c2774 struct mv643xx_eth_shared_platform_data *pd; mv643xx_eth_shared_of_probe() local
2782 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); mv643xx_eth_shared_of_probe()
2783 if (!pd) mv643xx_eth_shared_of_probe()
2785 pdev->dev.platform_data = pd; mv643xx_eth_shared_of_probe()
2787 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); mv643xx_eth_shared_of_probe()
2820 struct mv643xx_eth_shared_platform_data *pd; mv643xx_eth_shared_probe() local
2857 pd = dev_get_platdata(&pdev->dev); mv643xx_eth_shared_probe()
2859 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? mv643xx_eth_shared_probe()
2860 pd->tx_csum_limit : 9 * 1024; mv643xx_eth_shared_probe()
2906 struct mv643xx_eth_platform_data *pd) set_params()
2911 if (is_valid_ether_addr(pd->mac_addr)) set_params()
2912 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); set_params()
2917 if (pd->rx_queue_size) set_params()
2918 mp->rx_ring_size = pd->rx_queue_size; set_params()
2919 mp->rx_desc_sram_addr = pd->rx_sram_addr; set_params()
2920 mp->rx_desc_sram_size = pd->rx_sram_size; set_params()
2922 mp->rxq_count = pd->rx_queue_count ? : 1; set_params()
2925 if (pd->tx_queue_size) set_params()
2926 tx_ring_size = pd->tx_queue_size; set_params()
2934 mp->tx_desc_sram_addr = pd->tx_sram_addr; set_params()
2935 mp->tx_desc_sram_size = pd->tx_sram_size; set_params()
2937 mp->txq_count = pd->tx_queue_count ? : 1; set_params()
3041 struct mv643xx_eth_platform_data *pd; mv643xx_eth_probe() local
3047 pd = dev_get_platdata(&pdev->dev); mv643xx_eth_probe()
3048 if (pd == NULL) { mv643xx_eth_probe()
3053 if (pd->shared == NULL) { mv643xx_eth_probe()
3065 mp->shared = platform_get_drvdata(pd->shared); mv643xx_eth_probe()
3066 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); mv643xx_eth_probe()
3067 mp->port_num = pd->port_number; mv643xx_eth_probe()
3093 set_params(mp, pd); mv643xx_eth_probe()
3098 if (pd->phy_node) { mv643xx_eth_probe()
3099 mp->phy = of_phy_connect(mp->dev, pd->phy_node, mv643xx_eth_probe()
3106 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mv643xx_eth_probe()
3107 mp->phy = phy_scan(mp, pd->phy_addr); mv643xx_eth_probe()
3112 phy_init(mp, pd->speed, pd->duplex); mv643xx_eth_probe()
3123 init_pscr(mp, pd->speed, pd->duplex); mv643xx_eth_probe()
2905 set_params(struct mv643xx_eth_private *mp, struct mv643xx_eth_platform_data *pd) set_params() argument
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dtenxpress.c258 struct tenxpress_phy_data *pd = efx->phy_data; sfx7101_check_bad_lp() local
271 pd->bad_lp_tries++; sfx7101_check_bad_lp()
275 if (!pd->bad_lp_tries) sfx7101_check_bad_lp()
280 if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) { sfx7101_check_bad_lp()
296 pd->bad_lp_tries = bad_lp; sfx7101_check_bad_lp()
/linux-4.1.27/drivers/infiniband/ulp/iser/
H A Diser_verbs.c121 device->pd = ib_alloc_pd(device->ib_device); iser_create_device_ib_res()
122 if (IS_ERR(device->pd)) iser_create_device_ib_res()
146 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | iser_create_device_ib_res()
171 ib_dealloc_pd(device->pd); iser_create_device_ib_res()
198 (void)ib_dealloc_pd(device->pd); iser_free_device_ib_res()
204 device->pd = NULL; iser_free_device_ib_res()
240 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params); iser_create_fmr_pool()
277 iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, iser_alloc_pi_ctx() argument
298 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, iser_alloc_pi_ctx()
306 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); iser_alloc_pi_ctx()
336 iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, iser_create_fastreg_desc() argument
350 desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1); iser_create_fastreg_desc()
359 ret = iser_alloc_pi_ctx(ib_device, pd, desc); iser_create_fastreg_desc()
394 ret = iser_create_fastreg_desc(device->ib_device, device->pd, iser_create_fastreg_pool()
504 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); iser_create_ib_conn_res()
/linux-4.1.27/drivers/video/fbdev/riva/
H A Dfbdev.c1734 static int riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd) riva_get_EDID_OF() argument
1745 dp = pci_device_to_OF_node(pd); riva_get_EDID_OF()
1860 static u32 riva_get_arch(struct pci_dev *pd) riva_get_arch() argument
1864 switch (pd->device & 0x0ff0) { riva_get_arch()
1898 static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent) rivafb_probe() argument
1905 assert(pd != NULL); rivafb_probe()
1907 info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev); rivafb_probe()
1914 default_par->pdev = pd; rivafb_probe()
1922 ret = pci_enable_device(pd); rivafb_probe()
1928 ret = pci_request_regions(pd, "rivafb"); rivafb_probe()
1935 default_par->riva.Architecture = riva_get_arch(pd); rivafb_probe()
1937 default_par->Chipset = (pd->vendor << 16) | pd->device; rivafb_probe()
1948 sprintf(rivafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4); rivafb_probe()
1958 rivafb_fix.mmio_len = pci_resource_len(pd, 0); rivafb_probe()
1959 rivafb_fix.smem_len = pci_resource_len(pd, 1); rivafb_probe()
1965 pci_read_config_word(pd, PCI_COMMAND, &cmd); rivafb_probe()
1967 pci_write_config_word(pd, PCI_COMMAND, cmd); rivafb_probe()
1970 rivafb_fix.mmio_start = pci_resource_start(pd, 0); rivafb_probe()
1971 rivafb_fix.smem_start = pci_resource_start(pd, 1); rivafb_probe()
2038 riva_get_EDID(info, pd); rivafb_probe()
2050 pci_set_drvdata(pd, info); rivafb_probe()
2083 pci_release_regions(pd); rivafb_probe()
2093 static void rivafb_remove(struct pci_dev *pd) rivafb_remove() argument
2095 struct fb_info *info = pci_get_drvdata(pd); rivafb_remove()
2119 pci_release_regions(pd); rivafb_remove()
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
H A Dacx.c336 struct acx_packet_detection *pd; wl1251_acx_pd_threshold() local
339 wl1251_debug(DEBUG_ACX, "acx data pd threshold"); wl1251_acx_pd_threshold()
341 pd = kzalloc(sizeof(*pd), GFP_KERNEL); wl1251_acx_pd_threshold()
342 if (!pd) wl1251_acx_pd_threshold()
347 ret = wl1251_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); wl1251_acx_pd_threshold()
349 wl1251_warning("failed to set pd threshold: %d", ret); wl1251_acx_pd_threshold()
354 kfree(pd); wl1251_acx_pd_threshold()
