Lines matching refs:rmu (cross-reference hits in the Freescale RapidIO message-unit (RMU) driver, powerpc; matches are grouped by function below)

211 struct fsl_rmu *rmu = GET_RMM_HANDLE(port); in fsl_rio_tx_handler() local
213 osr = in_be32(&rmu->msg_regs->osr); in fsl_rio_tx_handler()
217 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE); in fsl_rio_tx_handler()
223 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI); in fsl_rio_tx_handler()
228 u32 dqp = in_be32(&rmu->msg_regs->odqdpar); in fsl_rio_tx_handler()
229 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5; in fsl_rio_tx_handler()
231 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id, in fsl_rio_tx_handler()
236 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI); in fsl_rio_tx_handler()
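
fsl_rio_tx_handler() is the outbound-message interrupt handler: read the
outbound status register (OSR), write-1-clear the transfer-error (TE) and
queue-overflow (QOI) bits, and on end-of-message (EOMI) turn the hardware
dequeue pointer into a ring slot (descriptors are 32 bytes, hence the >> 5)
before running the registered completion callback. A minimal sketch stitched
from the fragments above; kernel context (linux/rio.h, asm/io.h, the driver's
fsl_rmu definitions) is assumed throughout these sketches, as are the bit
tests and the mcback() trailing arguments:

    static irqreturn_t fsl_rio_tx_handler(int irq, void *dev_instance)
    {
            struct rio_mport *port = dev_instance;
            struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
            u32 osr = in_be32(&rmu->msg_regs->osr);

            if (osr & RIO_MSG_OSR_TE)       /* ack transfer error */
                    out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);

            if (osr & RIO_MSG_OSR_QOI)      /* ack queue overflow */
                    out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);

            if (osr & RIO_MSG_OSR_EOMI) {   /* end of message: run callback */
                    u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
                    int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;

                    if (port->outb_msg[0].mcback)
                            port->outb_msg[0].mcback(port,
                                    rmu->msg_tx_ring.dev_id, -1, slot);
                    out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
            }
            return IRQ_HANDLED;
    }
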
256 struct fsl_rmu *rmu = GET_RMM_HANDLE(port); in fsl_rio_rx_handler() local
258 isr = in_be32(&rmu->msg_regs->isr); in fsl_rio_rx_handler()
262 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE); in fsl_rio_rx_handler()
274 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id, in fsl_rio_rx_handler()
279 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI); in fsl_rio_rx_handler()
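
fsl_rio_rx_handler() is the inbound counterpart: it acks transfer errors in
the ISR and, when a frame descriptor lands in the queue (DIQI), calls the
inbound callback so the client can drain messages via fsl_get_inb_message().
A sketch under the same assumptions as above:

    static irqreturn_t fsl_rio_rx_handler(int irq, void *dev_instance)
    {
            struct rio_mport *port = dev_instance;
            struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
            u32 isr = in_be32(&rmu->msg_regs->isr);

            if (isr & RIO_MSG_ISR_TE)       /* ack transfer error */
                    out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);

            if (isr & RIO_MSG_ISR_DIQI) {   /* descriptor(s) in queue */
                    if (port->inb_msg[0].mcback)
                            port->inb_msg[0].mcback(port,
                                    rmu->msg_rx_ring.dev_id, -1, -1);
                    out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
            }
            return IRQ_HANDLED;
    }
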
655 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_add_outb_message() local
657 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt in fsl_add_outb_message()
658 + rmu->msg_tx_ring.tx_slot; in fsl_add_outb_message()
669 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer, in fsl_add_outb_message()
672 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot] in fsl_add_outb_message()
686 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot]; in fsl_add_outb_message()
689 omr = in_be32(&rmu->msg_regs->omr); in fsl_add_outb_message()
690 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI); in fsl_add_outb_message()
693 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size) in fsl_add_outb_message()
694 rmu->msg_tx_ring.tx_slot = 0; in fsl_add_outb_message()
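
fsl_add_outb_message() queues one outbound message: copy the payload into the
current slot's DMA buffer, fill in the 32-byte TX descriptor, then nudge the
message unit via the OMR message-unit-increment bit (RIO_MSG_OMR_MUI). A
sketch; the descriptor field name saddr, the zero-pad length and the return
value are assumptions not visible in the fragments:

    static int fsl_add_outb_message(struct rio_mport *mport,
                    struct rio_dev *rdev, int mbox, void *buffer, size_t len)
    {
            struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
            struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
                            + rmu->msg_tx_ring.tx_slot;
            u32 omr;

            /* Copy the payload and zero-pad the rest of the slot buffer. */
            memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot],
                            buffer, len);
            memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
                            + len, 0, RIO_MAX_MSG_SIZE - len);

            /* Point the descriptor at the slot buffer (0x4: enable snoop). */
            desc->saddr = 0x00000004
                    | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

            /* Tell the unit a new descriptor is ready. */
            omr = in_be32(&rmu->msg_regs->omr);
            out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

            if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
                    rmu->msg_tx_ring.tx_slot = 0;

            return 0;
    }

The 32-byte descriptor stride here is what makes the >> 5 slot computation in
fsl_rio_tx_handler() (line 229) work.
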
716 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_open_outb_mbox() local
725 rmu->msg_tx_ring.dev_id = dev_id; in fsl_open_outb_mbox()
726 rmu->msg_tx_ring.size = entries; in fsl_open_outb_mbox()
728 for (i = 0; i < rmu->msg_tx_ring.size; i++) { in fsl_open_outb_mbox()
729 rmu->msg_tx_ring.virt_buffer[i] = in fsl_open_outb_mbox()
731 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL); in fsl_open_outb_mbox()
732 if (!rmu->msg_tx_ring.virt_buffer[i]) { in fsl_open_outb_mbox()
734 for (j = 0; j < rmu->msg_tx_ring.size; j++) in fsl_open_outb_mbox()
735 if (rmu->msg_tx_ring.virt_buffer[j]) in fsl_open_outb_mbox()
738 rmu->msg_tx_ring.virt_buffer[j], in fsl_open_outb_mbox()
740 rmu->msg_tx_ring.phys_buffer[j]); in fsl_open_outb_mbox()
747 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, in fsl_open_outb_mbox()
748 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, in fsl_open_outb_mbox()
749 &rmu->msg_tx_ring.phys, GFP_KERNEL); in fsl_open_outb_mbox()
750 if (!rmu->msg_tx_ring.virt) { in fsl_open_outb_mbox()
754 memset(rmu->msg_tx_ring.virt, 0, in fsl_open_outb_mbox()
755 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE); in fsl_open_outb_mbox()
756 rmu->msg_tx_ring.tx_slot = 0; in fsl_open_outb_mbox()
759 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys); in fsl_open_outb_mbox()
760 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys); in fsl_open_outb_mbox()
763 out_be32(&rmu->msg_regs->osar, 0x00000004); in fsl_open_outb_mbox()
766 out_be32(&rmu->msg_regs->osr, 0x000000b3); in fsl_open_outb_mbox()
781 out_be32(&rmu->msg_regs->omr, 0x00100220); in fsl_open_outb_mbox()
784 out_be32(&rmu->msg_regs->omr, in fsl_open_outb_mbox()
785 in_be32(&rmu->msg_regs->omr) | in fsl_open_outb_mbox()
789 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1); in fsl_open_outb_mbox()
796 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, in fsl_open_outb_mbox()
797 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); in fsl_open_outb_mbox()
800 for (i = 0; i < rmu->msg_tx_ring.size; i++) in fsl_open_outb_mbox()
802 rmu->msg_tx_ring.virt_buffer[i], in fsl_open_outb_mbox()
803 rmu->msg_tx_ring.phys_buffer[i]); in fsl_open_outb_mbox()
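
fsl_open_outb_mbox() allocates one coherent DMA buffer per ring entry plus a
descriptor ring of entries * RIO_MSG_DESC_SIZE bytes, and unwinds whatever
was already allocated on failure (lines 734-740 and 796-803 above are the two
unwind paths). The interesting part is the register programming, annotated
below; the comments and the continuation of lines 784-785 are inferred, the
queue-size field mirroring the inbound IMR programming at line 900:

    out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys); /* HW dequeue ptr */
    out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys); /* SW enqueue ptr */
    out_be32(&rmu->msg_regs->osar, 0x00000004);   /* source address: snoop */
    out_be32(&rmu->msg_regs->osr, 0x000000b3);    /* clear stale status (w1c) */
    out_be32(&rmu->msg_regs->omr, 0x00100220);    /* snoop, IRQs, chaining; disabled */
    out_be32(&rmu->msg_regs->omr,                 /* circular-queue size field */
                    in_be32(&rmu->msg_regs->omr) |
                    ((get_bitmask_order(entries) - 2) << 12));
    out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1); /* enable */
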
819 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_close_outb_mbox() local
822 out_be32(&rmu->msg_regs->omr, 0); in fsl_close_outb_mbox()
826 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, in fsl_close_outb_mbox()
827 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); in fsl_close_outb_mbox()
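
fsl_close_outb_mbox() is the teardown mirror: write 0 to OMR to disable the
unit, then free the descriptor ring. A sketch; any interrupt teardown the
real function performs is omitted here:

    static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
    {
            struct rio_priv *priv = mport->priv;
            struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

            out_be32(&rmu->msg_regs->omr, 0);       /* disable the unit */
            dma_free_coherent(priv->dev,
                            rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                            rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
    }
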
849 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_open_inb_mbox() local
858 rmu->msg_rx_ring.dev_id = dev_id; in fsl_open_inb_mbox()
859 rmu->msg_rx_ring.size = entries; in fsl_open_inb_mbox()
860 rmu->msg_rx_ring.rx_slot = 0; in fsl_open_inb_mbox()
861 for (i = 0; i < rmu->msg_rx_ring.size; i++) in fsl_open_inb_mbox()
862 rmu->msg_rx_ring.virt_buffer[i] = NULL; in fsl_open_inb_mbox()
865 rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, in fsl_open_inb_mbox()
866 rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, in fsl_open_inb_mbox()
867 &rmu->msg_rx_ring.phys, GFP_KERNEL); in fsl_open_inb_mbox()
868 if (!rmu->msg_rx_ring.virt) { in fsl_open_inb_mbox()
874 out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys); in fsl_open_inb_mbox()
875 out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys); in fsl_open_inb_mbox()
878 out_be32(&rmu->msg_regs->isr, 0x00000091); in fsl_open_inb_mbox()
885 rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, in fsl_open_inb_mbox()
886 rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys); in fsl_open_inb_mbox()
897 out_be32(&rmu->msg_regs->imr, 0x001b0060); in fsl_open_inb_mbox()
900 setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); in fsl_open_inb_mbox()
903 setbits32(&rmu->msg_regs->imr, 0x1); in fsl_open_inb_mbox()
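
fsl_open_inb_mbox() differs from the outbound side in that inbound frames
land directly in one coherent region of entries * RIO_MAX_MSG_SIZE bytes;
there is no separate descriptor ring, only the per-slot virt_buffer[]
pointers handed in later by fsl_add_inb_buffer(). The register tail,
annotated (comments inferred, same caveats as above):

    out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys); /* dequeue */
    out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys); /* enqueue */
    out_be32(&rmu->msg_regs->isr, 0x00000091);    /* clear stale status (w1c) */
    out_be32(&rmu->msg_regs->imr, 0x001b0060);    /* snoop, IRQs; disabled */
    setbits32(&rmu->msg_regs->imr,                /* circular-queue size field */
                    (get_bitmask_order(entries) - 2) << 12);
    setbits32(&rmu->msg_regs->imr, 0x1);          /* enable the unit */
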
920 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_close_inb_mbox() local
923 out_be32(&rmu->msg_regs->imr, 0); in fsl_close_inb_mbox()
926 dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, in fsl_close_inb_mbox()
927 rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys); in fsl_close_inb_mbox()
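
fsl_close_inb_mbox() mirrors the outbound teardown: disable via IMR, then
free the frame ring:

    static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
    {
            struct rio_priv *priv = mport->priv;
            struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

            out_be32(&rmu->msg_regs->imr, 0);       /* disable the unit */
            dma_free_coherent(priv->dev,
                            rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                            rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
    }
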
945 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_add_inb_buffer() local
948 rmu->msg_rx_ring.rx_slot); in fsl_add_inb_buffer()
950 if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) { in fsl_add_inb_buffer()
953 rmu->msg_rx_ring.rx_slot); in fsl_add_inb_buffer()
958 rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf; in fsl_add_inb_buffer()
959 if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size) in fsl_add_inb_buffer()
960 rmu->msg_rx_ring.rx_slot = 0; in fsl_add_inb_buffer()
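
fsl_add_inb_buffer() hands one client buffer to the next free RX slot and
refuses to overwrite a slot that has not been drained yet. Sketch; the
error code is an assumption:

    static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
    {
            struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

            if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot])
                    return -ENOMEM;         /* slot still occupied */

            rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
            if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
                    rmu->msg_rx_ring.rx_slot = 0;

            return 0;
    }
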
976 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); in fsl_get_inb_message() local
982 phys_buf = in_be32(&rmu->msg_regs->ifqdpar); in fsl_get_inb_message()
985 if (phys_buf == in_be32(&rmu->msg_regs->ifqepar)) in fsl_get_inb_message()
988 virt_buf = rmu->msg_rx_ring.virt + (phys_buf in fsl_get_inb_message()
989 - rmu->msg_rx_ring.phys); in fsl_get_inb_message()
990 buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; in fsl_get_inb_message()
991 buf = rmu->msg_rx_ring.virt_buffer[buf_idx]; in fsl_get_inb_message()
1003 rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL; in fsl_get_inb_message()
1006 setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI); in fsl_get_inb_message()
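
fsl_get_inb_message() pulls one frame out of the ring: an empty queue is
detected by dequeue pointer == enqueue pointer, the hardware dequeue pointer
is translated back into a slot index, the frame is copied into the buffer
registered for that slot, and setting RIO_MSG_IMR_MI tells the unit to
advance its frame-queue dequeue pointer. Sketch; the copy length and the
NULL-on-empty return are assumptions:

    static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
    {
            struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
            u32 phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
            void *virt_buf, *buf;
            int buf_idx;

            if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
                    return NULL;            /* queue is empty */

            virt_buf = rmu->msg_rx_ring.virt + (phys_buf
                            - rmu->msg_rx_ring.phys);
            buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
            buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

            memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
            rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

            setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI); /* advance queue */
            return buf;
    }
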
1061 struct fsl_rmu *rmu; in fsl_rio_setup_rmu() local
1078 rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL); in fsl_rio_setup_rmu()
1079 if (!rmu) in fsl_rio_setup_rmu()
1087 kfree(rmu); in fsl_rio_setup_rmu()
1092 rmu->msg_regs = (struct rio_msg_regs *) in fsl_rio_setup_rmu()
1095 rmu->txirq = irq_of_parse_and_map(node, 0); in fsl_rio_setup_rmu()
1096 rmu->rxirq = irq_of_parse_and_map(node, 1); in fsl_rio_setup_rmu()
1098 node->full_name, rmu->txirq, rmu->rxirq); in fsl_rio_setup_rmu()
1100 priv->rmm_handle = rmu; in fsl_rio_setup_rmu()
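
fsl_rio_setup_rmu() is the probe-time constructor: allocate the fsl_rmu
handle, map the unit's register block (the device-tree "reg" parsing between
lines 1080 and 1092 is elided below; the kfree at line 1087 is its failure
path), resolve the TX/RX interrupts from the node, and publish the handle
through priv->rmm_handle. Sketch; the signature and return values are
assumptions:

    int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
    {
            struct rio_priv *priv = mport->priv;
            struct fsl_rmu *rmu;

            rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
            if (!rmu)
                    return -ENOMEM;

            /* ... parse the node's "reg" property and set rmu->msg_regs;
             * kfree(rmu) and bail out if that fails (line 1087) ... */

            rmu->txirq = irq_of_parse_and_map(node, 0);
            rmu->rxirq = irq_of_parse_and_map(node, 1);
            pr_info("%s: txirq: %d, rxirq %d\n",
                            node->full_name, rmu->txirq, rmu->rxirq);

            priv->rmm_handle = rmu;
            return 0;
    }
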