bchan 344 drivers/dma/qcom/bam_dma.c #define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
bchan 425 drivers/dma/qcom/bam_dma.c static void bam_reset_channel(struct bam_chan *bchan)
bchan 427 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 429 drivers/dma/qcom/bam_dma.c lockdep_assert_held(&bchan->vc.lock);
bchan 432 drivers/dma/qcom/bam_dma.c writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
bchan 433 drivers/dma/qcom/bam_dma.c writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
bchan 439 drivers/dma/qcom/bam_dma.c bchan->initialized = 0;
bchan 449 drivers/dma/qcom/bam_dma.c static void bam_chan_init_hw(struct bam_chan *bchan,
bchan 452 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 456 drivers/dma/qcom/bam_dma.c bam_reset_channel(bchan);
bchan 462 drivers/dma/qcom/bam_dma.c writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
bchan 463 drivers/dma/qcom/bam_dma.c bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
bchan 465 drivers/dma/qcom/bam_dma.c bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
bchan 469 drivers/dma/qcom/bam_dma.c bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
bchan 473 drivers/dma/qcom/bam_dma.c val |= BIT(bchan->id);
bchan 484 drivers/dma/qcom/bam_dma.c writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
bchan 486 drivers/dma/qcom/bam_dma.c bchan->initialized = 1;
bchan 489 drivers/dma/qcom/bam_dma.c bchan->head = 0;
bchan 490 drivers/dma/qcom/bam_dma.c bchan->tail = 0;
bchan 501 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 502 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 504 drivers/dma/qcom/bam_dma.c if (bchan->fifo_virt)
bchan 508 drivers/dma/qcom/bam_dma.c bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
bchan 509 drivers/dma/qcom/bam_dma.c &bchan->fifo_phys, GFP_KERNEL);
bchan 511 drivers/dma/qcom/bam_dma.c if (!bchan->fifo_virt) {
bchan 536 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 537 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 548 drivers/dma/qcom/bam_dma.c if (!list_empty(&bchan->desc_list)) {
bchan 549 drivers/dma/qcom/bam_dma.c dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
bchan 553 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flags);
bchan 554 drivers/dma/qcom/bam_dma.c bam_reset_channel(bchan);
bchan 555 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flags);
bchan 557 drivers/dma/qcom/bam_dma.c dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
bchan 558 drivers/dma/qcom/bam_dma.c bchan->fifo_phys);
bchan 559 drivers/dma/qcom/bam_dma.c bchan->fifo_virt = NULL;
bchan 563 drivers/dma/qcom/bam_dma.c val &= ~BIT(bchan->id);
bchan 567 drivers/dma/qcom/bam_dma.c writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
bchan 585 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 588 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flag);
bchan 589 drivers/dma/qcom/bam_dma.c memcpy(&bchan->slave, cfg, sizeof(*cfg));
bchan 590 drivers/dma/qcom/bam_dma.c bchan->reconfigure = 1;
bchan 591 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flag);
bchan 611 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 612 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 673 drivers/dma/qcom/bam_dma.c return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
bchan 690 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
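The IS_BUSY() definition at line 344 is cut short by the indexer's one-line display; from the matching CIRC_SPACE() call at line 1013, it evidently applies the <linux/circ_buf.h> ring accounting to the channel's head/tail descriptor indices. A minimal, stand-alone sketch of that arithmetic (the MAX_DESCRIPTORS value here is illustrative, not taken from the driver):

#include <stdio.h>

/* Same definitions as include/linux/circ_buf.h; "size" must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define MAX_DESCRIPTORS 4095 /* illustrative: one less than a power of two */

int main(void)
{
	unsigned int head = 0, tail = 0; /* retire index / submit index */

	/* Producer side: bam_start_dma() advances tail as it queues work. */
	tail = (tail + 10) % MAX_DESCRIPTORS;
	printf("space left: %u\n", CIRC_SPACE(tail, head, MAX_DESCRIPTORS + 1));

	/* Consumer side: the IRQ handler advances head as work completes. */
	head = (head + 4) % MAX_DESCRIPTORS;
	printf("in flight:  %u\n", CIRC_CNT(tail, head, MAX_DESCRIPTORS + 1));
	return 0;
}

The circ_buf macros mask with size - 1, so they only work for power-of-two sizes, which is presumably why the driver passes MAX_DESCRIPTORS + 1 at lines 819 and 1013.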
bchan 696 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flag);
bchan 710 drivers/dma/qcom/bam_dma.c if (!list_empty(&bchan->desc_list)) {
bchan 711 drivers/dma/qcom/bam_dma.c async_desc = list_first_entry(&bchan->desc_list,
bchan 713 drivers/dma/qcom/bam_dma.c bam_chan_init_hw(bchan, async_desc->dir);
bchan 717 drivers/dma/qcom/bam_dma.c &bchan->desc_list, desc_node) {
bchan 718 drivers/dma/qcom/bam_dma.c list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
bchan 722 drivers/dma/qcom/bam_dma.c vchan_get_all_descriptors(&bchan->vc, &head);
bchan 723 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flag);
bchan 725 drivers/dma/qcom/bam_dma.c vchan_dma_desc_free_list(&bchan->vc, &head);
bchan 737 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 738 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 746 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flag);
bchan 747 drivers/dma/qcom/bam_dma.c writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan 748 drivers/dma/qcom/bam_dma.c bchan->paused = 1;
bchan 749 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flag);
bchan 763 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 764 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 772 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flag);
bchan 773 drivers/dma/qcom/bam_dma.c writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan 774 drivers/dma/qcom/bam_dma.c bchan->paused = 0;
bchan 775 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flag);
bchan 802 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = &bdev->channels[i];
bchan 812 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flags);
bchan 819 drivers/dma/qcom/bam_dma.c avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
bchan 821 drivers/dma/qcom/bam_dma.c if (offset < bchan->head)
bchan 825 drivers/dma/qcom/bam_dma.c &bchan->desc_list, desc_node) {
bchan 831 drivers/dma/qcom/bam_dma.c bchan->head += async_desc->xfer_len;
bchan 832 drivers/dma/qcom/bam_dma.c bchan->head %= MAX_DESCRIPTORS;
bchan 847 drivers/dma/qcom/bam_dma.c &bchan->vc.desc_issued);
bchan 852 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flags);
bchan 910 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 923 drivers/dma/qcom/bam_dma.c return bchan->paused ? DMA_PAUSED : ret;
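Lines 710-725 show the terminate path following the usual virt-dma discipline: descriptors are collected under vc.lock with vchan_get_all_descriptors(), and only freed after the lock is dropped via vchan_dma_desc_free_list(). A condensed sketch of that pattern (the function name and return value are stand-ins, not the driver's exact body):

#include "../virt-dma.h" /* as drivers/dma/qcom/bam_dma.c does */

static int bam_terminate_sketch(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	spin_lock_irqsave(&bchan->vc.lock, flag);
	/* Detach every descriptor onto a private list while holding vc.lock. */
	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	/* Free outside the lock, matching lines 722-725 of the listing. */
	vchan_dma_desc_free_list(&bchan->vc, &head);
	return 0;
}

The pause/resume pair at lines 746-775 uses the same locking shape around a single BAM_P_HALT register write, toggling bchan->paused under vc.lock.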
bchan 925 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flags);
bchan 926 drivers/dma/qcom/bam_dma.c vd = vchan_find_desc(&bchan->vc, cookie);
bchan 930 drivers/dma/qcom/bam_dma.c list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
bchan 940 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flags);
bchan 944 drivers/dma/qcom/bam_dma.c if (ret == DMA_IN_PROGRESS && bchan->paused)
bchan 955 drivers/dma/qcom/bam_dma.c static void bam_apply_new_config(struct bam_chan *bchan,
bchan 958 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 963 drivers/dma/qcom/bam_dma.c maxburst = bchan->slave.src_maxburst;
bchan 965 drivers/dma/qcom/bam_dma.c maxburst = bchan->slave.dst_maxburst;
bchan 971 drivers/dma/qcom/bam_dma.c bchan->reconfigure = 0;
bchan 978 drivers/dma/qcom/bam_dma.c static void bam_start_dma(struct bam_chan *bchan)
bchan 980 drivers/dma/qcom/bam_dma.c struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
bchan 981 drivers/dma/qcom/bam_dma.c struct bam_device *bdev = bchan->bdev;
bchan 984 drivers/dma/qcom/bam_dma.c struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
bchan 990 drivers/dma/qcom/bam_dma.c lockdep_assert_held(&bchan->vc.lock);
bchan 999 drivers/dma/qcom/bam_dma.c while (vd && !IS_BUSY(bchan)) {
bchan 1005 drivers/dma/qcom/bam_dma.c if (!bchan->initialized)
bchan 1006 drivers/dma/qcom/bam_dma.c bam_chan_init_hw(bchan, async_desc->dir);
bchan 1009 drivers/dma/qcom/bam_dma.c if (bchan->reconfigure)
bchan 1010 drivers/dma/qcom/bam_dma.c bam_apply_new_config(bchan, async_desc->dir);
bchan 1013 drivers/dma/qcom/bam_dma.c avail = CIRC_SPACE(bchan->tail, bchan->head,
bchan 1026 drivers/dma/qcom/bam_dma.c vd = vchan_next_desc(&bchan->vc);
bchan 1044 drivers/dma/qcom/bam_dma.c if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
bchan 1045 drivers/dma/qcom/bam_dma.c u32 partial = MAX_DESCRIPTORS - bchan->tail;
bchan 1047 drivers/dma/qcom/bam_dma.c memcpy(&fifo[bchan->tail], desc,
bchan 1053 drivers/dma/qcom/bam_dma.c memcpy(&fifo[bchan->tail], desc,
bchan 1058 drivers/dma/qcom/bam_dma.c bchan->tail += async_desc->xfer_len;
bchan 1059 drivers/dma/qcom/bam_dma.c bchan->tail %= MAX_DESCRIPTORS;
bchan 1060 drivers/dma/qcom/bam_dma.c list_add_tail(&async_desc->desc_node, &bchan->desc_list);
bchan 1065 drivers/dma/qcom/bam_dma.c writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
bchan 1066 drivers/dma/qcom/bam_dma.c bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
bchan 1081 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan;
bchan 1087 drivers/dma/qcom/bam_dma.c bchan = &bdev->channels[i];
bchan 1088 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flags);
bchan 1090 drivers/dma/qcom/bam_dma.c if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
bchan 1091 drivers/dma/qcom/bam_dma.c bam_start_dma(bchan);
bchan 1092 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flags);
bchan 1105 drivers/dma/qcom/bam_dma.c struct bam_chan *bchan = to_bam_chan(chan);
bchan 1108 drivers/dma/qcom/bam_dma.c spin_lock_irqsave(&bchan->vc.lock, flags);
bchan 1111 drivers/dma/qcom/bam_dma.c if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
bchan 1112 drivers/dma/qcom/bam_dma.c bam_start_dma(bchan);
bchan 1114 drivers/dma/qcom/bam_dma.c spin_unlock_irqrestore(&bchan->vc.lock, flags);
bchan 1207 drivers/dma/qcom/bam_dma.c static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
bchan 1210 drivers/dma/qcom/bam_dma.c bchan->id = index;
bchan 1211 drivers/dma/qcom/bam_dma.c bchan->bdev = bdev;
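Lines 1044-1059 are the interesting part of bam_start_dma()'s FIFO handling: a transfer that would run past the end of the descriptor ring is written with two memcpy() calls, the second wrapping to slot 0. A stand-alone user-space illustration of that split (the descriptor layout and ring size are simplified stand-ins for struct bam_desc_hw and MAX_DESCRIPTORS):

#include <stdio.h>
#include <string.h>

#define MAX_DESCRIPTORS 8 /* tiny ring so the wrap is visible */

struct desc { unsigned int addr, size; }; /* stand-in for struct bam_desc_hw */

static void queue(struct desc *fifo, unsigned int *tail,
		  const struct desc *src, unsigned int len)
{
	if (*tail + len > MAX_DESCRIPTORS) {
		unsigned int partial = MAX_DESCRIPTORS - *tail;

		/* Fill to the end of the ring, then wrap to slot 0. */
		memcpy(&fifo[*tail], src, partial * sizeof(*src));
		memcpy(fifo, src + partial, (len - partial) * sizeof(*src));
	} else {
		memcpy(&fifo[*tail], src, len * sizeof(*src));
	}
	*tail = (*tail + len) % MAX_DESCRIPTORS;
}

int main(void)
{
	struct desc fifo[MAX_DESCRIPTORS] = { 0 }, src[5] = {
		{ 1, 1 }, { 2, 1 }, { 3, 1 }, { 4, 1 }, { 5, 1 } };
	unsigned int tail = 6; /* near the end, so the copy must split */

	queue(fifo, &tail, src, 5);
	printf("new tail = %u, fifo[0].addr = %u\n", tail, fifo[0].addr);
	return 0;
}

After the split copy the driver advances tail modulo MAX_DESCRIPTORS (line 1059) and kicks the hardware by writing the new tail offset, in bytes, to BAM_P_EVNT_REG (lines 1065-1066).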
bchan 1213 drivers/dma/qcom/bam_dma.c vchan_init(&bchan->vc, &bdev->common);
bchan 1214 drivers/dma/qcom/bam_dma.c bchan->vc.desc_free = bam_dma_free_desc;
bchan 1215 drivers/dma/qcom/bam_dma.c INIT_LIST_HEAD(&bchan->desc_list);
bchan 283 drivers/isdn/mISDN/socket.c struct mISDNchannel *bchan, *next;
bchan 297 drivers/isdn/mISDN/socket.c list_for_each_entry_safe(bchan, next,
bchan 299 drivers/isdn/mISDN/socket.c if (bchan->nr == cq.channel) {
bchan 300 drivers/isdn/mISDN/socket.c err = bchan->ctrl(bchan,
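The mISDN hits at the bottom are a standard lookup-by-number walk with list_for_each_entry_safe(); the ctrl() call at socket.c line 300 is truncated by the indexer, so its remaining arguments are not reproduced here. A generic sketch of the pattern, with hypothetical type and field names rather than the real struct mISDNchannel:

#include <linux/errno.h>
#include <linux/list.h>

/* Hypothetical stand-in for struct mISDNchannel. */
struct chan {
	struct list_head list;
	int nr;
	int (*ctrl)(struct chan *ch, unsigned int op, void *arg);
};

/* Find the channel whose number matches and invoke its ctrl() hook; the
 * _safe iterator tolerates an entry disappearing during the walk. */
static int chan_ctrl_by_nr(struct list_head *channels, int nr,
			   unsigned int op, void *arg)
{
	struct chan *ch, *next;

	list_for_each_entry_safe(ch, next, channels, list)
		if (ch->nr == nr)
			return ch->ctrl(ch, op, arg);
	return -ENODEV;
}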