Lines matching refs:chan — drivers/dma/fsldma.c (Freescale DMA engine driver)

43 #define chan_dbg(chan, fmt, arg...)					\  argument
44 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
45 #define chan_err(chan, fmt, arg...) \ argument
46 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
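
These wrappers simply prefix every log line with the channel name. A minimal expansion sketch, using the submit-path call from line 419 below (dev_dbg/dev_err are the standard kernel device-logging helpers):

	/* chan_dbg(chan, "cannot submit due to suspend\n") expands to: */
	dev_dbg(chan->dev, "%s: " "cannot submit due to suspend\n", chan->name);
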
54 static void set_sr(struct fsldma_chan *chan, u32 val) in set_sr() argument
56 DMA_OUT(chan, &chan->regs->sr, val, 32); in set_sr()
59 static u32 get_sr(struct fsldma_chan *chan) in get_sr() argument
61 return DMA_IN(chan, &chan->regs->sr, 32); in get_sr()
64 static void set_mr(struct fsldma_chan *chan, u32 val) in set_mr() argument
66 DMA_OUT(chan, &chan->regs->mr, val, 32); in set_mr()
69 static u32 get_mr(struct fsldma_chan *chan) in get_mr() argument
71 return DMA_IN(chan, &chan->regs->mr, 32); in get_mr()
74 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) in set_cdar() argument
76 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); in set_cdar()
79 static dma_addr_t get_cdar(struct fsldma_chan *chan) in get_cdar() argument
81 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; in get_cdar()
84 static void set_bcr(struct fsldma_chan *chan, u32 val) in set_bcr() argument
86 DMA_OUT(chan, &chan->regs->bcr, val, 32); in set_bcr()
89 static u32 get_bcr(struct fsldma_chan *chan) in get_bcr() argument
91 return DMA_IN(chan, &chan->regs->bcr, 32); in get_bcr()
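
Every register accessor above funnels through DMA_IN/DMA_OUT, which are defined elsewhere in the driver. A sketch of how they plausibly dispatch on endianness and register width (the FSL_DMA_BIG_ENDIAN feature bit and the token-pasted accessor names are assumptions based on the fsldma header):

	/* Sketch: pick big- or little-endian MMIO accessors per channel,
	 * pasting the width into the helper name (in_be32, in_le32, ...). */
	#define DMA_IN(chan, addr, width)				\
		(((chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			in_be##width(addr) : in_le##width(addr))

	#define DMA_OUT(chan, addr, val, width)				\
		(((chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			out_be##width(addr, (val)) : out_le##width(addr, (val)))
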
98 static void set_desc_cnt(struct fsldma_chan *chan, in set_desc_cnt() argument
101 hw->count = CPU_TO_DMA(chan, count, 32); in set_desc_cnt()
104 static void set_desc_src(struct fsldma_chan *chan, in set_desc_src() argument
109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) in set_desc_src()
111 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); in set_desc_src()
114 static void set_desc_dst(struct fsldma_chan *chan, in set_desc_dst() argument
119 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) in set_desc_dst()
121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); in set_desc_dst()
124 static void set_desc_next(struct fsldma_chan *chan, in set_desc_next() argument
129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) in set_desc_next()
131 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); in set_desc_next()
134 static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) in set_ld_eol() argument
138 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) in set_ld_eol()
141 desc->hw.next_ln_addr = CPU_TO_DMA(chan, in set_ld_eol()
142 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL in set_ld_eol()
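
The set_desc_* helpers fill the in-memory hardware link descriptor that the controller walks. A layout sketch inferred from the accessors above (field order and the 32-byte alignment follow the driver's fsl_dma_ld_hw, but treat the details as assumptions):

	/* Hardware link descriptor: the engine follows next_ln_addr
	 * until it finds a descriptor with FSL_DMA_EOL set. */
	struct fsl_dma_ld_hw {
		u64 src_addr;     /* source; snoop bits ORed in on 85xx */
		u64 dst_addr;     /* destination; snoop bits on 85xx */
		u64 next_ln_addr; /* next LD; snoop bits on 83xx, EOL flag */
		u32 count;        /* transfer byte count */
		u32 reserve;
	} __aligned(32);
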
150 static void dma_init(struct fsldma_chan *chan) in dma_init() argument
153 set_mr(chan, 0); in dma_init()
155 switch (chan->feature & FSL_DMA_IP_MASK) { in dma_init()
162 set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE in dma_init()
170 set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM); in dma_init()
175 static int dma_is_idle(struct fsldma_chan *chan) in dma_is_idle() argument
177 u32 sr = get_sr(chan); in dma_is_idle()
188 static void dma_start(struct fsldma_chan *chan) in dma_start() argument
192 mode = get_mr(chan); in dma_start()
194 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { in dma_start()
195 set_bcr(chan, 0); in dma_start()
201 if (chan->feature & FSL_DMA_CHAN_START_EXT) { in dma_start()
208 set_mr(chan, mode); in dma_start()
211 static void dma_halt(struct fsldma_chan *chan) in dma_halt() argument
217 mode = get_mr(chan); in dma_halt()
224 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { in dma_halt()
226 set_mr(chan, mode); in dma_halt()
233 set_mr(chan, mode); in dma_halt()
237 if (dma_is_idle(chan)) in dma_halt()
243 if (!dma_is_idle(chan)) in dma_halt()
244 chan_err(chan, "DMA halt timeout!\n"); in dma_halt()
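
The elided body of dma_halt between these fragments waits for the engine to drain. A minimal polling sketch consistent with the surrounding lines (the iteration count and the 10us delay are assumptions):

	/* Sketch: bounded busy-wait for the channel to go idle. */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;
		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
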
258 static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) in fsl_chan_set_src_loop_size() argument
262 mode = get_mr(chan); in fsl_chan_set_src_loop_size()
276 set_mr(chan, mode); in fsl_chan_set_src_loop_size()
290 static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) in fsl_chan_set_dst_loop_size() argument
294 mode = get_mr(chan); in fsl_chan_set_dst_loop_size()
308 set_mr(chan, mode); in fsl_chan_set_dst_loop_size()
323 static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) in fsl_chan_set_request_count() argument
329 mode = get_mr(chan); in fsl_chan_set_request_count()
332 set_mr(chan, mode); in fsl_chan_set_request_count()
344 static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) in fsl_chan_toggle_ext_pause() argument
347 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; in fsl_chan_toggle_ext_pause()
349 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; in fsl_chan_toggle_ext_pause()
362 static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) in fsl_chan_toggle_ext_start() argument
365 chan->feature |= FSL_DMA_CHAN_START_EXT; in fsl_chan_toggle_ext_start()
367 chan->feature &= ~FSL_DMA_CHAN_START_EXT; in fsl_chan_toggle_ext_start()
372 struct fsldma_chan *chan; in fsl_dma_external_start() local
377 chan = to_fsl_chan(dchan); in fsl_dma_external_start()
379 fsl_chan_toggle_ext_start(chan, enable); in fsl_dma_external_start()
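
fsl_dma_external_start is exported for peripheral drivers that want transfers kicked off by an external request pin rather than by software. A hedged client-side sketch (the function itself is declared in include/linux/fsldma.h; the surrounding client code is hypothetical):

	#include <linux/dmaengine.h>
	#include <linux/fsldma.h>

	/* Hypothetical client: arm the channel so the external DREQ
	 * pin, not a register write, starts the queued transfer. */
	static void client_arm_external_start(struct dma_chan *dchan)
	{
		fsl_dma_external_start(dchan, 1);
		dma_async_issue_pending(dchan);
	}
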
384 static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) in append_ld_queue() argument
386 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); in append_ld_queue()
388 if (list_empty(&chan->ld_pending)) in append_ld_queue()
398 set_desc_next(chan, &tail->hw, desc->async_tx.phys); in append_ld_queue()
405 list_splice_tail_init(&desc->tx_list, &chan->ld_pending); in append_ld_queue()
410 struct fsldma_chan *chan = to_fsl_chan(tx->chan); in fsl_dma_tx_submit() local
415 spin_lock_bh(&chan->desc_lock); in fsl_dma_tx_submit()
418 if (unlikely(chan->pm_state != RUNNING)) { in fsl_dma_tx_submit()
419 chan_dbg(chan, "cannot submit due to suspend\n"); in fsl_dma_tx_submit()
420 spin_unlock_bh(&chan->desc_lock); in fsl_dma_tx_submit()
434 append_ld_queue(chan, desc); in fsl_dma_tx_submit()
436 spin_unlock_bh(&chan->desc_lock); in fsl_dma_tx_submit()
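
Inside the lock, fsl_dma_tx_submit assigns a cookie to each descriptor in the chain and then splices the chain onto ld_pending. A sketch of the elided middle (dma_cookie_assign is the standard dmaengine helper; the child loop mirrors how tx_list chains are handled elsewhere in the driver):

	/* Sketch: cookie every LD in the chain, then queue the chain. */
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	list_for_each_entry(child, &desc->tx_list, node)
		cookie = dma_cookie_assign(&child->async_tx);

	append_ld_queue(chan, desc);
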
446 static void fsl_dma_free_descriptor(struct fsldma_chan *chan, in fsl_dma_free_descriptor() argument
450 chan_dbg(chan, "LD %p free\n", desc); in fsl_dma_free_descriptor()
451 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsl_dma_free_descriptor()
460 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) in fsl_dma_alloc_descriptor() argument
465 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); in fsl_dma_alloc_descriptor()
467 chan_dbg(chan, "out of memory for link descriptor\n"); in fsl_dma_alloc_descriptor()
473 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in fsl_dma_alloc_descriptor()
477 chan_dbg(chan, "LD %p allocated\n", desc); in fsl_dma_alloc_descriptor()
490 static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan) in fsldma_clean_completed_descriptor() argument
495 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) in fsldma_clean_completed_descriptor()
497 fsl_dma_free_descriptor(chan, desc); in fsldma_clean_completed_descriptor()
509 static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, in fsldma_run_tx_complete_actions() argument
522 chan_dbg(chan, "LD %p callback\n", desc); in fsldma_run_tx_complete_actions()
542 static void fsldma_clean_running_descriptor(struct fsldma_chan *chan, in fsldma_clean_running_descriptor() argument
557 list_add_tail(&desc->node, &chan->ld_completed); in fsldma_clean_running_descriptor()
561 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsldma_clean_running_descriptor()
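
These two fragments are the ends of one branch: a completed but not-yet-acked descriptor is parked on ld_completed, and only acked descriptors return to the pool. A sketch of the full body (async_tx_test_ack is the standard dmaengine helper):

	/* Sketch: un-acked descriptors may still gain dependents,
	 * so hold them on ld_completed instead of freeing. */
	list_del(&desc->node);

	if (!async_tx_test_ack(&desc->async_tx)) {
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
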
571 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) in fsl_chan_xfer_ld_queue() argument
579 if (list_empty(&chan->ld_pending)) { in fsl_chan_xfer_ld_queue()
580 chan_dbg(chan, "no pending LDs\n"); in fsl_chan_xfer_ld_queue()
589 if (!chan->idle) { in fsl_chan_xfer_ld_queue()
590 chan_dbg(chan, "DMA controller still busy\n"); in fsl_chan_xfer_ld_queue()
603 chan_dbg(chan, "idle, starting controller\n"); in fsl_chan_xfer_ld_queue()
604 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); in fsl_chan_xfer_ld_queue()
605 list_splice_tail_init(&chan->ld_pending, &chan->ld_running); in fsl_chan_xfer_ld_queue()
612 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { in fsl_chan_xfer_ld_queue()
615 mode = get_mr(chan); in fsl_chan_xfer_ld_queue()
617 set_mr(chan, mode); in fsl_chan_xfer_ld_queue()
624 set_cdar(chan, desc->async_tx.phys); in fsl_chan_xfer_ld_queue()
625 get_cdar(chan); in fsl_chan_xfer_ld_queue()
627 dma_start(chan); in fsl_chan_xfer_ld_queue()
628 chan->idle = false; in fsl_chan_xfer_ld_queue()
640 static void fsldma_cleanup_descriptors(struct fsldma_chan *chan) in fsldma_cleanup_descriptors() argument
644 dma_addr_t curr_phys = get_cdar(chan); in fsldma_cleanup_descriptors()
647 fsldma_clean_completed_descriptor(chan); in fsldma_cleanup_descriptors()
650 list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { in fsldma_cleanup_descriptors()
665 if (!dma_is_idle(chan)) in fsldma_cleanup_descriptors()
669 cookie = fsldma_run_tx_complete_actions(chan, desc, cookie); in fsldma_cleanup_descriptors()
671 fsldma_clean_running_descriptor(chan, desc); in fsldma_cleanup_descriptors()
680 fsl_chan_xfer_ld_queue(chan); in fsldma_cleanup_descriptors()
683 chan->common.completed_cookie = cookie; in fsldma_cleanup_descriptors()
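
The running-list walk stops at the descriptor the hardware is still processing, found by comparing each LD's DMA address against CDAR (read at line 644). A sketch of that loop (the seen_current flag is an assumption; variable names match the fragments):

	/* Sketch: complete every LD the hardware has finished,
	 * stopping at the one CDAR currently points to. */
	bool seen_current = false;

	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		if (seen_current)
			break;
		if (desc->async_tx.phys == curr_phys) {
			seen_current = true;
			/* the current LD may still be in flight */
			if (!dma_is_idle(chan))
				break;
		}
		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);
		fsldma_clean_running_descriptor(chan, desc);
	}
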
696 struct fsldma_chan *chan = to_fsl_chan(dchan); in fsl_dma_alloc_chan_resources() local
699 if (chan->desc_pool) in fsl_dma_alloc_chan_resources()
706 chan->desc_pool = dma_pool_create(chan->name, chan->dev, in fsl_dma_alloc_chan_resources()
709 if (!chan->desc_pool) { in fsl_dma_alloc_chan_resources()
710 chan_err(chan, "unable to allocate descriptor pool\n"); in fsl_dma_alloc_chan_resources()
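
The dma_pool_create call truncated at line 706 sizes the pool for one software descriptor per allocation. A sketch of the full call (the size/alignment arguments are inferred from the descriptor type; dma_pool_create's parameters are name, dev, size, align, boundary):

	/* Sketch: one fsl_desc_sw per pool entry, naturally aligned,
	 * with no boundary-crossing restriction (0). */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
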
725 static void fsldma_free_desc_list(struct fsldma_chan *chan, in fsldma_free_desc_list() argument
731 fsl_dma_free_descriptor(chan, desc); in fsldma_free_desc_list()
734 static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, in fsldma_free_desc_list_reverse() argument
740 fsl_dma_free_descriptor(chan, desc); in fsldma_free_desc_list_reverse()
749 struct fsldma_chan *chan = to_fsl_chan(dchan); in fsl_dma_free_chan_resources() local
751 chan_dbg(chan, "free all channel resources\n"); in fsl_dma_free_chan_resources()
752 spin_lock_bh(&chan->desc_lock); in fsl_dma_free_chan_resources()
753 fsldma_cleanup_descriptors(chan); in fsl_dma_free_chan_resources()
754 fsldma_free_desc_list(chan, &chan->ld_pending); in fsl_dma_free_chan_resources()
755 fsldma_free_desc_list(chan, &chan->ld_running); in fsl_dma_free_chan_resources()
756 fsldma_free_desc_list(chan, &chan->ld_completed); in fsl_dma_free_chan_resources()
757 spin_unlock_bh(&chan->desc_lock); in fsl_dma_free_chan_resources()
759 dma_pool_destroy(chan->desc_pool); in fsl_dma_free_chan_resources()
760 chan->desc_pool = NULL; in fsl_dma_free_chan_resources()
768 struct fsldma_chan *chan; in fsl_dma_prep_memcpy() local
778 chan = to_fsl_chan(dchan); in fsl_dma_prep_memcpy()
783 new = fsl_dma_alloc_descriptor(chan); in fsl_dma_prep_memcpy()
785 chan_err(chan, "%s\n", msg_ld_oom); in fsl_dma_prep_memcpy()
791 set_desc_cnt(chan, &new->hw, copy); in fsl_dma_prep_memcpy()
792 set_desc_src(chan, &new->hw, dma_src); in fsl_dma_prep_memcpy()
793 set_desc_dst(chan, &new->hw, dma_dst); in fsl_dma_prep_memcpy()
798 set_desc_next(chan, &prev->hw, new->async_tx.phys); in fsl_dma_prep_memcpy()
816 set_ld_eol(chan, new); in fsl_dma_prep_memcpy()
824 fsldma_free_desc_list_reverse(chan, &first->tx_list); in fsl_dma_prep_memcpy()
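
From a consumer's perspective, fsl_dma_prep_memcpy sits behind the generic dmaengine API. A hedged end-to-end usage sketch (error handling trimmed; dma_sync_wait busy-waits and is for illustration only):

	#include <linux/dmaengine.h>

	/* Illustrative only: a memcpy driven through the dmaengine
	 * core, which lands in fsl_dma_prep_memcpy on this hardware. */
	static int do_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = dmaengine_submit(tx);	/* -> fsl_dma_tx_submit */
		dma_async_issue_pending(chan);	/* -> issue_pending */

		return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
	}
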
834 struct fsldma_chan *chan = to_fsl_chan(dchan); in fsl_dma_prep_sg() local
868 new = fsl_dma_alloc_descriptor(chan); in fsl_dma_prep_sg()
870 chan_err(chan, "%s\n", msg_ld_oom); in fsl_dma_prep_sg()
874 set_desc_cnt(chan, &new->hw, len); in fsl_dma_prep_sg()
875 set_desc_src(chan, &new->hw, src); in fsl_dma_prep_sg()
876 set_desc_dst(chan, &new->hw, dst); in fsl_dma_prep_sg()
881 set_desc_next(chan, &prev->hw, new->async_tx.phys); in fsl_dma_prep_sg()
932 set_ld_eol(chan, new); in fsl_dma_prep_sg()
940 fsldma_free_desc_list_reverse(chan, &first->tx_list); in fsl_dma_prep_sg()
946 struct fsldma_chan *chan; in fsl_dma_device_terminate_all() local
951 chan = to_fsl_chan(dchan); in fsl_dma_device_terminate_all()
953 spin_lock_bh(&chan->desc_lock); in fsl_dma_device_terminate_all()
956 dma_halt(chan); in fsl_dma_device_terminate_all()
959 fsldma_free_desc_list(chan, &chan->ld_pending); in fsl_dma_device_terminate_all()
960 fsldma_free_desc_list(chan, &chan->ld_running); in fsl_dma_device_terminate_all()
961 fsldma_free_desc_list(chan, &chan->ld_completed); in fsl_dma_device_terminate_all()
962 chan->idle = true; in fsl_dma_device_terminate_all()
964 spin_unlock_bh(&chan->desc_lock); in fsl_dma_device_terminate_all()
971 struct fsldma_chan *chan; in fsl_dma_device_config() local
977 chan = to_fsl_chan(dchan); in fsl_dma_device_config()
980 if (!chan->set_request_count) in fsl_dma_device_config()
989 chan->set_request_count(chan, size); in fsl_dma_device_config()
1000 struct fsldma_chan *chan = to_fsl_chan(dchan); in fsl_dma_memcpy_issue_pending() local
1002 spin_lock_bh(&chan->desc_lock); in fsl_dma_memcpy_issue_pending()
1003 fsl_chan_xfer_ld_queue(chan); in fsl_dma_memcpy_issue_pending()
1004 spin_unlock_bh(&chan->desc_lock); in fsl_dma_memcpy_issue_pending()
1015 struct fsldma_chan *chan = to_fsl_chan(dchan); in fsl_tx_status() local
1022 spin_lock_bh(&chan->desc_lock); in fsl_tx_status()
1023 fsldma_cleanup_descriptors(chan); in fsl_tx_status()
1024 spin_unlock_bh(&chan->desc_lock); in fsl_tx_status()
1035 struct fsldma_chan *chan = data; in fsldma_chan_irq() local
1039 stat = get_sr(chan); in fsldma_chan_irq()
1040 set_sr(chan, stat); in fsldma_chan_irq()
1041 chan_dbg(chan, "irq: stat = 0x%x\n", stat); in fsldma_chan_irq()
1049 chan_err(chan, "Transfer Error!\n"); in fsldma_chan_irq()
1057 chan_dbg(chan, "irq: Programming Error INT\n"); in fsldma_chan_irq()
1059 if (get_bcr(chan) != 0) in fsldma_chan_irq()
1060 chan_err(chan, "Programming Error!\n"); in fsldma_chan_irq()
1068 chan_dbg(chan, "irq: End-of-Chain link INT\n"); in fsldma_chan_irq()
1078 chan_dbg(chan, "irq: End-of-link INT\n"); in fsldma_chan_irq()
1083 if (!dma_is_idle(chan)) in fsldma_chan_irq()
1084 chan_err(chan, "irq: controller not idle!\n"); in fsldma_chan_irq()
1088 chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); in fsldma_chan_irq()
1095 tasklet_schedule(&chan->tasklet); in fsldma_chan_irq()
1096 chan_dbg(chan, "irq: Exit\n"); in fsldma_chan_irq()
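
The handler follows the usual read-ack-decode shape: read SR, write the value back to clear it, then test event bits, deferring list cleanup to the tasklet. A condensed sketch of that pattern (the FSL_DMA_SR_* names follow the driver's header; exact bit values and the BCR check on programming errors are omitted):

	/* Sketch of the decode pattern used above. */
	stat = get_sr(chan);
	set_sr(chan, stat);		/* write-1-to-clear ack */

	if (stat & FSL_DMA_SR_TE)	/* transfer error */
		chan_err(chan, "Transfer Error!\n");

	if (stat & FSL_DMA_SR_PE)	/* programming error */
		chan_err(chan, "Programming Error!\n");

	if (stat & (FSL_DMA_SR_EOCDI | FSL_DMA_SR_EOLNI))
		tasklet_schedule(&chan->tasklet);	/* defer cleanup */
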
1102 struct fsldma_chan *chan = (struct fsldma_chan *)data; in dma_do_tasklet() local
1104 chan_dbg(chan, "tasklet entry\n"); in dma_do_tasklet()
1106 spin_lock_bh(&chan->desc_lock); in dma_do_tasklet()
1109 chan->idle = true; in dma_do_tasklet()
1112 fsldma_cleanup_descriptors(chan); in dma_do_tasklet()
1114 spin_unlock_bh(&chan->desc_lock); in dma_do_tasklet()
1116 chan_dbg(chan, "tasklet exit\n"); in dma_do_tasklet()
1122 struct fsldma_chan *chan; in fsldma_ctrl_irq() local
1133 chan = fdev->chan[i]; in fsldma_ctrl_irq()
1134 if (!chan) in fsldma_ctrl_irq()
1138 dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); in fsldma_ctrl_irq()
1139 fsldma_chan_irq(irq, chan); in fsldma_ctrl_irq()
1152 struct fsldma_chan *chan; in fsldma_free_irqs() local
1162 chan = fdev->chan[i]; in fsldma_free_irqs()
1163 if (chan && chan->irq != NO_IRQ) { in fsldma_free_irqs()
1164 chan_dbg(chan, "free per-channel IRQ\n"); in fsldma_free_irqs()
1165 free_irq(chan->irq, chan); in fsldma_free_irqs()
1172 struct fsldma_chan *chan; in fsldma_request_irqs() local
1186 chan = fdev->chan[i]; in fsldma_request_irqs()
1187 if (!chan) in fsldma_request_irqs()
1190 if (chan->irq == NO_IRQ) { in fsldma_request_irqs()
1191 chan_err(chan, "interrupts property missing in device tree\n"); in fsldma_request_irqs()
1196 chan_dbg(chan, "request per-channel IRQ\n"); in fsldma_request_irqs()
1197 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, in fsldma_request_irqs()
1198 "fsldma-chan", chan); in fsldma_request_irqs()
1200 chan_err(chan, "unable to request per-channel IRQ\n"); in fsldma_request_irqs()
1209 chan = fdev->chan[i]; in fsldma_request_irqs()
1210 if (!chan) in fsldma_request_irqs()
1213 if (chan->irq == NO_IRQ) in fsldma_request_irqs()
1216 free_irq(chan->irq, chan); in fsldma_request_irqs()
1229 struct fsldma_chan *chan; in fsl_dma_chan_probe() local
1234 chan = kzalloc(sizeof(*chan), GFP_KERNEL); in fsl_dma_chan_probe()
1235 if (!chan) { in fsl_dma_chan_probe()
1242 chan->regs = of_iomap(node, 0); in fsl_dma_chan_probe()
1243 if (!chan->regs) { in fsl_dma_chan_probe()
1255 chan->feature = feature; in fsl_dma_chan_probe()
1257 fdev->feature = chan->feature; in fsl_dma_chan_probe()
1263 WARN_ON(fdev->feature != chan->feature); in fsl_dma_chan_probe()
1265 chan->dev = fdev->dev; in fsl_dma_chan_probe()
1266 chan->id = (res.start & 0xfff) < 0x300 ? in fsl_dma_chan_probe()
1269 if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { in fsl_dma_chan_probe()
1275 fdev->chan[chan->id] = chan; in fsl_dma_chan_probe()
1276 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); in fsl_dma_chan_probe()
1277 snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); in fsl_dma_chan_probe()
1280 dma_init(chan); in fsl_dma_chan_probe()
1283 set_cdar(chan, 0); in fsl_dma_chan_probe()
1285 switch (chan->feature & FSL_DMA_IP_MASK) { in fsl_dma_chan_probe()
1287 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; in fsl_dma_chan_probe()
1289 chan->toggle_ext_start = fsl_chan_toggle_ext_start; in fsl_dma_chan_probe()
1290 chan->set_src_loop_size = fsl_chan_set_src_loop_size; in fsl_dma_chan_probe()
1291 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; in fsl_dma_chan_probe()
1292 chan->set_request_count = fsl_chan_set_request_count; in fsl_dma_chan_probe()
1295 spin_lock_init(&chan->desc_lock); in fsl_dma_chan_probe()
1296 INIT_LIST_HEAD(&chan->ld_pending); in fsl_dma_chan_probe()
1297 INIT_LIST_HEAD(&chan->ld_running); in fsl_dma_chan_probe()
1298 INIT_LIST_HEAD(&chan->ld_completed); in fsl_dma_chan_probe()
1299 chan->idle = true; in fsl_dma_chan_probe()
1301 chan->pm_state = RUNNING; in fsl_dma_chan_probe()
1304 chan->common.device = &fdev->common; in fsl_dma_chan_probe()
1305 dma_cookie_init(&chan->common); in fsl_dma_chan_probe()
1308 chan->irq = irq_of_parse_and_map(node, 0); in fsl_dma_chan_probe()
1311 list_add_tail(&chan->common.device_node, &fdev->common.channels); in fsl_dma_chan_probe()
1313 dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, in fsl_dma_chan_probe()
1314 chan->irq != NO_IRQ ? chan->irq : fdev->irq); in fsl_dma_chan_probe()
1319 iounmap(chan->regs); in fsl_dma_chan_probe()
1321 kfree(chan); in fsl_dma_chan_probe()
1326 static void fsl_dma_chan_remove(struct fsldma_chan *chan) in fsl_dma_chan_remove() argument
1328 irq_dispose_mapping(chan->irq); in fsl_dma_chan_remove()
1329 list_del(&chan->common.device_node); in fsl_dma_chan_remove()
1330 iounmap(chan->regs); in fsl_dma_chan_remove()
1331 kfree(chan); in fsl_dma_chan_remove()
1436 if (fdev->chan[i]) in fsldma_of_remove()
1437 fsl_dma_chan_remove(fdev->chan[i]); in fsldma_of_remove()
1451 struct fsldma_chan *chan; in fsldma_suspend_late() local
1455 chan = fdev->chan[i]; in fsldma_suspend_late()
1456 if (!chan) in fsldma_suspend_late()
1459 spin_lock_bh(&chan->desc_lock); in fsldma_suspend_late()
1460 if (unlikely(!chan->idle)) in fsldma_suspend_late()
1462 chan->regs_save.mr = get_mr(chan); in fsldma_suspend_late()
1463 chan->pm_state = SUSPENDED; in fsldma_suspend_late()
1464 spin_unlock_bh(&chan->desc_lock); in fsldma_suspend_late()
1470 chan = fdev->chan[i]; in fsldma_suspend_late()
1471 if (!chan) in fsldma_suspend_late()
1473 chan->pm_state = RUNNING; in fsldma_suspend_late()
1474 spin_unlock_bh(&chan->desc_lock); in fsldma_suspend_late()
1483 struct fsldma_chan *chan; in fsldma_resume_early() local
1488 chan = fdev->chan[i]; in fsldma_resume_early()
1489 if (!chan) in fsldma_resume_early()
1492 spin_lock_bh(&chan->desc_lock); in fsldma_resume_early()
1493 mode = chan->regs_save.mr in fsldma_resume_early()
1495 set_mr(chan, mode); in fsldma_resume_early()
1496 chan->pm_state = RUNNING; in fsldma_resume_early()
1497 spin_unlock_bh(&chan->desc_lock); in fsldma_resume_early()
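
Resume restores the mode register saved at suspend time, with the start bits masked off so the channel comes back halted until new work is issued. A sketch of the restore (the exact mask is an assumption; FSL_DMA_MR_CS is the channel-start bit seen earlier in fsl_chan_xfer_ld_queue):

	/* Sketch: bring the saved mode back without (re)starting. */
	mode = chan->regs_save.mr & ~FSL_DMA_MR_CS;
	set_mr(chan, mode);
	chan->pm_state = RUNNING;
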