drivers/dma/fsldma.c


DEFINITIONS

This source file includes the following definitions.
  1. set_sr
  2. get_sr
  3. set_mr
  4. get_mr
  5. set_cdar
  6. get_cdar
  7. set_bcr
  8. get_bcr
  9. set_desc_cnt
  10. set_desc_src
  11. set_desc_dst
  12. set_desc_next
  13. set_ld_eol
  14. dma_init
  15. dma_is_idle
  16. dma_start
  17. dma_halt
  18. fsl_chan_set_src_loop_size
  19. fsl_chan_set_dst_loop_size
  20. fsl_chan_set_request_count
  21. fsl_chan_toggle_ext_pause
  22. fsl_chan_toggle_ext_start
  23. fsl_dma_external_start
  24. append_ld_queue
  25. fsl_dma_tx_submit
  26. fsl_dma_free_descriptor
  27. fsl_dma_alloc_descriptor
  28. fsldma_clean_completed_descriptor
  29. fsldma_run_tx_complete_actions
  30. fsldma_clean_running_descriptor
  31. fsl_chan_xfer_ld_queue
  32. fsldma_cleanup_descriptors
  33. fsl_dma_alloc_chan_resources
  34. fsldma_free_desc_list
  35. fsldma_free_desc_list_reverse
  36. fsl_dma_free_chan_resources
  37. fsl_dma_prep_memcpy
  38. fsl_dma_device_terminate_all
  39. fsl_dma_device_config
  40. fsl_dma_memcpy_issue_pending
  41. fsl_tx_status
  42. fsldma_chan_irq
  43. dma_do_tasklet
  44. fsldma_ctrl_irq
  45. fsldma_free_irqs
  46. fsldma_request_irqs
  47. fsl_dma_chan_probe
  48. fsl_dma_chan_remove
  49. fsldma_of_probe
  50. fsldma_of_remove
  51. fsldma_suspend_late
  52. fsldma_resume_early
  53. fsldma_init
  54. fsldma_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Freescale MPC85xx, MPC83xx DMA Engine support
   4  *
   5  * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
   6  *
   7  * Author:
   8  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
   9  *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
  10  *
  11  * Description:
  12  *   DMA engine driver for the Freescale MPC8540 DMA controller, which
  13  *   also supports the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
  14  *   Support for the MPC8349 DMA controller is also included.
  15  *
  16  * This driver instructs the DMA controller to issue the PCI Read Multiple
  17  * command for PCI read operations, instead of using the default PCI Read Line
  18  * command. Please be aware that this setting may result in read pre-fetching
  19  * on some platforms.
  20  */
  21 
  22 #include <linux/init.h>
  23 #include <linux/module.h>
  24 #include <linux/pci.h>
  25 #include <linux/slab.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/dmaengine.h>
  28 #include <linux/delay.h>
  29 #include <linux/dma-mapping.h>
  30 #include <linux/dmapool.h>
  31 #include <linux/of_address.h>
  32 #include <linux/of_irq.h>
  33 #include <linux/of_platform.h>
  34 #include <linux/fsldma.h>
  35 #include "dmaengine.h"
  36 #include "fsldma.h"
  37 
  38 #define chan_dbg(chan, fmt, arg...)                                     \
  39         dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
  40 #define chan_err(chan, fmt, arg...)                                     \
  41         dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
  42 
  43 static const char msg_ld_oom[] = "No free memory for link descriptor";
  44 
  45 /*
  46  * Register Helpers
  47  */
  48 
  49 static void set_sr(struct fsldma_chan *chan, u32 val)
  50 {
  51         FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
  52 }
  53 
  54 static u32 get_sr(struct fsldma_chan *chan)
  55 {
  56         return FSL_DMA_IN(chan, &chan->regs->sr, 32);
  57 }
  58 
  59 static void set_mr(struct fsldma_chan *chan, u32 val)
  60 {
  61         FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
  62 }
  63 
  64 static u32 get_mr(struct fsldma_chan *chan)
  65 {
  66         return FSL_DMA_IN(chan, &chan->regs->mr, 32);
  67 }
  68 
  69 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
  70 {
  71         FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
  72 }
  73 
  74 static dma_addr_t get_cdar(struct fsldma_chan *chan)
  75 {
  76         return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
  77 }
  78 
  79 static void set_bcr(struct fsldma_chan *chan, u32 val)
  80 {
  81         FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
  82 }
  83 
  84 static u32 get_bcr(struct fsldma_chan *chan)
  85 {
  86         return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
  87 }
  88 
  89 /*
  90  * Descriptor Helpers
  91  */
  92 
  93 static void set_desc_cnt(struct fsldma_chan *chan,
  94                                 struct fsl_dma_ld_hw *hw, u32 count)
  95 {
  96         hw->count = CPU_TO_DMA(chan, count, 32);
  97 }
  98 
  99 static void set_desc_src(struct fsldma_chan *chan,
 100                          struct fsl_dma_ld_hw *hw, dma_addr_t src)
 101 {
 102         u64 snoop_bits;
 103 
 104         snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 105                 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
 106         hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 107 }
 108 
 109 static void set_desc_dst(struct fsldma_chan *chan,
 110                          struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 111 {
 112         u64 snoop_bits;
 113 
 114         snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 115                 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
 116         hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 117 }
 118 
 119 static void set_desc_next(struct fsldma_chan *chan,
 120                           struct fsl_dma_ld_hw *hw, dma_addr_t next)
 121 {
 122         u64 snoop_bits;
 123 
 124         snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 125                 ? FSL_DMA_SNEN : 0;
 126         hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
 127 }
 128 
 129 static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 130 {
 131         u64 snoop_bits;
 132 
 133         snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 134                 ? FSL_DMA_SNEN : 0;
 135 
 136         desc->hw.next_ln_addr = CPU_TO_DMA(chan,
 137                 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
 138                         | snoop_bits, 64);
 139 }
 140 
 141 /*
 142  * DMA Engine Hardware Control Helpers
 143  */
 144 
 145 static void dma_init(struct fsldma_chan *chan)
 146 {
 147         /* Reset the channel */
 148         set_mr(chan, 0);
 149 
 150         switch (chan->feature & FSL_DMA_IP_MASK) {
 151         case FSL_DMA_IP_85XX:
 152                 /* Configure the channel with the following modes:
 153                  * EIE - Error interrupt enable
 154                  * EOLNIE - End of links interrupt enable
 155                  * BWC - Bandwidth sharing among channels
 156                  */
 157                 set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
 158                         | FSL_DMA_MR_EOLNIE);
 159                 break;
 160         case FSL_DMA_IP_83XX:
 161                 /* Configure the channel with the following modes:
 162                  * EOTIE - End-of-transfer interrupt enable
 163                  * PRC_RM - PCI read multiple
 164                  */
 165                 set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
 166                 break;
 167         }
 168 }
 169 
 170 static int dma_is_idle(struct fsldma_chan *chan)
 171 {
 172         u32 sr = get_sr(chan);
 173         return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 174 }
 175 
 176 /*
 177  * Start the DMA controller
 178  *
 179  * Preconditions:
 180  * - the CDAR register must point to the start descriptor
 181  * - the MRn[CS] bit must be cleared
 182  */
 183 static void dma_start(struct fsldma_chan *chan)
 184 {
 185         u32 mode;
 186 
 187         mode = get_mr(chan);
 188 
 189         if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
 190                 set_bcr(chan, 0);
 191                 mode |= FSL_DMA_MR_EMP_EN;
 192         } else {
 193                 mode &= ~FSL_DMA_MR_EMP_EN;
 194         }
 195 
 196         if (chan->feature & FSL_DMA_CHAN_START_EXT) {
 197                 mode |= FSL_DMA_MR_EMS_EN;
 198         } else {
 199                 mode &= ~FSL_DMA_MR_EMS_EN;
 200                 mode |= FSL_DMA_MR_CS;
 201         }
 202 
 203         set_mr(chan, mode);
 204 }
 205 
 206 static void dma_halt(struct fsldma_chan *chan)
 207 {
 208         u32 mode;
 209         int i;
 210 
 211         /* read the mode register */
 212         mode = get_mr(chan);
 213 
 214         /*
 215          * The 85xx controller supports channel abort, which will stop
 216          * the current transfer. On 83xx, this bit is the transfer error
 217          * mask bit, which should not be changed.
 218          */
 219         if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
 220                 mode |= FSL_DMA_MR_CA;
 221                 set_mr(chan, mode);
 222 
 223                 mode &= ~FSL_DMA_MR_CA;
 224         }
 225 
 226         /* stop the DMA controller */
 227         mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
 228         set_mr(chan, mode);
 229 
 230         /* wait for the DMA controller to become idle */
 231         for (i = 0; i < 100; i++) {
 232                 if (dma_is_idle(chan))
 233                         return;
 234 
 235                 udelay(10);
 236         }
 237 
 238         if (!dma_is_idle(chan))
 239                 chan_err(chan, "DMA halt timeout!\n");
 240 }
 241 
 242 /**
 243  * fsl_chan_set_src_loop_size - Set source address hold transfer size
 244  * @chan : Freescale DMA channel
 245  * @size     : Address loop size, 0 to disable the loop
 246  *
 247  * Set the source address hold (loop) transfer size. While the DMA
 248  * transfers data from the source address (SA), it stays within the
 249  * hold window: if the loop size is 4, the DMA reads from SA, SA + 1,
 250  * SA + 2, SA + 3, then loops back to SA, SA + 1, and continues in
 251  * that pattern for the rest of the transfer.
 252  */
 253 static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
 254 {
 255         u32 mode;
 256 
 257         mode = get_mr(chan);
 258 
 259         switch (size) {
 260         case 0:
 261                 mode &= ~FSL_DMA_MR_SAHE;
 262                 break;
 263         case 1:
 264         case 2:
 265         case 4:
 266         case 8:
 267                 mode &= ~FSL_DMA_MR_SAHTS_MASK;
 268                 mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
 269                 break;
 270         }
 271 
 272         set_mr(chan, mode);
 273 }
 274 
 275 /**
 276  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 277  * @chan : Freescale DMA channel
 278  * @size     : Address loop size, 0 to disable the loop
 279  *
 280  * Set the destination address hold (loop) transfer size. While the DMA
 281  * transfers data to the destination address (TA), it stays within the
 282  * hold window: if the loop size is 4, the DMA writes to TA, TA + 1,
 283  * TA + 2, TA + 3, then loops back to TA, TA + 1, and continues in
 284  * that pattern for the rest of the transfer.
 285  */
 286 static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
 287 {
 288         u32 mode;
 289 
 290         mode = get_mr(chan);
 291 
 292         switch (size) {
 293         case 0:
 294                 mode &= ~FSL_DMA_MR_DAHE;
 295                 break;
 296         case 1:
 297         case 2:
 298         case 4:
 299         case 8:
 300                 mode &= ~FSL_DMA_MR_DAHTS_MASK;
 301                 mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
 302                 break;
 303         }
 304 
 305         set_mr(chan, mode);
 306 }
 307 
 308 /**
 309  * fsl_chan_set_request_count - Set DMA Request Count for external control
 310  * @chan : Freescale DMA channel
 311  * @size     : Number of bytes to transfer in a single request
 312  *
 313  * The Freescale DMA channel can be controlled by the external signal DREQ#.
 314  * The DMA request count is how many bytes are allowed to transfer before
 315  * pausing the channel, after which a new assertion of DREQ# resumes channel
 316  * operation.
 317  *
 318  * A size of 0 disables external pause control. The maximum size is 1024.
 319  */
 320 static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
 321 {
 322         u32 mode;
 323 
 324         BUG_ON(size > 1024);
 325 
 326         mode = get_mr(chan);
 327         mode &= ~FSL_DMA_MR_BWC_MASK;
 328         mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;
 329 
 330         set_mr(chan, mode);
 331 }
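
/*
 * Worked example (illustrative, not in the original source): the request
 * count is programmed as __ilog2(size) into the BWC field at bit 24 of the
 * mode register, so a request count of 64 bytes writes the value 6, and the
 * maximum of 1024 bytes writes the value 10.
 */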
 332 
 333 /**
 334  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 335  * @chan : Freescale DMA channel
 336  * @enable   : 0 is disabled, 1 is enabled.
 337  *
 338  * The Freescale DMA channel can be controlled by the external signal DREQ#.
 339  * The DMA Request Count feature should be used in addition to this feature
 340  * to set the number of bytes to transfer before pausing the channel.
 341  */
 342 static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
 343 {
 344         if (enable)
 345                 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
 346         else
 347                 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 348 }
 349 
 350 /**
 351  * fsl_chan_toggle_ext_start - Toggle channel external start status
 352  * @chan : Freescale DMA channel
 353  * @enable   : 0 is disabled, 1 is enabled.
 354  *
 355  * When external start is enabled, the channel is started by an
 356  * external DMA start pin, so dma_start() does not begin the
 357  * transfer immediately. The DMA channel waits until the
 358  * control pin is asserted.
 359  */
 360 static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
 361 {
 362         if (enable)
 363                 chan->feature |= FSL_DMA_CHAN_START_EXT;
 364         else
 365                 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 366 }
 367 
 368 int fsl_dma_external_start(struct dma_chan *dchan, int enable)
 369 {
 370         struct fsldma_chan *chan;
 371 
 372         if (!dchan)
 373                 return -EINVAL;
 374 
 375         chan = to_fsl_chan(dchan);
 376 
 377         fsl_chan_toggle_ext_start(chan, enable);
 378         return 0;
 379 }
 380 EXPORT_SYMBOL_GPL(fsl_dma_external_start);
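
/*
 * Hypothetical client-side sketch (not part of this driver): with external
 * start enabled, issuing pending work only arms the channel; the transfer
 * begins when the external DMA start pin is asserted. "chan" and "tx" are
 * illustrative assumptions obtained from the usual dmaengine request and
 * prep calls.
 *
 *	fsl_dma_external_start(chan, 1);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */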
 381 
 382 static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 383 {
 384         struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
 385 
 386         if (list_empty(&chan->ld_pending))
 387                 goto out_splice;
 388 
 389         /*
 390          * Add the hardware descriptor to the chain of hardware descriptors
 391          * that already exists in memory.
 392          *
 393          * This will un-set the EOL bit of the existing transaction, and the
 394          * last link in this transaction will become the EOL descriptor.
 395          */
 396         set_desc_next(chan, &tail->hw, desc->async_tx.phys);
 397 
 398         /*
 399          * Add the software descriptor and all children to the list
 400          * of pending transactions
 401          */
 402 out_splice:
 403         list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
 404 }
 405 
 406 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 407 {
 408         struct fsldma_chan *chan = to_fsl_chan(tx->chan);
 409         struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 410         struct fsl_desc_sw *child;
 411         dma_cookie_t cookie = -EINVAL;
 412 
 413         spin_lock_bh(&chan->desc_lock);
 414 
 415 #ifdef CONFIG_PM
 416         if (unlikely(chan->pm_state != RUNNING)) {
 417                 chan_dbg(chan, "cannot submit due to suspend\n");
 418                 spin_unlock_bh(&chan->desc_lock);
 419                 return -1;
 420         }
 421 #endif
 422 
 423         /*
 424          * assign cookies to all of the software descriptors
 425          * that make up this transaction
 426          */
 427         list_for_each_entry(child, &desc->tx_list, node) {
 428                 cookie = dma_cookie_assign(&child->async_tx);
 429         }
 430 
 431         /* put this transaction onto the tail of the pending queue */
 432         append_ld_queue(chan, desc);
 433 
 434         spin_unlock_bh(&chan->desc_lock);
 435 
 436         return cookie;
 437 }
 438 
 439 /**
 440  * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 441  * @chan : Freescale DMA channel
 442  * @desc: descriptor to be freed
 443  */
 444 static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
 445                 struct fsl_desc_sw *desc)
 446 {
 447         list_del(&desc->node);
 448         chan_dbg(chan, "LD %p free\n", desc);
 449         dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 450 }
 451 
 452 /**
 453  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 454  * @chan : Freescale DMA channel
 455  *
 456  * Return - The allocated descriptor, or NULL on failure.
 457  */
 458 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
 459 {
 460         struct fsl_desc_sw *desc;
 461         dma_addr_t pdesc;
 462 
 463         desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
 464         if (!desc) {
 465                 chan_dbg(chan, "out of memory for link descriptor\n");
 466                 return NULL;
 467         }
 468 
 469         INIT_LIST_HEAD(&desc->tx_list);
 470         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
 471         desc->async_tx.tx_submit = fsl_dma_tx_submit;
 472         desc->async_tx.phys = pdesc;
 473 
 474         chan_dbg(chan, "LD %p allocated\n", desc);
 475 
 476         return desc;
 477 }
 478 
 479 /**
 480  * fsldma_clean_completed_descriptor - free all descriptors which
 481  * have been completed and acked
 482  * @chan: Freescale DMA channel
 483  *
 484  * This function is used on all completed and acked descriptors.
 485  * All descriptors should only be freed by this function.
 486  */
 487 static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
 488 {
 489         struct fsl_desc_sw *desc, *_desc;
 490 
 491         /* Free each descriptor that has already been acked, in order */
 492         list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
 493                 if (async_tx_test_ack(&desc->async_tx))
 494                         fsl_dma_free_descriptor(chan, desc);
 495 }
 496 
 497 /**
 498  * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 499  * @chan: Freescale DMA channel
 500  * @desc: descriptor to cleanup and free
 501  * @cookie: Freescale DMA transaction identifier
 502  *
 503  * This function is used on a descriptor which has been executed by the DMA
 504  * controller. It will run any callbacks, submit any dependencies.
 505  */
 506 static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
 507                 struct fsl_desc_sw *desc, dma_cookie_t cookie)
 508 {
 509         struct dma_async_tx_descriptor *txd = &desc->async_tx;
 510         dma_cookie_t ret = cookie;
 511 
 512         BUG_ON(txd->cookie < 0);
 513 
 514         if (txd->cookie > 0) {
 515                 ret = txd->cookie;
 516 
 517                 dma_descriptor_unmap(txd);
 518                 /* Run the link descriptor callback function */
 519                 dmaengine_desc_get_callback_invoke(txd, NULL);
 520         }
 521 
 522         /* Run any dependencies */
 523         dma_run_dependencies(txd);
 524 
 525         return ret;
 526 }
 527 
 528 /**
 529  * fsldma_clean_running_descriptor - move the completed descriptor from
 530  * ld_running to ld_completed
 531  * @chan: Freescale DMA channel
 532  * @desc: the descriptor which is completed
 533  *
 534  * Free the descriptor directly if it has been acked by the async_tx API,
 535  * otherwise move it to the ld_completed queue.
 536  */
 537 static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
 538                 struct fsl_desc_sw *desc)
 539 {
 540         /* Remove from the list of transactions */
 541         list_del(&desc->node);
 542 
 543         /*
 544          * the client is allowed to attach dependent operations
 545          * until 'ack' is set
 546          */
 547         if (!async_tx_test_ack(&desc->async_tx)) {
 548                 /*
 549                  * Move this descriptor to the list of descriptors which is
 550                  * completed, but still awaiting the 'ack' bit to be set.
 551                  */
 552                 list_add_tail(&desc->node, &chan->ld_completed);
 553                 return;
 554         }
 555 
 556         dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 557 }
 558 
 559 /**
 560  * fsl_chan_xfer_ld_queue - transfer any pending transactions
 561  * @chan : Freescale DMA channel
 562  *
 563  * HARDWARE STATE: idle
 564  * LOCKING: must hold chan->desc_lock
 565  */
 566 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 567 {
 568         struct fsl_desc_sw *desc;
 569 
 570         /*
 571          * If the list of pending descriptors is empty, then we
 572          * don't need to do any work at all
 573          */
 574         if (list_empty(&chan->ld_pending)) {
 575                 chan_dbg(chan, "no pending LDs\n");
 576                 return;
 577         }
 578 
 579         /*
 580          * The DMA controller is not idle, which means that the interrupt
 581          * handler will start any queued transactions when it runs after
 582          * this transaction finishes
 583          */
 584         if (!chan->idle) {
 585                 chan_dbg(chan, "DMA controller still busy\n");
 586                 return;
 587         }
 588 
 589         /*
 590          * If there are some link descriptors which have not been
 591          * transferred, we need to start the controller
 592          */
 593 
 594         /*
 595          * Move all elements from the queue of pending transactions
 596          * onto the list of running transactions
 597          */
 598         chan_dbg(chan, "idle, starting controller\n");
 599         desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
 600         list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
 601 
 602         /*
 603          * The 85xx DMA controller doesn't clear the channel start bit
 604          * automatically at the end of a transfer. Therefore we must clear
 605          * it in software before starting the transfer.
 606          */
 607         if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
 608                 u32 mode;
 609 
 610                 mode = get_mr(chan);
 611                 mode &= ~FSL_DMA_MR_CS;
 612                 set_mr(chan, mode);
 613         }
 614 
 615         /*
 616          * Program the descriptor's address into the DMA controller,
 617          * then start the DMA transaction
 618          */
 619         set_cdar(chan, desc->async_tx.phys);
 620         get_cdar(chan);
 621 
 622         dma_start(chan);
 623         chan->idle = false;
 624 }
 625 
 626 /**
 627  * fsldma_cleanup_descriptors - clean up link descriptors which have completed,
 628  * moving them to ld_completed so they can be freed once the 'ack' flag is set
 629  * @chan: Freescale DMA channel
 630  *
 631  * This function is used on descriptors which have been executed by the DMA
 632  * controller. It will run any callbacks, submit any dependencies, and then
 633  * free these descriptors once the 'ack' flag is set.
 634  */
 635 static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
 636 {
 637         struct fsl_desc_sw *desc, *_desc;
 638         dma_cookie_t cookie = 0;
 639         dma_addr_t curr_phys = get_cdar(chan);
 640         int seen_current = 0;
 641 
 642         fsldma_clean_completed_descriptor(chan);
 643 
 644         /* Run the callback for each descriptor, in order */
 645         list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
 646                 /*
 647                  * do not advance past the current descriptor loaded into the
 648                  * hardware channel, subsequent descriptors are either in
 649                  * process or have not been submitted
 650                  */
 651                 if (seen_current)
 652                         break;
 653 
 654                 /*
 655                  * stop the search if we reach the current descriptor and the
 656                  * channel is busy
 657                  */
 658                 if (desc->async_tx.phys == curr_phys) {
 659                         seen_current = 1;
 660                         if (!dma_is_idle(chan))
 661                                 break;
 662                 }
 663 
 664                 cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);
 665 
 666                 fsldma_clean_running_descriptor(chan, desc);
 667         }
 668 
 669         /*
 670          * Start any pending transactions automatically
 671          *
 672          * In the ideal case, we keep the DMA controller busy while we go
 673          * ahead and free the descriptors below.
 674          */
 675         fsl_chan_xfer_ld_queue(chan);
 676 
 677         if (cookie > 0)
 678                 chan->common.completed_cookie = cookie;
 679 }
 680 
 681 /**
 682  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 683  * @chan : Freescale DMA channel
 684  *
 685  * This function will create a dma pool for descriptor allocation.
 686  *
 687  * Return - The number of descriptors allocated.
 688  */
 689 static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 690 {
 691         struct fsldma_chan *chan = to_fsl_chan(dchan);
 692 
 693         /* Has this channel already been allocated? */
 694         if (chan->desc_pool)
 695                 return 1;
 696 
 697         /*
 698          * We need the descriptor to be aligned to 32 bytes
 699          * to meet the FSL DMA specification requirement.
 700          */
 701         chan->desc_pool = dma_pool_create(chan->name, chan->dev,
 702                                           sizeof(struct fsl_desc_sw),
 703                                           __alignof__(struct fsl_desc_sw), 0);
 704         if (!chan->desc_pool) {
 705                 chan_err(chan, "unable to allocate descriptor pool\n");
 706                 return -ENOMEM;
 707         }
 708 
 709         /* there is at least one descriptor free to be allocated */
 710         return 1;
 711 }
 712 
 713 /**
 714  * fsldma_free_desc_list - Free all descriptors in a queue
 715  * @chan: Freescale DMA channel
 716  * @list: the list to free
 717  *
 718  * LOCKING: must hold chan->desc_lock
 719  */
 720 static void fsldma_free_desc_list(struct fsldma_chan *chan,
 721                                   struct list_head *list)
 722 {
 723         struct fsl_desc_sw *desc, *_desc;
 724 
 725         list_for_each_entry_safe(desc, _desc, list, node)
 726                 fsl_dma_free_descriptor(chan, desc);
 727 }
 728 
 729 static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
 730                                           struct list_head *list)
 731 {
 732         struct fsl_desc_sw *desc, *_desc;
 733 
 734         list_for_each_entry_safe_reverse(desc, _desc, list, node)
 735                 fsl_dma_free_descriptor(chan, desc);
 736 }
 737 
 738 /**
 739  * fsl_dma_free_chan_resources - Free all resources of the channel.
 740  * @chan : Freescale DMA channel
 741  */
 742 static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 743 {
 744         struct fsldma_chan *chan = to_fsl_chan(dchan);
 745 
 746         chan_dbg(chan, "free all channel resources\n");
 747         spin_lock_bh(&chan->desc_lock);
 748         fsldma_cleanup_descriptors(chan);
 749         fsldma_free_desc_list(chan, &chan->ld_pending);
 750         fsldma_free_desc_list(chan, &chan->ld_running);
 751         fsldma_free_desc_list(chan, &chan->ld_completed);
 752         spin_unlock_bh(&chan->desc_lock);
 753 
 754         dma_pool_destroy(chan->desc_pool);
 755         chan->desc_pool = NULL;
 756 }
 757 
 758 static struct dma_async_tx_descriptor *
 759 fsl_dma_prep_memcpy(struct dma_chan *dchan,
 760         dma_addr_t dma_dst, dma_addr_t dma_src,
 761         size_t len, unsigned long flags)
 762 {
 763         struct fsldma_chan *chan;
 764         struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
 765         size_t copy;
 766 
 767         if (!dchan)
 768                 return NULL;
 769 
 770         if (!len)
 771                 return NULL;
 772 
 773         chan = to_fsl_chan(dchan);
 774 
 775         do {
 776 
 777                 /* Allocate the link descriptor from DMA pool */
 778                 new = fsl_dma_alloc_descriptor(chan);
 779                 if (!new) {
 780                         chan_err(chan, "%s\n", msg_ld_oom);
 781                         goto fail;
 782                 }
 783 
 784                 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 785 
 786                 set_desc_cnt(chan, &new->hw, copy);
 787                 set_desc_src(chan, &new->hw, dma_src);
 788                 set_desc_dst(chan, &new->hw, dma_dst);
 789 
 790                 if (!first)
 791                         first = new;
 792                 else
 793                         set_desc_next(chan, &prev->hw, new->async_tx.phys);
 794 
 795                 new->async_tx.cookie = 0;
 796                 async_tx_ack(&new->async_tx);
 797 
 798                 prev = new;
 799                 len -= copy;
 800                 dma_src += copy;
 801                 dma_dst += copy;
 802 
 803                 /* Insert the link descriptor to the LD ring */
 804                 list_add_tail(&new->node, &first->tx_list);
 805         } while (len);
 806 
 807         new->async_tx.flags = flags; /* client is in control of this ack */
 808         new->async_tx.cookie = -EBUSY;
 809 
 810         /* Set End-of-link to the last link descriptor of new list */
 811         set_ld_eol(chan, new);
 812 
 813         return &first->async_tx;
 814 
 815 fail:
 816         if (!first)
 817                 return NULL;
 818 
 819         fsldma_free_desc_list_reverse(chan, &first->tx_list);
 820         return NULL;
 821 }
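
/*
 * Hypothetical client-side sketch (not part of this driver, error handling
 * trimmed): how a generic dmaengine user could exercise the memcpy path
 * implemented by fsl_dma_prep_memcpy() above. dst_phys, src_phys and len
 * are illustrative assumptions and must already be DMA-mapped addresses.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
 *		cpu_relax();
 *	dma_release_channel(chan);
 */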
 822 
 823 static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 824 {
 825         struct fsldma_chan *chan;
 826 
 827         if (!dchan)
 828                 return -EINVAL;
 829 
 830         chan = to_fsl_chan(dchan);
 831 
 832         spin_lock_bh(&chan->desc_lock);
 833 
 834         /* Halt the DMA engine */
 835         dma_halt(chan);
 836 
 837         /* Remove and free all of the descriptors in the LD queue */
 838         fsldma_free_desc_list(chan, &chan->ld_pending);
 839         fsldma_free_desc_list(chan, &chan->ld_running);
 840         fsldma_free_desc_list(chan, &chan->ld_completed);
 841         chan->idle = true;
 842 
 843         spin_unlock_bh(&chan->desc_lock);
 844         return 0;
 845 }
 846 
 847 static int fsl_dma_device_config(struct dma_chan *dchan,
 848                                  struct dma_slave_config *config)
 849 {
 850         struct fsldma_chan *chan;
 851         int size;
 852 
 853         if (!dchan)
 854                 return -EINVAL;
 855 
 856         chan = to_fsl_chan(dchan);
 857 
 858         /* make sure the channel supports setting burst size */
 859         if (!chan->set_request_count)
 860                 return -ENXIO;
 861 
 862         /* we set the controller burst size depending on direction */
 863         if (config->direction == DMA_MEM_TO_DEV)
 864                 size = config->dst_addr_width * config->dst_maxburst;
 865         else
 866                 size = config->src_addr_width * config->src_maxburst;
 867 
 868         chan->set_request_count(chan, size);
 869         return 0;
 870 }
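
/*
 * Hypothetical configuration sketch (not part of this driver): the request
 * count programmed above is addr_width * maxburst, so the illustrative
 * values below would allow 4 * 16 = 64 bytes per DREQ# assertion.
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */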
 871 
 872 
 873 /**
 874  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 875  * @chan : Freescale DMA channel
 876  */
 877 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 878 {
 879         struct fsldma_chan *chan = to_fsl_chan(dchan);
 880 
 881         spin_lock_bh(&chan->desc_lock);
 882         fsl_chan_xfer_ld_queue(chan);
 883         spin_unlock_bh(&chan->desc_lock);
 884 }
 885 
 886 /**
 887  * fsl_tx_status - Determine the DMA status
 888  * @chan : Freescale DMA channel
 889  */
 890 static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 891                                         dma_cookie_t cookie,
 892                                         struct dma_tx_state *txstate)
 893 {
 894         struct fsldma_chan *chan = to_fsl_chan(dchan);
 895         enum dma_status ret;
 896 
 897         ret = dma_cookie_status(dchan, cookie, txstate);
 898         if (ret == DMA_COMPLETE)
 899                 return ret;
 900 
 901         spin_lock_bh(&chan->desc_lock);
 902         fsldma_cleanup_descriptors(chan);
 903         spin_unlock_bh(&chan->desc_lock);
 904 
 905         return dma_cookie_status(dchan, cookie, txstate);
 906 }
 907 
 908 /*----------------------------------------------------------------------------*/
 909 /* Interrupt Handling                                                         */
 910 /*----------------------------------------------------------------------------*/
 911 
 912 static irqreturn_t fsldma_chan_irq(int irq, void *data)
 913 {
 914         struct fsldma_chan *chan = data;
 915         u32 stat;
 916 
 917         /* save and clear the status register */
 918         stat = get_sr(chan);
 919         set_sr(chan, stat);
 920         chan_dbg(chan, "irq: stat = 0x%x\n", stat);
 921 
 922         /* check that this was really our device */
 923         stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 924         if (!stat)
 925                 return IRQ_NONE;
 926 
 927         if (stat & FSL_DMA_SR_TE)
 928                 chan_err(chan, "Transfer Error!\n");
 929 
 930         /*
 931          * Programming Error
 932          * The DMA_INTERRUPT async_tx is a NULL transfer, which will
 933          * trigger a PE interrupt.
 934          */
 935         if (stat & FSL_DMA_SR_PE) {
 936                 chan_dbg(chan, "irq: Programming Error INT\n");
 937                 stat &= ~FSL_DMA_SR_PE;
 938                 if (get_bcr(chan) != 0)
 939                         chan_err(chan, "Programming Error!\n");
 940         }
 941 
 942         /*
 943          * For the MPC8349, the EOCDI event needs to update the cookie
 944          * and start the next transfer if one exists.
 945          */
 946         if (stat & FSL_DMA_SR_EOCDI) {
 947                 chan_dbg(chan, "irq: End-of-Chain link INT\n");
 948                 stat &= ~FSL_DMA_SR_EOCDI;
 949         }
 950 
 951         /*
 952          * If the current transfer is the end of the transfer chain,
 953          * we should clear the Channel Start bit to prepare for
 954          * the next transfer.
 955          */
 956         if (stat & FSL_DMA_SR_EOLNI) {
 957                 chan_dbg(chan, "irq: End-of-link INT\n");
 958                 stat &= ~FSL_DMA_SR_EOLNI;
 959         }
 960 
 961         /* check that the DMA controller is really idle */
 962         if (!dma_is_idle(chan))
 963                 chan_err(chan, "irq: controller not idle!\n");
 964 
 965         /* check that we handled all of the bits */
 966         if (stat)
 967                 chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
 968 
 969         /*
 970          * Schedule the tasklet to handle all cleanup of the current
 971          * transaction. It will start a new transaction if there is
 972          * one pending.
 973          */
 974         tasklet_schedule(&chan->tasklet);
 975         chan_dbg(chan, "irq: Exit\n");
 976         return IRQ_HANDLED;
 977 }
 978 
 979 static void dma_do_tasklet(unsigned long data)
 980 {
 981         struct fsldma_chan *chan = (struct fsldma_chan *)data;
 982 
 983         chan_dbg(chan, "tasklet entry\n");
 984 
 985         spin_lock(&chan->desc_lock);
 986 
 987         /* the hardware is now idle and ready for more */
 988         chan->idle = true;
 989 
 990         /* Run all cleanup for descriptors which have been completed */
 991         fsldma_cleanup_descriptors(chan);
 992 
 993         spin_unlock(&chan->desc_lock);
 994 
 995         chan_dbg(chan, "tasklet exit\n");
 996 }
 997 
 998 static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
 999 {
1000         struct fsldma_device *fdev = data;
1001         struct fsldma_chan *chan;
1002         unsigned int handled = 0;
1003         u32 gsr, mask;
1004         int i;
1005 
1006         gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1007                                                    : in_le32(fdev->regs);
1008         mask = 0xff000000;
1009         dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1010 
1011         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1012                 chan = fdev->chan[i];
1013                 if (!chan)
1014                         continue;
1015 
1016                 if (gsr & mask) {
1017                         dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1018                         fsldma_chan_irq(irq, chan);
1019                         handled++;
1020                 }
1021 
1022                 gsr &= ~mask;
1023                 mask >>= 8;
1024         }
1025 
1026         return IRQ_RETVAL(handled);
1027 }
1028 
1029 static void fsldma_free_irqs(struct fsldma_device *fdev)
1030 {
1031         struct fsldma_chan *chan;
1032         int i;
1033 
1034         if (fdev->irq) {
1035                 dev_dbg(fdev->dev, "free per-controller IRQ\n");
1036                 free_irq(fdev->irq, fdev);
1037                 return;
1038         }
1039 
1040         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1041                 chan = fdev->chan[i];
1042                 if (chan && chan->irq) {
1043                         chan_dbg(chan, "free per-channel IRQ\n");
1044                         free_irq(chan->irq, chan);
1045                 }
1046         }
1047 }
1048 
1049 static int fsldma_request_irqs(struct fsldma_device *fdev)
1050 {
1051         struct fsldma_chan *chan;
1052         int ret;
1053         int i;
1054 
1055         /* if we have a per-controller IRQ, use that */
1056         if (fdev->irq) {
1057                 dev_dbg(fdev->dev, "request per-controller IRQ\n");
1058                 ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1059                                   "fsldma-controller", fdev);
1060                 return ret;
1061         }
1062 
1063         /* no per-controller IRQ, use the per-channel IRQs */
1064         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1065                 chan = fdev->chan[i];
1066                 if (!chan)
1067                         continue;
1068 
1069                 if (!chan->irq) {
1070                         chan_err(chan, "interrupts property missing in device tree\n");
1071                         ret = -ENODEV;
1072                         goto out_unwind;
1073                 }
1074 
1075                 chan_dbg(chan, "request per-channel IRQ\n");
1076                 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1077                                   "fsldma-chan", chan);
1078                 if (ret) {
1079                         chan_err(chan, "unable to request per-channel IRQ\n");
1080                         goto out_unwind;
1081                 }
1082         }
1083 
1084         return 0;
1085 
1086 out_unwind:
1087         for (/* none */; i >= 0; i--) {
1088                 chan = fdev->chan[i];
1089                 if (!chan)
1090                         continue;
1091 
1092                 if (!chan->irq)
1093                         continue;
1094 
1095                 free_irq(chan->irq, chan);
1096         }
1097 
1098         return ret;
1099 }
1100 
1101 /*----------------------------------------------------------------------------*/
1102 /* OpenFirmware Subsystem                                                     */
1103 /*----------------------------------------------------------------------------*/
1104 
1105 static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1106         struct device_node *node, u32 feature, const char *compatible)
1107 {
1108         struct fsldma_chan *chan;
1109         struct resource res;
1110         int err;
1111 
1112         /* alloc channel */
1113         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1114         if (!chan) {
1115                 err = -ENOMEM;
1116                 goto out_return;
1117         }
1118 
1119         /* ioremap registers for use */
1120         chan->regs = of_iomap(node, 0);
1121         if (!chan->regs) {
1122                 dev_err(fdev->dev, "unable to ioremap registers\n");
1123                 err = -ENOMEM;
1124                 goto out_free_chan;
1125         }
1126 
1127         err = of_address_to_resource(node, 0, &res);
1128         if (err) {
1129                 dev_err(fdev->dev, "unable to find 'reg' property\n");
1130                 goto out_iounmap_regs;
1131         }
1132 
1133         chan->feature = feature;
1134         if (!fdev->feature)
1135                 fdev->feature = chan->feature;
1136 
1137         /*
1138          * If the DMA device's feature is different than the feature
1139          * of its channels, report the bug
1140          */
1141         WARN_ON(fdev->feature != chan->feature);
1142 
1143         chan->dev = fdev->dev;
1144         chan->id = (res.start & 0xfff) < 0x300 ?
1145                    ((res.start - 0x100) & 0xfff) >> 7 :
1146                    ((res.start - 0x200) & 0xfff) >> 7;
1147         if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1148                 dev_err(fdev->dev, "too many channels for device\n");
1149                 err = -EINVAL;
1150                 goto out_iounmap_regs;
1151         }
1152 
1153         fdev->chan[chan->id] = chan;
1154         tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1155         snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
1156 
1157         /* Initialize the channel */
1158         dma_init(chan);
1159 
1160         /* Clear cdar registers */
1161         set_cdar(chan, 0);
1162 
1163         switch (chan->feature & FSL_DMA_IP_MASK) {
1164         case FSL_DMA_IP_85XX:
1165                 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1166                 /* Fall through */
1167         case FSL_DMA_IP_83XX:
1168                 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1169                 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1170                 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1171                 chan->set_request_count = fsl_chan_set_request_count;
1172         }
1173 
1174         spin_lock_init(&chan->desc_lock);
1175         INIT_LIST_HEAD(&chan->ld_pending);
1176         INIT_LIST_HEAD(&chan->ld_running);
1177         INIT_LIST_HEAD(&chan->ld_completed);
1178         chan->idle = true;
1179 #ifdef CONFIG_PM
1180         chan->pm_state = RUNNING;
1181 #endif
1182 
1183         chan->common.device = &fdev->common;
1184         dma_cookie_init(&chan->common);
1185 
1186         /* find the IRQ line, if it exists in the device tree */
1187         chan->irq = irq_of_parse_and_map(node, 0);
1188 
1189         /* Add the channel to DMA device channel list */
1190         list_add_tail(&chan->common.device_node, &fdev->common.channels);
1191 
1192         dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
1193                  chan->irq ? chan->irq : fdev->irq);
1194 
1195         return 0;
1196 
1197 out_iounmap_regs:
1198         iounmap(chan->regs);
1199 out_free_chan:
1200         kfree(chan);
1201 out_return:
1202         return err;
1203 }
1204 
1205 static void fsl_dma_chan_remove(struct fsldma_chan *chan)
1206 {
1207         irq_dispose_mapping(chan->irq);
1208         list_del(&chan->common.device_node);
1209         iounmap(chan->regs);
1210         kfree(chan);
1211 }
1212 
1213 static int fsldma_of_probe(struct platform_device *op)
1214 {
1215         struct fsldma_device *fdev;
1216         struct device_node *child;
1217         int err;
1218 
1219         fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1220         if (!fdev) {
1221                 err = -ENOMEM;
1222                 goto out_return;
1223         }
1224 
1225         fdev->dev = &op->dev;
1226         INIT_LIST_HEAD(&fdev->common.channels);
1227 
1228         /* ioremap the registers for use */
1229         fdev->regs = of_iomap(op->dev.of_node, 0);
1230         if (!fdev->regs) {
1231                 dev_err(&op->dev, "unable to ioremap registers\n");
1232                 err = -ENOMEM;
1233                 goto out_free;
1234         }
1235 
1236         /* map the channel IRQ if it exists, but don't hook up the handler yet */
1237         fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1238 
1239         dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1240         dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
1241         fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1242         fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
1243         fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1244         fdev->common.device_tx_status = fsl_tx_status;
1245         fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1246         fdev->common.device_config = fsl_dma_device_config;
1247         fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
1248         fdev->common.dev = &op->dev;
1249 
1250         fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
1251         fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
1252         fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1253         fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1254 
1255         dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1256 
1257         platform_set_drvdata(op, fdev);
1258 
1259         /*
1260          * We cannot use of_platform_bus_probe() because there is no
1261          * of_platform_bus_remove(). Instead, we manually instantiate every DMA
1262          * channel object.
1263          */
1264         for_each_child_of_node(op->dev.of_node, child) {
1265                 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
1266                         fsl_dma_chan_probe(fdev, child,
1267                                 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
1268                                 "fsl,eloplus-dma-channel");
1269                 }
1270 
1271                 if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
1272                         fsl_dma_chan_probe(fdev, child,
1273                                 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
1274                                 "fsl,elo-dma-channel");
1275                 }
1276         }
1277 
1278         /*
1279  * Hook up the IRQ handler(s)
1280          *
1281          * If we have a per-controller interrupt, we prefer that to the
1282          * per-channel interrupts to reduce the number of shared interrupt
1283          * handlers on the same IRQ line
1284          */
1285         err = fsldma_request_irqs(fdev);
1286         if (err) {
1287                 dev_err(fdev->dev, "unable to request IRQs\n");
1288                 goto out_free_fdev;
1289         }
1290 
1291         dma_async_device_register(&fdev->common);
1292         return 0;
1293 
1294 out_free_fdev:
1295         irq_dispose_mapping(fdev->irq);
1296         iounmap(fdev->regs);
1297 out_free:
1298         kfree(fdev);
1299 out_return:
1300         return err;
1301 }
1302 
1303 static int fsldma_of_remove(struct platform_device *op)
1304 {
1305         struct fsldma_device *fdev;
1306         unsigned int i;
1307 
1308         fdev = platform_get_drvdata(op);
1309         dma_async_device_unregister(&fdev->common);
1310 
1311         fsldma_free_irqs(fdev);
1312 
1313         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1314                 if (fdev->chan[i])
1315                         fsl_dma_chan_remove(fdev->chan[i]);
1316         }
1317 
1318         iounmap(fdev->regs);
1319         kfree(fdev);
1320 
1321         return 0;
1322 }
1323 
1324 #ifdef CONFIG_PM
1325 static int fsldma_suspend_late(struct device *dev)
1326 {
1327         struct fsldma_device *fdev = dev_get_drvdata(dev);
1328         struct fsldma_chan *chan;
1329         int i;
1330 
1331         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1332                 chan = fdev->chan[i];
1333                 if (!chan)
1334                         continue;
1335 
1336                 spin_lock_bh(&chan->desc_lock);
1337                 if (unlikely(!chan->idle))
1338                         goto out;
1339                 chan->regs_save.mr = get_mr(chan);
1340                 chan->pm_state = SUSPENDED;
1341                 spin_unlock_bh(&chan->desc_lock);
1342         }
1343         return 0;
1344 
1345 out:
1346         for (; i >= 0; i--) {
1347                 chan = fdev->chan[i];
1348                 if (!chan)
1349                         continue;
1350                 chan->pm_state = RUNNING;
1351                 spin_unlock_bh(&chan->desc_lock);
1352         }
1353         return -EBUSY;
1354 }
1355 
1356 static int fsldma_resume_early(struct device *dev)
1357 {
1358         struct fsldma_device *fdev = dev_get_drvdata(dev);
1359         struct fsldma_chan *chan;
1360         u32 mode;
1361         int i;
1362 
1363         for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1364                 chan = fdev->chan[i];
1365                 if (!chan)
1366                         continue;
1367 
1368                 spin_lock_bh(&chan->desc_lock);
1369                 mode = chan->regs_save.mr
1370                         & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
1371                 set_mr(chan, mode);
1372                 chan->pm_state = RUNNING;
1373                 spin_unlock_bh(&chan->desc_lock);
1374         }
1375 
1376         return 0;
1377 }
1378 
1379 static const struct dev_pm_ops fsldma_pm_ops = {
1380         .suspend_late   = fsldma_suspend_late,
1381         .resume_early   = fsldma_resume_early,
1382 };
1383 #endif
1384 
1385 static const struct of_device_id fsldma_of_ids[] = {
1386         { .compatible = "fsl,elo3-dma", },
1387         { .compatible = "fsl,eloplus-dma", },
1388         { .compatible = "fsl,elo-dma", },
1389         {}
1390 };
1391 MODULE_DEVICE_TABLE(of, fsldma_of_ids);
1392 
1393 static struct platform_driver fsldma_of_driver = {
1394         .driver = {
1395                 .name = "fsl-elo-dma",
1396                 .of_match_table = fsldma_of_ids,
1397 #ifdef CONFIG_PM
1398                 .pm = &fsldma_pm_ops,
1399 #endif
1400         },
1401         .probe = fsldma_of_probe,
1402         .remove = fsldma_of_remove,
1403 };
1404 
1405 /*----------------------------------------------------------------------------*/
1406 /* Module Init / Exit                                                         */
1407 /*----------------------------------------------------------------------------*/
1408 
1409 static __init int fsldma_init(void)
1410 {
1411         pr_info("Freescale Elo series DMA driver\n");
1412         return platform_driver_register(&fsldma_of_driver);
1413 }
1414 
1415 static void __exit fsldma_exit(void)
1416 {
1417         platform_driver_unregister(&fsldma_of_driver);
1418 }
1419 
1420 subsys_initcall(fsldma_init);
1421 module_exit(fsldma_exit);
1422 
1423 MODULE_DESCRIPTION("Freescale Elo series DMA driver");
1424 MODULE_LICENSE("GPL");
