root/drivers/dma/at_hdmac.c

DEFINITIONS

This source file includes the following definitions.
  1. atc_get_xfer_width
  2. atc_first_active
  3. atc_first_queued
  4. atc_alloc_descriptor
  5. atc_desc_get
  6. atc_desc_put
  7. atc_desc_chain
  8. atc_dostart
  9. atc_get_desc_by_cookie
  10. atc_calc_bytes_left
  11. atc_get_bytes_left
  12. atc_chain_complete
  13. atc_complete_all
  14. atc_advance_work
  15. atc_handle_error
  16. atc_handle_cyclic
  17. atc_tasklet
  18. at_dma_interrupt
  19. atc_tx_submit
  20. atc_prep_dma_interleaved
  21. atc_prep_dma_memcpy
  22. atc_create_memset_desc
  23. atc_prep_dma_memset
  24. atc_prep_dma_memset_sg
  25. atc_prep_slave_sg
  26. atc_dma_cyclic_check_values
  27. atc_dma_cyclic_fill_desc
  28. atc_prep_dma_cyclic
  29. atc_config
  30. atc_pause
  31. atc_resume
  32. atc_terminate_all
  33. atc_tx_status
  34. atc_issue_pending
  35. atc_alloc_chan_resources
  36. atc_free_chan_resources
  37. at_dma_filter
  38. at_dma_xlate
  39. at_dma_xlate
  40. at_dma_get_driver_data
  41. at_dma_off
  42. at_dma_probe
  43. at_dma_remove
  44. at_dma_shutdown
  45. at_dma_prepare
  46. atc_suspend_cyclic
  47. at_dma_suspend_noirq
  48. atc_resume_cyclic
  49. at_dma_resume_noirq
  50. at_dma_init
  51. at_dma_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
   4  *
   5  * Copyright (C) 2008 Atmel Corporation
   6  *
   7  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
   8  * The only Atmel DMA Controller that is not covered by this driver is the one
   9  * found on AT91SAM9263.
  10  */
  11 
  12 #include <dt-bindings/dma/at91.h>
  13 #include <linux/clk.h>
  14 #include <linux/dmaengine.h>
  15 #include <linux/dma-mapping.h>
  16 #include <linux/dmapool.h>
  17 #include <linux/interrupt.h>
  18 #include <linux/module.h>
  19 #include <linux/platform_device.h>
  20 #include <linux/slab.h>
  21 #include <linux/of.h>
  22 #include <linux/of_device.h>
  23 #include <linux/of_dma.h>
  24 
  25 #include "at_hdmac_regs.h"
  26 #include "dmaengine.h"
  27 
  28 /*
  29  * Glossary
  30  * --------
  31  *
   32  * at_hdmac             : Name of the Atmel AHB DMA Controller
   33  * at_dma_ / atdma      : Atmel DMA controller entity related
   34  * atc_ / atchan        : Atmel DMA Channel entity related
  35  */
  36 
  37 #define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
  38 #define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
  39                                 |ATC_DIF(AT_DMA_MEM_IF))
  40 #define ATC_DMA_BUSWIDTHS\
  41         (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
  42         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
  43         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
  44         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
  45 
  46 #define ATC_MAX_DSCR_TRIALS     10
  47 
  48 /*
   49  * Initial number of descriptors to allocate for each channel. This can
   50  * be increased during DMA usage.
  51  */
  52 static unsigned int init_nr_desc_per_channel = 64;
  53 module_param(init_nr_desc_per_channel, uint, 0644);
  54 MODULE_PARM_DESC(init_nr_desc_per_channel,
  55                  "initial descriptors per channel (default: 64)");
  56 
  57 
  58 /* prototypes */
  59 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
  60 static void atc_issue_pending(struct dma_chan *chan);
  61 
  62 
  63 /*----------------------------------------------------------------------*/
  64 
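      /*
       * atc_get_xfer_width - compute the widest bus width usable for a transfer
       * @src: transfer source address
       * @dst: transfer destination address
       * @len: transfer length
       *
       * Returns the width as a power-of-two exponent: 2 (32-bit words) when
       * source, destination and length are all 4-byte aligned, 1 (16-bit
       * half-words) when they are only 2-byte aligned, 0 (bytes) otherwise.
       */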
  65 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
  66                                                 size_t len)
  67 {
  68         unsigned int width;
  69 
   70         if (!((src | dst | len) & 3))
  71                 width = 2;
  72         else if (!((src | dst | len) & 1))
  73                 width = 1;
  74         else
  75                 width = 0;
  76 
  77         return width;
  78 }
  79 
  80 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
  81 {
  82         return list_first_entry(&atchan->active_list,
  83                                 struct at_desc, desc_node);
  84 }
  85 
  86 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
  87 {
  88         return list_first_entry(&atchan->queue,
  89                                 struct at_desc, desc_node);
  90 }
  91 
  92 /**
  93  * atc_alloc_descriptor - allocate and return an initialized descriptor
  94  * @chan: the channel to allocate descriptors for
  95  * @gfp_flags: GFP allocation flags
  96  *
   97  * Note: The ack bit is set in the descriptor flags at creation time
   98  *       to make the initial allocation more convenient. This bit will be
   99  *       cleared and control will be given to the client at usage time
  100  *       (during the preparation functions).
 101  */
 102 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
 103                                             gfp_t gfp_flags)
 104 {
 105         struct at_desc  *desc = NULL;
 106         struct at_dma   *atdma = to_at_dma(chan->device);
 107         dma_addr_t phys;
 108 
 109         desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
 110         if (desc) {
 111                 INIT_LIST_HEAD(&desc->tx_list);
 112                 dma_async_tx_descriptor_init(&desc->txd, chan);
 113                 /* txd.flags will be overwritten in prep functions */
 114                 desc->txd.flags = DMA_CTRL_ACK;
 115                 desc->txd.tx_submit = atc_tx_submit;
 116                 desc->txd.phys = phys;
 117         }
 118 
 119         return desc;
 120 }
 121 
 122 /**
 123  * atc_desc_get - get an unused descriptor from free_list
 124  * @atchan: channel we want a new descriptor for
 125  */
 126 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 127 {
 128         struct at_desc *desc, *_desc;
 129         struct at_desc *ret = NULL;
 130         unsigned long flags;
 131         unsigned int i = 0;
 132 
 133         spin_lock_irqsave(&atchan->lock, flags);
 134         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 135                 i++;
 136                 if (async_tx_test_ack(&desc->txd)) {
 137                         list_del(&desc->desc_node);
 138                         ret = desc;
 139                         break;
 140                 }
 141                 dev_dbg(chan2dev(&atchan->chan_common),
 142                                 "desc %p not ACKed\n", desc);
 143         }
 144         spin_unlock_irqrestore(&atchan->lock, flags);
 145         dev_vdbg(chan2dev(&atchan->chan_common),
 146                 "scanned %u descriptors on freelist\n", i);
 147 
  148         /* no more descriptors available in the initial pool: create one more */
 149         if (!ret) {
 150                 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 151                 if (ret) {
 152                         spin_lock_irqsave(&atchan->lock, flags);
 153                         atchan->descs_allocated++;
 154                         spin_unlock_irqrestore(&atchan->lock, flags);
 155                 } else {
 156                         dev_err(chan2dev(&atchan->chan_common),
 157                                         "not enough descriptors available\n");
 158                 }
 159         }
 160 
 161         return ret;
 162 }
 163 
 164 /**
 165  * atc_desc_put - move a descriptor, including any children, to the free list
 166  * @atchan: channel we work on
 167  * @desc: descriptor, at the head of a chain, to move to free list
 168  */
 169 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 170 {
 171         if (desc) {
 172                 struct at_desc *child;
 173                 unsigned long flags;
 174 
 175                 spin_lock_irqsave(&atchan->lock, flags);
 176                 list_for_each_entry(child, &desc->tx_list, desc_node)
 177                         dev_vdbg(chan2dev(&atchan->chan_common),
 178                                         "moving child desc %p to freelist\n",
 179                                         child);
 180                 list_splice_init(&desc->tx_list, &atchan->free_list);
 181                 dev_vdbg(chan2dev(&atchan->chan_common),
 182                          "moving desc %p to freelist\n", desc);
 183                 list_add(&desc->desc_node, &atchan->free_list);
 184                 spin_unlock_irqrestore(&atchan->lock, flags);
 185         }
 186 }
 187 
 188 /**
 189  * atc_desc_chain - build chain adding a descriptor
 190  * @first: address of first descriptor of the chain
 191  * @prev: address of previous descriptor of the chain
 192  * @desc: descriptor to queue
 193  *
 194  * Called from prep_* functions
 195  */
 196 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
 197                            struct at_desc *desc)
 198 {
 199         if (!(*first)) {
 200                 *first = desc;
 201         } else {
 202                 /* inform the HW lli about chaining */
 203                 (*prev)->lli.dscr = desc->txd.phys;
 204                 /* insert the link descriptor to the LD ring */
 205                 list_add_tail(&desc->desc_node,
 206                                 &(*first)->tx_list);
 207         }
 208         *prev = desc;
 209 }
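      /*
       * Resulting layout (illustrative): the client only ever sees the first
       * descriptor's txd; the children sit on first->tx_list, while the
       * controller follows the lli.dscr physical pointers:
       *
       *   first --lli.dscr--> child1 --lli.dscr--> child2 --> ... --> EOL
       */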
 210 
 211 /**
 212  * atc_dostart - starts the DMA engine for real
 213  * @atchan: the channel we want to start
 214  * @first: first descriptor in the list we want to begin with
 215  *
 216  * Called with atchan->lock held and bh disabled
 217  */
 218 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 219 {
 220         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
 221 
 222         /* ASSERT:  channel is idle */
 223         if (atc_chan_is_enabled(atchan)) {
 224                 dev_err(chan2dev(&atchan->chan_common),
 225                         "BUG: Attempted to start non-idle channel\n");
 226                 dev_err(chan2dev(&atchan->chan_common),
 227                         "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
 228                         channel_readl(atchan, SADDR),
 229                         channel_readl(atchan, DADDR),
 230                         channel_readl(atchan, CTRLA),
 231                         channel_readl(atchan, CTRLB),
 232                         channel_readl(atchan, DSCR));
 233 
 234                 /* The tasklet will hopefully advance the queue... */
 235                 return;
 236         }
 237 
 238         vdbg_dump_regs(atchan);
 239 
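              /*
               * Clear the channel registers and point DSCR at the first LLI:
               * per the HDMAC descriptor-fetch mechanism, SADDR, DADDR, CTRLA
               * and CTRLB for the first buffer are then reloaded from the
               * descriptor in memory once the channel is enabled below.
               */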
 240         channel_writel(atchan, SADDR, 0);
 241         channel_writel(atchan, DADDR, 0);
 242         channel_writel(atchan, CTRLA, 0);
 243         channel_writel(atchan, CTRLB, 0);
 244         channel_writel(atchan, DSCR, first->txd.phys);
 245         channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
 246                        ATC_SPIP_BOUNDARY(first->boundary));
 247         channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 248                        ATC_DPIP_BOUNDARY(first->boundary));
 249         dma_writel(atdma, CHER, atchan->mask);
 250 
 251         vdbg_dump_regs(atchan);
 252 }
 253 
  254 /**
 255  * atc_get_desc_by_cookie - get the descriptor of a cookie
 256  * @atchan: the DMA channel
 257  * @cookie: the cookie to get the descriptor for
 258  */
 259 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
 260                                                 dma_cookie_t cookie)
 261 {
 262         struct at_desc *desc, *_desc;
 263 
 264         list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
 265                 if (desc->txd.cookie == cookie)
 266                         return desc;
 267         }
 268 
 269         list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
 270                 if (desc->txd.cookie == cookie)
 271                         return desc;
 272         }
 273 
 274         return NULL;
 275 }
 276 
 277 /**
 278  * atc_calc_bytes_left - calculates the number of bytes left according to the
 279  * value read from CTRLA.
 280  *
 281  * @current_len: the number of bytes left before reading CTRLA
 282  * @ctrla: the value of CTRLA
 283  */
 284 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
 285 {
 286         u32 btsize = (ctrla & ATC_BTSIZE_MAX);
 287         u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
 288 
 289         /*
 290          * According to the datasheet, when reading the Control A Register
 291          * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
 292          * number of transfers completed on the Source Interface.
 293          * So btsize is always a number of source width transfers.
 294          */
 295         return current_len - (btsize << src_width);
 296 }
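      /*
       * Example: with current_len == 100, src_width == 2 (32-bit transfers)
       * and a BTSIZE readback of 10, 10 << 2 == 40 bytes have already been
       * read from the source, so the residue is 100 - 40 == 60 bytes.
       */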
 297 
 298 /**
 299  * atc_get_bytes_left - get the number of bytes residue for a cookie
 300  * @chan: DMA channel
 301  * @cookie: transaction identifier to check status of
 302  */
 303 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 304 {
 305         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 306         struct at_desc *desc_first = atc_first_active(atchan);
 307         struct at_desc *desc;
 308         int ret;
 309         u32 ctrla, dscr, trials;
 310 
 311         /*
  312          * If the cookie doesn't match the currently running transfer, then
 313          * we can return the total length of the associated DMA transfer,
 314          * because it is still queued.
 315          */
 316         desc = atc_get_desc_by_cookie(atchan, cookie);
 317         if (desc == NULL)
 318                 return -EINVAL;
 319         else if (desc != desc_first)
 320                 return desc->total_len;
 321 
 322         /* cookie matches to the currently running transfer */
 323         ret = desc_first->total_len;
 324 
 325         if (desc_first->lli.dscr) {
 326                 /* hardware linked list transfer */
 327 
 328                 /*
 329                  * Calculate the residue by removing the length of the child
 330                  * descriptors already transferred from the total length.
 331                  * To get the current child descriptor we can use the value of
 332                  * the channel's DSCR register and compare it against the value
 333                  * of the hardware linked list structure of each child
 334                  * descriptor.
 335                  *
 336                  * The CTRLA register provides us with the amount of data
 337                  * already read from the source for the current child
 338                  * descriptor. So we can compute a more accurate residue by also
 339                  * removing the number of bytes corresponding to this amount of
 340                  * data.
 341                  *
  342                  * However, the DSCR and CTRLA registers cannot both be read
  343                  * atomically. Hence a race condition may occur: the first
  344                  * register read may refer to one child descriptor whereas the
  345                  * second read may refer to a later child descriptor in the
  346                  * list, because the DMA transfer progresses in between the
  347                  * two reads.
  348                  *
  349                  * One solution could have been to pause the DMA transfer, read
  350                  * the DSCR and CTRLA, then resume the DMA transfer. Nonetheless,
  351                  * this approach presents some drawbacks:
  352                  * - If the DMA transfer is paused, RX overruns or TX underruns
  353                  *   are more likely to occur depending on the system latency.
  354                  *   Taking the USART driver as an example, it uses a cyclic DMA
  355                  *   transfer to read data from the Receive Holding Register
  356                  *   (RHR) to avoid RX overruns since the RHR is not protected
  357                  *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
  358                  *   to compute the residue would break the USART driver design.
  359                  * - The atc_pause() function masks interrupts, but we'd rather
  360                  *   avoid doing so for system latency purposes.
  361                  *
  362                  * So we'd rather use another solution: the DSCR is read a
  363                  * first time, then the CTRLA, and then the DSCR is read a
  364                  * second time. If the two consecutive DSCR values are the
  365                  * same, we assume that both refer to the very same child
  366                  * descriptor, and that the CTRLA value read in between does
  367                  * too. For cyclic transfers, the assumption is that a full
  368                  * loop is "not so fast".
  369                  * If the two DSCR values differ, we read the CTRLA again,
  370                  * then the DSCR, until two consecutive DSCR reads are equal
  371                  * or the maximum number of trials is reached.
  372                  * This algorithm is very unlikely not to find a stable value
  373                  * for DSCR.
 374                  */
 375 
 376                 dscr = channel_readl(atchan, DSCR);
 377                 rmb(); /* ensure DSCR is read before CTRLA */
 378                 ctrla = channel_readl(atchan, CTRLA);
 379                 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
 380                         u32 new_dscr;
 381 
 382                         rmb(); /* ensure DSCR is read after CTRLA */
 383                         new_dscr = channel_readl(atchan, DSCR);
 384 
 385                         /*
 386                          * If the DSCR register value has not changed inside the
 387                          * DMA controller since the previous read, we assume
  388                          * that both the dscr and ctrla values refer to the
 389                          * very same descriptor.
 390                          */
 391                         if (likely(new_dscr == dscr))
 392                                 break;
 393 
 394                         /*
 395                          * DSCR has changed inside the DMA controller, so the
  396                          * previously read value of CTRLA may refer to an already
  397                          * processed descriptor and hence could be outdated.
 398                          * We need to update ctrla to match the current
 399                          * descriptor.
 400                          */
 401                         dscr = new_dscr;
 402                         rmb(); /* ensure DSCR is read before CTRLA */
 403                         ctrla = channel_readl(atchan, CTRLA);
 404                 }
 405                 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
 406                         return -ETIMEDOUT;
 407 
 408                 /* for the first descriptor we can be more accurate */
 409                 if (desc_first->lli.dscr == dscr)
 410                         return atc_calc_bytes_left(ret, ctrla);
 411 
 412                 ret -= desc_first->len;
 413                 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
 414                         if (desc->lli.dscr == dscr)
 415                                 break;
 416 
 417                         ret -= desc->len;
 418                 }
 419 
 420                 /*
 421                  * For the current descriptor in the chain we can calculate
 422                  * the remaining bytes using the channel's register.
 423                  */
 424                 ret = atc_calc_bytes_left(ret, ctrla);
 425         } else {
 426                 /* single transfer */
 427                 ctrla = channel_readl(atchan, CTRLA);
 428                 ret = atc_calc_bytes_left(ret, ctrla);
 429         }
 430 
 431         return ret;
 432 }
 433 
 434 /**
 435  * atc_chain_complete - finish work for one transaction chain
 436  * @atchan: channel we work on
  437  * @desc: descriptor at the head of the chain we want to complete
 438  *
 439  * Called with atchan->lock held and bh disabled */
 440 static void
 441 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 442 {
 443         struct dma_async_tx_descriptor  *txd = &desc->txd;
 444         struct at_dma                   *atdma = to_at_dma(atchan->chan_common.device);
 445 
 446         dev_vdbg(chan2dev(&atchan->chan_common),
 447                 "descriptor %u complete\n", txd->cookie);
 448 
  449         /* mark the descriptor as complete for non-cyclic cases only */
 450         if (!atc_chan_is_cyclic(atchan))
 451                 dma_cookie_complete(txd);
 452 
 453         /* If the transfer was a memset, free our temporary buffer */
 454         if (desc->memset_buffer) {
 455                 dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
 456                               desc->memset_paddr);
 457                 desc->memset_buffer = false;
 458         }
 459 
 460         /* move children to free_list */
 461         list_splice_init(&desc->tx_list, &atchan->free_list);
 462         /* move myself to free_list */
 463         list_move(&desc->desc_node, &atchan->free_list);
 464 
 465         dma_descriptor_unmap(txd);
  466         /* For cyclic transfers, there is no need to invoke the callback
  467          * again while stopping. */
 468         if (!atc_chan_is_cyclic(atchan)) {
 469                 /*
 470                  * The API requires that no submissions are done from a
 471                  * callback, so we don't need to drop the lock here
 472                  */
 473                 dmaengine_desc_get_callback_invoke(txd, NULL);
 474         }
 475 
 476         dma_run_dependencies(txd);
 477 }
 478 
 479 /**
 480  * atc_complete_all - finish work for all transactions
 481  * @atchan: channel to complete transactions for
 482  *
  483  * Also submits queued descriptors, if any
 484  *
 485  * Assume channel is idle while calling this function
 486  * Called with atchan->lock held and bh disabled
 487  */
 488 static void atc_complete_all(struct at_dma_chan *atchan)
 489 {
 490         struct at_desc *desc, *_desc;
 491         LIST_HEAD(list);
 492 
 493         dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 494 
 495         /*
 496          * Submit queued descriptors ASAP, i.e. before we go through
 497          * the completed ones.
 498          */
 499         if (!list_empty(&atchan->queue))
 500                 atc_dostart(atchan, atc_first_queued(atchan));
  501         /* empty active_list now that it is completed */
 502         list_splice_init(&atchan->active_list, &list);
 503         /* empty queue list by moving descriptors (if any) to active_list */
 504         list_splice_init(&atchan->queue, &atchan->active_list);
 505 
 506         list_for_each_entry_safe(desc, _desc, &list, desc_node)
 507                 atc_chain_complete(atchan, desc);
 508 }
 509 
 510 /**
 511  * atc_advance_work - at the end of a transaction, move forward
 512  * @atchan: channel where the transaction ended
 513  *
 514  * Called with atchan->lock held and bh disabled
 515  */
 516 static void atc_advance_work(struct at_dma_chan *atchan)
 517 {
 518         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 519 
 520         if (atc_chan_is_enabled(atchan))
 521                 return;
 522 
 523         if (list_empty(&atchan->active_list) ||
 524             list_is_singular(&atchan->active_list)) {
 525                 atc_complete_all(atchan);
 526         } else {
 527                 atc_chain_complete(atchan, atc_first_active(atchan));
 528                 /* advance work */
 529                 atc_dostart(atchan, atc_first_active(atchan));
 530         }
 531 }
 532 
 533 
 534 /**
 535  * atc_handle_error - handle errors reported by DMA controller
 536  * @atchan: channel where error occurs
 537  *
 538  * Called with atchan->lock held and bh disabled
 539  */
 540 static void atc_handle_error(struct at_dma_chan *atchan)
 541 {
 542         struct at_desc *bad_desc;
 543         struct at_desc *child;
 544 
 545         /*
 546          * The descriptor currently at the head of the active list is
  547          * broken. Since we don't have any way to report errors, we'll
 548          * just have to scream loudly and try to carry on.
 549          */
 550         bad_desc = atc_first_active(atchan);
 551         list_del_init(&bad_desc->desc_node);
 552 
  553         /* As we are stopped, take the opportunity to push queued
  554          * descriptors onto the active_list */
 555         list_splice_init(&atchan->queue, atchan->active_list.prev);
 556 
 557         /* Try to restart the controller */
 558         if (!list_empty(&atchan->active_list))
 559                 atc_dostart(atchan, atc_first_active(atchan));
 560 
 561         /*
  562          * KERN_CRIT may seem harsh, but since this only happens
 563          * when someone submits a bad physical address in a
 564          * descriptor, we should consider ourselves lucky that the
 565          * controller flagged an error instead of scribbling over
 566          * random memory locations.
 567          */
 568         dev_crit(chan2dev(&atchan->chan_common),
 569                         "Bad descriptor submitted for DMA!\n");
 570         dev_crit(chan2dev(&atchan->chan_common),
 571                         "  cookie: %d\n", bad_desc->txd.cookie);
 572         atc_dump_lli(atchan, &bad_desc->lli);
 573         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 574                 atc_dump_lli(atchan, &child->lli);
 575 
 576         /* Pretend the descriptor completed successfully */
 577         atc_chain_complete(atchan, bad_desc);
 578 }
 579 
 580 /**
 581  * atc_handle_cyclic - at the end of a period, run callback function
 582  * @atchan: channel used for cyclic operations
 583  *
 584  * Called with atchan->lock held and bh disabled
 585  */
 586 static void atc_handle_cyclic(struct at_dma_chan *atchan)
 587 {
 588         struct at_desc                  *first = atc_first_active(atchan);
 589         struct dma_async_tx_descriptor  *txd = &first->txd;
 590 
 591         dev_vdbg(chan2dev(&atchan->chan_common),
 592                         "new cyclic period llp 0x%08x\n",
 593                         channel_readl(atchan, DSCR));
 594 
 595         dmaengine_desc_get_callback_invoke(txd, NULL);
 596 }
 597 
 598 /*--  IRQ & Tasklet  ---------------------------------------------------*/
 599 
 600 static void atc_tasklet(unsigned long data)
 601 {
 602         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
 603         unsigned long flags;
 604 
 605         spin_lock_irqsave(&atchan->lock, flags);
 606         if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 607                 atc_handle_error(atchan);
 608         else if (atc_chan_is_cyclic(atchan))
 609                 atc_handle_cyclic(atchan);
 610         else
 611                 atc_advance_work(atchan);
 612 
 613         spin_unlock_irqrestore(&atchan->lock, flags);
 614 }
 615 
 616 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 617 {
 618         struct at_dma           *atdma = (struct at_dma *)dev_id;
 619         struct at_dma_chan      *atchan;
 620         int                     i;
 621         u32                     status, pending, imr;
 622         int                     ret = IRQ_NONE;
 623 
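              /*
               * Handle only interrupts that are both asserted in EBCISR and
               * enabled in EBCIMR, and re-read the status on each pass: new
               * events may be raised while earlier ones are being dispatched
               * to the per-channel tasklets.
               */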
 624         do {
 625                 imr = dma_readl(atdma, EBCIMR);
 626                 status = dma_readl(atdma, EBCISR);
 627                 pending = status & imr;
 628 
 629                 if (!pending)
 630                         break;
 631 
 632                 dev_vdbg(atdma->dma_common.dev,
 633                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
 634                          status, imr, pending);
 635 
 636                 for (i = 0; i < atdma->dma_common.chancnt; i++) {
 637                         atchan = &atdma->chan[i];
 638                         if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 639                                 if (pending & AT_DMA_ERR(i)) {
 640                                         /* Disable channel on AHB error */
 641                                         dma_writel(atdma, CHDR,
 642                                                 AT_DMA_RES(i) | atchan->mask);
 643                                         /* Give information to tasklet */
 644                                         set_bit(ATC_IS_ERROR, &atchan->status);
 645                                 }
 646                                 tasklet_schedule(&atchan->tasklet);
 647                                 ret = IRQ_HANDLED;
 648                         }
 649                 }
 650 
 651         } while (pending);
 652 
 653         return ret;
 654 }
 655 
 656 
 657 /*--  DMA Engine API  --------------------------------------------------*/
 658 
 659 /**
 660  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
  661  * @tx: descriptor at the head of the transaction chain
 662  *
 663  * Queue chain if DMA engine is working already
 664  *
 665  * Cookie increment and adding to active_list or queue must be atomic
 666  */
 667 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 668 {
 669         struct at_desc          *desc = txd_to_at_desc(tx);
 670         struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
 671         dma_cookie_t            cookie;
 672         unsigned long           flags;
 673 
 674         spin_lock_irqsave(&atchan->lock, flags);
 675         cookie = dma_cookie_assign(tx);
 676 
 677         if (list_empty(&atchan->active_list)) {
 678                 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 679                                 desc->txd.cookie);
 680                 atc_dostart(atchan, desc);
 681                 list_add_tail(&desc->desc_node, &atchan->active_list);
 682         } else {
 683                 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 684                                 desc->txd.cookie);
 685                 list_add_tail(&desc->desc_node, &atchan->queue);
 686         }
 687 
 688         spin_unlock_irqrestore(&atchan->lock, flags);
 689 
 690         return cookie;
 691 }
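      /*
       * Typical client-side sequence (illustrative sketch, not part of this
       * driver; it relies only on the generic dmaengine helpers):
       *
       *     txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
       *     cookie = dmaengine_submit(txd);     (lands in atc_tx_submit())
       *     dma_async_issue_pending(chan);      (see atc_issue_pending())
       */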
 692 
 693 /**
 694  * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 695  * @chan: the channel to prepare operation on
 696  * @xt: Interleaved transfer template
 697  * @flags: tx descriptor status flags
 698  */
 699 static struct dma_async_tx_descriptor *
 700 atc_prep_dma_interleaved(struct dma_chan *chan,
 701                          struct dma_interleaved_template *xt,
 702                          unsigned long flags)
 703 {
 704         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 705         struct data_chunk       *first;
 706         struct at_desc          *desc = NULL;
 707         size_t                  xfer_count;
 708         unsigned int            dwidth;
 709         u32                     ctrla;
 710         u32                     ctrlb;
 711         size_t                  len = 0;
 712         int                     i;
 713 
 714         if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
 715                 return NULL;
 716 
 717         first = xt->sgl;
 718 
 719         dev_info(chan2dev(chan),
  720                  "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
 721                 __func__, &xt->src_start, &xt->dst_start, xt->numf,
 722                 xt->frame_size, flags);
 723 
 724         /*
 725          * The controller can only "skip" X bytes every Y bytes, so we
  726          * need to make sure we are given a template that fits that
  727          * description, i.e. a template with chunks that always have the
 728          * same size, with the same ICGs.
 729          */
 730         for (i = 0; i < xt->frame_size; i++) {
 731                 struct data_chunk *chunk = xt->sgl + i;
 732 
 733                 if ((chunk->size != xt->sgl->size) ||
 734                     (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
 735                     (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
 736                         dev_err(chan2dev(chan),
 737                                 "%s: the controller can transfer only identical chunks\n",
 738                                 __func__);
 739                         return NULL;
 740                 }
 741 
 742                 len += chunk->size;
 743         }
 744 
 745         dwidth = atc_get_xfer_width(xt->src_start,
 746                                     xt->dst_start, len);
 747 
 748         xfer_count = len >> dwidth;
 749         if (xfer_count > ATC_BTSIZE_MAX) {
 750                 dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
 751                 return NULL;
 752         }
 753 
 754         ctrla = ATC_SRC_WIDTH(dwidth) |
 755                 ATC_DST_WIDTH(dwidth);
 756 
 757         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 758                 | ATC_SRC_ADDR_MODE_INCR
 759                 | ATC_DST_ADDR_MODE_INCR
 760                 | ATC_SRC_PIP
 761                 | ATC_DST_PIP
 762                 | ATC_FC_MEM2MEM;
 763 
 764         /* create the transfer */
 765         desc = atc_desc_get(atchan);
 766         if (!desc) {
 767                 dev_err(chan2dev(chan),
 768                         "%s: couldn't allocate our descriptor\n", __func__);
 769                 return NULL;
 770         }
 771 
 772         desc->lli.saddr = xt->src_start;
 773         desc->lli.daddr = xt->dst_start;
 774         desc->lli.ctrla = ctrla | xfer_count;
 775         desc->lli.ctrlb = ctrlb;
 776 
 777         desc->boundary = first->size >> dwidth;
 778         desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
 779         desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
 780 
 781         desc->txd.cookie = -EBUSY;
 782         desc->total_len = desc->len = len;
 783 
  784         /* set end-of-link on the last link descriptor of the list */
 785         set_desc_eol(desc);
 786 
 787         desc->txd.flags = flags; /* client is in control of this ack */
 788 
 789         return &desc->txd;
 790 }
 791 
 792 /**
 793  * atc_prep_dma_memcpy - prepare a memcpy operation
 794  * @chan: the channel to prepare operation on
  795  * @dest: operation DMA destination address
  796  * @src: operation DMA source address
 797  * @len: operation length
 798  * @flags: tx descriptor status flags
 799  */
 800 static struct dma_async_tx_descriptor *
 801 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 802                 size_t len, unsigned long flags)
 803 {
 804         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 805         struct at_desc          *desc = NULL;
 806         struct at_desc          *first = NULL;
 807         struct at_desc          *prev = NULL;
 808         size_t                  xfer_count;
 809         size_t                  offset;
 810         unsigned int            src_width;
 811         unsigned int            dst_width;
 812         u32                     ctrla;
 813         u32                     ctrlb;
 814 
 815         dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
 816                         &dest, &src, len, flags);
 817 
 818         if (unlikely(!len)) {
 819                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
 820                 return NULL;
 821         }
 822 
 823         ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 824                 | ATC_SRC_ADDR_MODE_INCR
 825                 | ATC_DST_ADDR_MODE_INCR
 826                 | ATC_FC_MEM2MEM;
 827 
 828         /*
 829          * We can be a lot more clever here, but this should take care
 830          * of the most common optimization.
 831          */
 832         src_width = dst_width = atc_get_xfer_width(src, dest, len);
 833 
 834         ctrla = ATC_SRC_WIDTH(src_width) |
 835                 ATC_DST_WIDTH(dst_width);
 836 
 837         for (offset = 0; offset < len; offset += xfer_count << src_width) {
 838                 xfer_count = min_t(size_t, (len - offset) >> src_width,
 839                                 ATC_BTSIZE_MAX);
 840 
 841                 desc = atc_desc_get(atchan);
 842                 if (!desc)
 843                         goto err_desc_get;
 844 
 845                 desc->lli.saddr = src + offset;
 846                 desc->lli.daddr = dest + offset;
 847                 desc->lli.ctrla = ctrla | xfer_count;
 848                 desc->lli.ctrlb = ctrlb;
 849 
 850                 desc->txd.cookie = 0;
 851                 desc->len = xfer_count << src_width;
 852 
 853                 atc_desc_chain(&first, &prev, desc);
 854         }
 855 
  856         /* First descriptor of the chain embeds additional information */
 857         first->txd.cookie = -EBUSY;
 858         first->total_len = len;
 859 
  860         /* set end-of-link on the last link descriptor of the list */
 861         set_desc_eol(desc);
 862 
 863         first->txd.flags = flags; /* client is in control of this ack */
 864 
 865         return &first->txd;
 866 
 867 err_desc_get:
 868         atc_desc_put(atchan, first);
 869         return NULL;
 870 }
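      /*
       * Note on sizing: each link descriptor moves at most ATC_BTSIZE_MAX
       * (0xffff) transfer units. At 32-bit width that is 0xffff * 4 bytes,
       * i.e. 256 KiB - 4, per descriptor, so larger copies are split across
       * the LLI chain built above.
       */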
 871 
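      /*
       * The controller has no native memset. The memset operations below
       * write the fill value into a 4-byte buffer taken from the dedicated
       * DMA pool, then program a transfer with a fixed source address and an
       * incrementing destination address, which replicates the value across
       * the destination buffer.
       */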
 872 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
 873                                               dma_addr_t psrc,
 874                                               dma_addr_t pdst,
 875                                               size_t len)
 876 {
 877         struct at_dma_chan *atchan = to_at_dma_chan(chan);
 878         struct at_desc *desc;
 879         size_t xfer_count;
 880 
 881         u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
 882         u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
 883                 ATC_SRC_ADDR_MODE_FIXED |
 884                 ATC_DST_ADDR_MODE_INCR |
 885                 ATC_FC_MEM2MEM;
 886 
 887         xfer_count = len >> 2;
 888         if (xfer_count > ATC_BTSIZE_MAX) {
 889                 dev_err(chan2dev(chan), "%s: buffer is too big\n",
 890                         __func__);
 891                 return NULL;
 892         }
 893 
 894         desc = atc_desc_get(atchan);
 895         if (!desc) {
 896                 dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
 897                         __func__);
 898                 return NULL;
 899         }
 900 
 901         desc->lli.saddr = psrc;
 902         desc->lli.daddr = pdst;
 903         desc->lli.ctrla = ctrla | xfer_count;
 904         desc->lli.ctrlb = ctrlb;
 905 
 906         desc->txd.cookie = 0;
 907         desc->len = len;
 908 
 909         return desc;
 910 }
 911 
 912 /**
  913  * atc_prep_dma_memset - prepare a memset operation
 914  * @chan: the channel to prepare operation on
  915  * @dest: operation DMA destination address
 916  * @value: value to set memory buffer to
 917  * @len: operation length
 918  * @flags: tx descriptor status flags
 919  */
 920 static struct dma_async_tx_descriptor *
 921 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 922                     size_t len, unsigned long flags)
 923 {
 924         struct at_dma           *atdma = to_at_dma(chan->device);
 925         struct at_desc          *desc;
 926         void __iomem            *vaddr;
 927         dma_addr_t              paddr;
 928 
 929         dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
 930                 &dest, value, len, flags);
 931 
 932         if (unlikely(!len)) {
 933                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
 934                 return NULL;
 935         }
 936 
 937         if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
 938                 dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
 939                         __func__);
 940                 return NULL;
 941         }
 942 
 943         vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
 944         if (!vaddr) {
 945                 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
 946                         __func__);
 947                 return NULL;
 948         }
  949         *(u32 *)vaddr = value;
 950 
 951         desc = atc_create_memset_desc(chan, paddr, dest, len);
 952         if (!desc) {
 953                 dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
 954                         __func__);
 955                 goto err_free_buffer;
 956         }
 957 
 958         desc->memset_paddr = paddr;
 959         desc->memset_vaddr = vaddr;
 960         desc->memset_buffer = true;
 961 
 962         desc->txd.cookie = -EBUSY;
 963         desc->total_len = len;
 964 
 965         /* set end-of-link on the descriptor */
 966         set_desc_eol(desc);
 967 
 968         desc->txd.flags = flags;
 969 
 970         return &desc->txd;
 971 
 972 err_free_buffer:
 973         dma_pool_free(atdma->memset_pool, vaddr, paddr);
 974         return NULL;
 975 }
 976 
 977 static struct dma_async_tx_descriptor *
 978 atc_prep_dma_memset_sg(struct dma_chan *chan,
 979                        struct scatterlist *sgl,
 980                        unsigned int sg_len, int value,
 981                        unsigned long flags)
 982 {
 983         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 984         struct at_dma           *atdma = to_at_dma(chan->device);
 985         struct at_desc          *desc = NULL, *first = NULL, *prev = NULL;
 986         struct scatterlist      *sg;
 987         void __iomem            *vaddr;
 988         dma_addr_t              paddr;
 989         size_t                  total_len = 0;
 990         int                     i;
 991 
  992         dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
 993                  value, sg_len, flags);
 994 
 995         if (unlikely(!sgl || !sg_len)) {
 996                 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
 997                         __func__);
 998                 return NULL;
 999         }
1000 
1001         vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
1002         if (!vaddr) {
1003                 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1004                         __func__);
1005                 return NULL;
1006         }
 1007         *(u32 *)vaddr = value;
1008 
1009         for_each_sg(sgl, sg, sg_len, i) {
1010                 dma_addr_t dest = sg_dma_address(sg);
1011                 size_t len = sg_dma_len(sg);
1012 
1013                 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1014                          __func__, &dest, len);
1015 
1016                 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1017                         dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1018                                 __func__);
1019                         goto err_put_desc;
1020                 }
1021 
1022                 desc = atc_create_memset_desc(chan, paddr, dest, len);
1023                 if (!desc)
1024                         goto err_put_desc;
1025 
1026                 atc_desc_chain(&first, &prev, desc);
1027 
1028                 total_len += len;
1029         }
1030 
1031         /*
1032          * Only set the buffer pointers on the last descriptor to
 1033          * avoid freeing the buffer while the transfer is still ongoing
1034          */
1035         desc->memset_paddr = paddr;
1036         desc->memset_vaddr = vaddr;
1037         desc->memset_buffer = true;
1038 
1039         first->txd.cookie = -EBUSY;
1040         first->total_len = total_len;
1041 
1042         /* set end-of-link on the descriptor */
1043         set_desc_eol(desc);
1044 
1045         first->txd.flags = flags;
1046 
1047         return &first->txd;
1048 
1049 err_put_desc:
1050         atc_desc_put(atchan, first);
1051         return NULL;
1052 }
1053 
1054 /**
1055  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1056  * @chan: DMA channel
1057  * @sgl: scatterlist to transfer to/from
 1058  * @sg_len: number of entries in @sgl
1059  * @direction: DMA direction
1060  * @flags: tx descriptor status flags
1061  * @context: transaction context (ignored)
1062  */
1063 static struct dma_async_tx_descriptor *
1064 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1065                 unsigned int sg_len, enum dma_transfer_direction direction,
1066                 unsigned long flags, void *context)
1067 {
1068         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1069         struct at_dma_slave     *atslave = chan->private;
1070         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1071         struct at_desc          *first = NULL;
1072         struct at_desc          *prev = NULL;
1073         u32                     ctrla;
1074         u32                     ctrlb;
1075         dma_addr_t              reg;
1076         unsigned int            reg_width;
1077         unsigned int            mem_width;
1078         unsigned int            i;
1079         struct scatterlist      *sg;
1080         size_t                  total_len = 0;
1081 
1082         dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1083                         sg_len,
1084                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1085                         flags);
1086 
1087         if (unlikely(!atslave || !sg_len)) {
1088                 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1089                 return NULL;
1090         }
1091 
1092         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1093                 | ATC_DCSIZE(sconfig->dst_maxburst);
1094         ctrlb = ATC_IEN;
1095 
1096         switch (direction) {
1097         case DMA_MEM_TO_DEV:
1098                 reg_width = convert_buswidth(sconfig->dst_addr_width);
1099                 ctrla |=  ATC_DST_WIDTH(reg_width);
1100                 ctrlb |=  ATC_DST_ADDR_MODE_FIXED
1101                         | ATC_SRC_ADDR_MODE_INCR
1102                         | ATC_FC_MEM2PER
1103                         | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1104                 reg = sconfig->dst_addr;
1105                 for_each_sg(sgl, sg, sg_len, i) {
1106                         struct at_desc  *desc;
1107                         u32             len;
1108                         u32             mem;
1109 
1110                         desc = atc_desc_get(atchan);
1111                         if (!desc)
1112                                 goto err_desc_get;
1113 
1114                         mem = sg_dma_address(sg);
1115                         len = sg_dma_len(sg);
1116                         if (unlikely(!len)) {
1117                                 dev_dbg(chan2dev(chan),
1118                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
1119                                 goto err;
1120                         }
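                              /*
                               * Use 32-bit memory accesses when the address
                               * and length are word-aligned, otherwise fall
                               * back to byte accesses (same heuristic in the
                               * DMA_DEV_TO_MEM branch below).
                               */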
1121                         mem_width = 2;
1122                         if (unlikely(mem & 3 || len & 3))
1123                                 mem_width = 0;
1124 
1125                         desc->lli.saddr = mem;
1126                         desc->lli.daddr = reg;
1127                         desc->lli.ctrla = ctrla
1128                                         | ATC_SRC_WIDTH(mem_width)
1129                                         | len >> mem_width;
1130                         desc->lli.ctrlb = ctrlb;
1131                         desc->len = len;
1132 
1133                         atc_desc_chain(&first, &prev, desc);
1134                         total_len += len;
1135                 }
1136                 break;
1137         case DMA_DEV_TO_MEM:
1138                 reg_width = convert_buswidth(sconfig->src_addr_width);
1139                 ctrla |=  ATC_SRC_WIDTH(reg_width);
1140                 ctrlb |=  ATC_DST_ADDR_MODE_INCR
1141                         | ATC_SRC_ADDR_MODE_FIXED
1142                         | ATC_FC_PER2MEM
1143                         | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1144 
1145                 reg = sconfig->src_addr;
1146                 for_each_sg(sgl, sg, sg_len, i) {
1147                         struct at_desc  *desc;
1148                         u32             len;
1149                         u32             mem;
1150 
1151                         desc = atc_desc_get(atchan);
1152                         if (!desc)
1153                                 goto err_desc_get;
1154 
1155                         mem = sg_dma_address(sg);
1156                         len = sg_dma_len(sg);
1157                         if (unlikely(!len)) {
1158                                 dev_dbg(chan2dev(chan),
1159                                         "prep_slave_sg: sg(%d) data length is zero\n", i);
1160                                 goto err;
1161                         }
1162                         mem_width = 2;
1163                         if (unlikely(mem & 3 || len & 3))
1164                                 mem_width = 0;
1165 
1166                         desc->lli.saddr = reg;
1167                         desc->lli.daddr = mem;
1168                         desc->lli.ctrla = ctrla
1169                                         | ATC_DST_WIDTH(mem_width)
1170                                         | len >> reg_width;
1171                         desc->lli.ctrlb = ctrlb;
1172                         desc->len = len;
1173 
1174                         atc_desc_chain(&first, &prev, desc);
1175                         total_len += len;
1176                 }
1177                 break;
1178         default:
1179                 return NULL;
1180         }
1181 
 1182         /* set end-of-link on the last link descriptor of the list */
1183         set_desc_eol(prev);
1184 
 1185         /* First descriptor of the chain embeds additional information */
1186         first->txd.cookie = -EBUSY;
1187         first->total_len = total_len;
1188 
 1189         /* first link descriptor of the list is responsible for the flags */
1190         first->txd.flags = flags; /* client is in control of this ack */
1191 
1192         return &first->txd;
1193 
1194 err_desc_get:
1195         dev_err(chan2dev(chan), "not enough descriptors available\n");
1196 err:
1197         atc_desc_put(atchan, first);
1198         return NULL;
1199 }
1200 
1201 /**
 1202  * atc_dma_cyclic_check_values - check for too big or unaligned periods
 1203  * and an unaligned DMA buffer
1204  */
1205 static int
1206 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1207                 size_t period_len)
1208 {
1209         if (period_len > (ATC_BTSIZE_MAX << reg_width))
1210                 goto err_out;
1211         if (unlikely(period_len & ((1 << reg_width) - 1)))
1212                 goto err_out;
1213         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1214                 goto err_out;
1215 
1216         return 0;
1217 
1218 err_out:
1219         return -EINVAL;
1220 }
1221 
1222 /**
1223  * atc_dma_cyclic_fill_desc - Fill one period descriptor
1224  */
1225 static int
1226 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1227                 unsigned int period_index, dma_addr_t buf_addr,
1228                 unsigned int reg_width, size_t period_len,
1229                 enum dma_transfer_direction direction)
1230 {
1231         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1232         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1233         u32                     ctrla;
1234 
 1235         /* prepare common CTRLA value */
1236         ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1237                 | ATC_DCSIZE(sconfig->dst_maxburst)
1238                 | ATC_DST_WIDTH(reg_width)
1239                 | ATC_SRC_WIDTH(reg_width)
1240                 | period_len >> reg_width;
1241 
1242         switch (direction) {
1243         case DMA_MEM_TO_DEV:
1244                 desc->lli.saddr = buf_addr + (period_len * period_index);
1245                 desc->lli.daddr = sconfig->dst_addr;
1246                 desc->lli.ctrla = ctrla;
1247                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1248                                 | ATC_SRC_ADDR_MODE_INCR
1249                                 | ATC_FC_MEM2PER
1250                                 | ATC_SIF(atchan->mem_if)
1251                                 | ATC_DIF(atchan->per_if);
1252                 desc->len = period_len;
1253                 break;
1254 
1255         case DMA_DEV_TO_MEM:
1256                 desc->lli.saddr = sconfig->src_addr;
1257                 desc->lli.daddr = buf_addr + (period_len * period_index);
1258                 desc->lli.ctrla = ctrla;
1259                 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1260                                 | ATC_SRC_ADDR_MODE_FIXED
1261                                 | ATC_FC_PER2MEM
1262                                 | ATC_SIF(atchan->per_if)
1263                                 | ATC_DIF(atchan->mem_if);
1264                 desc->len = period_len;
1265                 break;
1266 
1267         default:
1268                 return -EINVAL;
1269         }
1270 
1271         return 0;
1272 }
1273 
1274 /**
1275  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1276  * @chan: the DMA channel to prepare
1277  * @buf_addr: physical DMA address where the buffer starts
1278  * @buf_len: total number of bytes for the entire buffer
1279  * @period_len: number of bytes for each period
1280  * @direction: transfer direction, to or from device
1281  * @flags: tx descriptor status flags
1282  */
1283 static struct dma_async_tx_descriptor *
1284 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1285                 size_t period_len, enum dma_transfer_direction direction,
1286                 unsigned long flags)
1287 {
 1288         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 1289         struct at_dma_slave     *atslave = chan->private;
 1290         struct dma_slave_config *sconfig = &atchan->dma_sconfig;
 1291         struct at_desc          *first = NULL;
 1292         struct at_desc          *prev = NULL;
 1293         unsigned long           was_cyclic;
 1294         unsigned int            reg_width;
 1295         unsigned int            periods;
 1296         unsigned int            i;
 1297 
 1298         if (unlikely(!atslave || !buf_len || !period_len)) {
 1299                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
 1300                 return NULL;
 1301         }
 1302 
 1303         periods = buf_len / period_len;
 1304         dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %u (%zu/%zu)\n",
 1305                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
 1306                         &buf_addr,
 1307                         periods, buf_len, period_len);
1308         was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1309         if (was_cyclic) {
1310                 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1311                 return NULL;
1312         }
1313 
1314         if (unlikely(!is_slave_direction(direction)))
1315                 goto err_out;
1316 
1317         if (direction == DMA_MEM_TO_DEV)
1318                 reg_width = convert_buswidth(sconfig->dst_addr_width);
1319         else
1320                 reg_width = convert_buswidth(sconfig->src_addr_width);
1321 
1322         /* Check for too big/unaligned periods and unaligned DMA buffer */
1323         if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1324                 goto err_out;
1325 
1326         /* build cyclic linked list */
1327         for (i = 0; i < periods; i++) {
1328                 struct at_desc  *desc;
1329 
1330                 desc = atc_desc_get(atchan);
1331                 if (!desc)
1332                         goto err_desc_get;
1333 
1334                 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1335                                              reg_width, period_len, direction))
1336                         goto err_desc_get;
1337 
1338                 atc_desc_chain(&first, &prev, desc);
1339         }
1340 
 1341         /* let's make a cyclic list */
1342         prev->lli.dscr = first->txd.phys;
1343 
1344         /* First descriptor of the chain embeds additional information */
1345         first->txd.cookie = -EBUSY;
1346         first->total_len = buf_len;
1347 
1348         return &first->txd;
1349 
1350 err_desc_get:
1351         dev_err(chan2dev(chan), "not enough descriptors available\n");
1352         atc_desc_put(atchan, first);
1353 err_out:
1354         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1355         return NULL;
1356 }
1357 
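/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * normally reaches atc_prep_dma_cyclic() through the generic dmaengine
 * wrapper. The channel, buffer, period length and callback names below
 * are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * period_len,
 *					period_len, DMA_MEM_TO_DEV,
 *					DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = period_done;		(invoked once per period)
 *	txd->callback_param = priv;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
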
1358 static int atc_config(struct dma_chan *chan,
1359                       struct dma_slave_config *sconfig)
1360 {
1361         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1362 
1363         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1364 
1365         /* Check if the channel is configured for slave transfers */
1366         if (!chan->private)
1367                 return -EINVAL;
1368 
1369         memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1370 
1371         convert_burst(&atchan->dma_sconfig.src_maxburst);
1372         convert_burst(&atchan->dma_sconfig.dst_maxburst);
1373 
1374         return 0;
1375 }
1376 
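/*
 * Configuration sketch (illustrative): the dma_slave_config that a client
 * hands to dmaengine_slave_config() before preparing slave transfers ends
 * up in atc_config() above. The FIFO address and widths are hypothetical
 * peripheral values.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
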
1377 static int atc_pause(struct dma_chan *chan)
1378 {
1379         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1380         struct at_dma           *atdma = to_at_dma(chan->device);
1381         int                     chan_id = atchan->chan_common.chan_id;
1382         unsigned long           flags;
1383 
1384         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1385 
1386         spin_lock_irqsave(&atchan->lock, flags);
1387 
1388         dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1389         set_bit(ATC_IS_PAUSED, &atchan->status);
1390 
1391         spin_unlock_irqrestore(&atchan->lock, flags);
1392 
1393         return 0;
1394 }
1395 
1396 static int atc_resume(struct dma_chan *chan)
1397 {
1398         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1399         struct at_dma           *atdma = to_at_dma(chan->device);
1400         int                     chan_id = atchan->chan_common.chan_id;
1401         unsigned long           flags;
1402 
1403         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1404 
1405         if (!atc_chan_is_paused(atchan))
1406                 return 0;
1407 
1408         spin_lock_irqsave(&atchan->lock, flags);
1409 
1410         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1411         clear_bit(ATC_IS_PAUSED, &atchan->status);
1412 
1413         spin_unlock_irqrestore(&atchan->lock, flags);
1414 
1415         return 0;
1416 }
1417 
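/*
 * Pause/resume sketch (illustrative): clients drive atc_pause() and
 * atc_resume() through the generic wrappers. Pausing before reading the
 * residue gives a stable snapshot, e.g. for cyclic audio transfers.
 *
 *	dmaengine_pause(chan);
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	...
 *	dmaengine_resume(chan);
 */
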
1418 static int atc_terminate_all(struct dma_chan *chan)
1419 {
1420         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1421         struct at_dma           *atdma = to_at_dma(chan->device);
1422         int                     chan_id = atchan->chan_common.chan_id;
1423         struct at_desc          *desc, *_desc;
1424         unsigned long           flags;
1425 
1426         LIST_HEAD(list);
1427 
1428         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1429 
1430         /*
1431          * This is only called when something went wrong elsewhere, so
1432          * we don't really care about the data. Just disable the
1433          * channel. We still have to poll the channel enable bit due
1434          * to AHB/HSB limitations.
1435          */
1436         spin_lock_irqsave(&atchan->lock, flags);
1437 
1438         /* disabling channel: must also remove suspend state */
1439         dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1440 
1441         /* confirm that this channel is disabled */
1442         while (dma_readl(atdma, CHSR) & atchan->mask)
1443                 cpu_relax();
1444 
1445         /* active_list entries will end up before queued entries */
1446         list_splice_init(&atchan->queue, &list);
1447         list_splice_init(&atchan->active_list, &list);
1448 
1449         /* Flush all pending and queued descriptors */
1450         list_for_each_entry_safe(desc, _desc, &list, desc_node)
1451                 atc_chain_complete(atchan, desc);
1452 
1453         clear_bit(ATC_IS_PAUSED, &atchan->status);
1454         /* if channel dedicated to cyclic operations, free it */
1455         clear_bit(ATC_IS_CYCLIC, &atchan->status);
1456 
1457         spin_unlock_irqrestore(&atchan->lock, flags);
1458 
1459         return 0;
1460 }
1461 
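/*
 * Teardown sketch (illustrative): a client aborting outstanding work uses
 * the generic wrapper, which lands in atc_terminate_all():
 *
 *	dmaengine_terminate_all(chan);
 */
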
1462 /**
1463  * atc_tx_status - poll for transaction completion
1464  * @chan: DMA channel
1465  * @cookie: transaction identifier to check status of
1466  * @txstate: if not %NULL updated with transaction state
1467  *
1468  * If @txstate is passed in, upon return it reflects the driver
1469  * internal state and can be used with dma_async_is_complete() to check
1470  * the status of multiple cookies without re-checking hardware state.
1471  */
1472 static enum dma_status
1473 atc_tx_status(struct dma_chan *chan,
1474                 dma_cookie_t cookie,
1475                 struct dma_tx_state *txstate)
1476 {
1477         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1478         unsigned long           flags;
1479         enum dma_status         ret;
1480         int bytes = 0;
1481 
1482         ret = dma_cookie_status(chan, cookie, txstate);
1483         if (ret == DMA_COMPLETE)
1484                 return ret;
1485         /*
1486          * There's no point calculating the residue if there's
1487          * no txstate to store the value.
1488          */
1489         if (!txstate)
1490                 return DMA_ERROR;
1491 
1492         spin_lock_irqsave(&atchan->lock, flags);
1493 
1494         /* Get the number of bytes left in the active transactions */
1495         bytes = atc_get_bytes_left(chan, cookie);
1496 
1497         spin_unlock_irqrestore(&atchan->lock, flags);
1498 
1499         if (unlikely(bytes < 0)) {
1500                 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1501                 return DMA_ERROR;
1502         }
1503 
1504         dma_set_residue(txstate, bytes);
1505 
1506         dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1507                  ret, cookie, bytes);
1508 
1509         return ret;
1510 }
1511 
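/*
 * Residue sketch (illustrative): how a client queries progress through
 * this hook; "cookie" is the value returned by dmaengine_submit().
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes left\n", state.residue);
 */
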
1512 /**
1513  * atc_issue_pending - try to finish work
1514  * @chan: target DMA channel
1515  */
1516 static void atc_issue_pending(struct dma_chan *chan)
1517 {
1518         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1519         unsigned long           flags;
1520 
1521         dev_vdbg(chan2dev(chan), "issue_pending\n");
1522 
1523         /* Not needed for cyclic transfers */
1524         if (atc_chan_is_cyclic(atchan))
1525                 return;
1526 
1527         spin_lock_irqsave(&atchan->lock, flags);
1528         atc_advance_work(atchan);
1529         spin_unlock_irqrestore(&atchan->lock, flags);
1530 }
1531 
1532 /**
1533  * atc_alloc_chan_resources - allocate resources for DMA channel
1534  * @chan: allocate descriptor resources for this channel
1536  *
1537  * Return: the number of descriptors allocated
1538  */
1539 static int atc_alloc_chan_resources(struct dma_chan *chan)
1540 {
1541         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1542         struct at_dma           *atdma = to_at_dma(chan->device);
1543         struct at_desc          *desc;
1544         struct at_dma_slave     *atslave;
1545         unsigned long           flags;
1546         int                     i;
1547         u32                     cfg;
1548         LIST_HEAD(tmp_list);
1549 
1550         dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1551 
1552         /* ASSERT:  channel is idle */
1553         if (atc_chan_is_enabled(atchan)) {
1554                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1555                 return -EIO;
1556         }
1557 
1558         cfg = ATC_DEFAULT_CFG;
1559 
1560         atslave = chan->private;
1561         if (atslave) {
1562                 /*
1563                  * We need controller-specific data to set up slave
1564                  * transfers.
1565                  */
1566                 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1567 
1568                 /* if cfg configuration specified take it instead of default */
1569                 if (atslave->cfg)
1570                         cfg = atslave->cfg;
1571         }
1572 
1573         /* Have we already been set up? If so, reconfigure the channel
1574          * but do not reallocate descriptors. */
1575         if (!list_empty(&atchan->free_list))
1576                 return atchan->descs_allocated;
1577 
1578         /* Allocate initial pool of descriptors */
1579         for (i = 0; i < init_nr_desc_per_channel; i++) {
1580                 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1581                 if (!desc) {
1582                         dev_err(atdma->dma_common.dev,
1583                                 "Only %d initial descriptors\n", i);
1584                         break;
1585                 }
1586                 list_add_tail(&desc->desc_node, &tmp_list);
1587         }
1588 
1589         spin_lock_irqsave(&atchan->lock, flags);
1590         atchan->descs_allocated = i;
1591         list_splice(&tmp_list, &atchan->free_list);
1592         dma_cookie_init(chan);
1593         spin_unlock_irqrestore(&atchan->lock, flags);
1594 
1595         /* channel parameters */
1596         channel_writel(atchan, CFG, cfg);
1597 
1598         dev_dbg(chan2dev(chan),
1599                 "alloc_chan_resources: allocated %d descriptors\n",
1600                 atchan->descs_allocated);
1601 
1602         return atchan->descs_allocated;
1603 }
1604 
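/*
 * Allocation sketch (illustrative): descriptors are allocated lazily when
 * a client grabs the channel; "rx" is a hypothetical dma-names entry.
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);	(ends in atc_free_chan_resources())
 */
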
1605 /**
1606  * atc_free_chan_resources - free all channel resources
1607  * @chan: DMA channel
1608  */
1609 static void atc_free_chan_resources(struct dma_chan *chan)
1610 {
1611         struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1612         struct at_dma           *atdma = to_at_dma(chan->device);
1613         struct at_desc          *desc, *_desc;
1615 
1616         dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1617                 atchan->descs_allocated);
1618 
1619         /* ASSERT:  channel is idle */
1620         BUG_ON(!list_empty(&atchan->active_list));
1621         BUG_ON(!list_empty(&atchan->queue));
1622         BUG_ON(atc_chan_is_enabled(atchan));
1623 
1624         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1625                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1626                 list_del(&desc->desc_node);
1627                 /* free link descriptor */
1628                 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1629         }
1631         atchan->descs_allocated = 0;
1632         atchan->status = 0;
1633 
1634         /*
1635          * Free atslave allocated in at_dma_xlate()
1636          */
1637         kfree(chan->private);
1638         chan->private = NULL;
1639 
1640         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1641 }
1642 
1643 #ifdef CONFIG_OF
1644 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1645 {
1646         struct at_dma_slave *atslave = slave;
1647 
1648         if (atslave->dma_dev == chan->device->dev) {
1649                 chan->private = atslave;
1650                 return true;
1651         } else {
1652                 return false;
1653         }
1654 }
1655 
1656 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1657                                      struct of_dma *of_dma)
1658 {
1659         struct dma_chan *chan;
1660         struct at_dma_chan *atchan;
1661         struct at_dma_slave *atslave;
1662         dma_cap_mask_t mask;
1663         unsigned int per_id;
1664         struct platform_device *dmac_pdev;
1665 
1666         if (dma_spec->args_count != 2)
1667                 return NULL;
1668 
1669         dmac_pdev = of_find_device_by_node(dma_spec->np);
             if (!dmac_pdev)
                     return NULL;
1670 
1671         dma_cap_zero(mask);
1672         dma_cap_set(DMA_SLAVE, mask);
1673 
1674         atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
1675         if (!atslave) {
                     put_device(&dmac_pdev->dev);
1676                 return NULL;
             }
1677 
1678         atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1679         /*
1680          * We can fill both SRC_PER and DST_PER, one of these fields will be
1681          * ignored depending on DMA transfer direction.
1682          */
1683         per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1684         atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1685                      | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1686         /*
1687          * We have to translate the value we get from the device tree since
1688          * the half FIFO configuration value had to be 0 to keep backward
1689          * compatibility.
1690          */
1691         switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1692         case AT91_DMA_CFG_FIFOCFG_ALAP:
1693                 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1694                 break;
1695         case AT91_DMA_CFG_FIFOCFG_ASAP:
1696                 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1697                 break;
1698         case AT91_DMA_CFG_FIFOCFG_HALF:
1699         default:
1700                 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1701         }
1702         atslave->dma_dev = &dmac_pdev->dev;
1703 
1704         chan = dma_request_channel(mask, at_dma_filter, atslave);
1705         if (!chan) {
                     put_device(&dmac_pdev->dev);
                     kfree(atslave);
1706                 return NULL;
             }
1707 
1708         atchan = to_at_dma_chan(chan);
1709         atchan->per_if = dma_spec->args[0] & 0xff;
1710         atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1711 
1712         return chan;
1713 }
1714 #else
1715 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1716                                      struct of_dma *of_dma)
1717 {
1718         return NULL;
1719 }
1720 #endif
1721 
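/*
 * Binding sketch (illustrative): the two cells decoded in at_dma_xlate()
 * come from client nodes such as the one below; the unit address,
 * interface cell and peripheral IDs are hypothetical. The first cell
 * packs the peripheral interface in bits 7:0 and the memory interface in
 * bits 23:16.
 *
 *	uart1: serial@f8020000 {
 *		...
 *		dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(13)>,
 *		       <&dma0 2 (AT91_DMA_CFG_PER_ID(14)
 *				 | AT91_DMA_CFG_FIFOCFG_ASAP)>;
 *		dma-names = "tx", "rx";
 *	};
 */
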
1722 /*--  Module Management  -----------------------------------------------*/
1723 
1724 /* cap_mask is a multi-u32 bitfield, so it cannot be initialised statically;
     * it is filled at probe time with dma_cap_set(). */
1725 static struct at_dma_platform_data at91sam9rl_config = {
1726         .nr_channels = 2,
1727 };
1728 static struct at_dma_platform_data at91sam9g45_config = {
1729         .nr_channels = 8,
1730 };
1731 
1732 #if defined(CONFIG_OF)
1733 static const struct of_device_id atmel_dma_dt_ids[] = {
1734         {
1735                 .compatible = "atmel,at91sam9rl-dma",
1736                 .data = &at91sam9rl_config,
1737         }, {
1738                 .compatible = "atmel,at91sam9g45-dma",
1739                 .data = &at91sam9g45_config,
1740         }, {
1741                 /* sentinel */
1742         }
1743 };
1744 
1745 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1746 #endif
1747 
1748 static const struct platform_device_id atdma_devtypes[] = {
1749         {
1750                 .name = "at91sam9rl_dma",
1751                 .driver_data = (unsigned long) &at91sam9rl_config,
1752         }, {
1753                 .name = "at91sam9g45_dma",
1754                 .driver_data = (unsigned long) &at91sam9g45_config,
1755         }, {
1756                 /* sentinel */
1757         }
1758 };
1759 
1760 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1761                                                 struct platform_device *pdev)
1762 {
1763         if (pdev->dev.of_node) {
1764                 const struct of_device_id *match;
1765                 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1766                 if (!match)
1767                         return NULL;
1768                 return match->data;
1769         }
1770         return (struct at_dma_platform_data *)
1771                         platform_get_device_id(pdev)->driver_data;
1772 }
1773 
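/*
 * Legacy (non-DT) sketch: without a device tree, binding goes through
 * atdma_devtypes above. A hypothetical board file would register the
 * controller by name; "dma_resources" is an assumed resource array.
 *
 *	pdev = platform_device_register_simple("at91sam9g45_dma", -1,
 *					       dma_resources,
 *					       ARRAY_SIZE(dma_resources));
 */
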
1774 /**
1775  * at_dma_off - disable DMA controller
1776  * @atdma: the Atmel HDMAC device
1777  */
1778 static void at_dma_off(struct at_dma *atdma)
1779 {
1780         dma_writel(atdma, EN, 0);
1781 
1782         /* disable all interrupts */
1783         dma_writel(atdma, EBCIDR, -1L);
1784 
1785         /* confirm that all channels are disabled */
1786         while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1787                 cpu_relax();
1788 }
1789 
1790 static int __init at_dma_probe(struct platform_device *pdev)
1791 {
1792         struct resource         *io;
1793         struct at_dma           *atdma;
1794         size_t                  size;
1795         int                     irq;
1796         int                     err;
1797         int                     i;
1798         const struct at_dma_platform_data *plat_dat;
1799 
1800         /* setup platform data for each SoC */
1801         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1802         dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1803         dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1804         dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1805         dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1806         dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1807         dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1808 
1809         /* get DMA parameters from controller type */
1810         plat_dat = at_dma_get_driver_data(pdev);
1811         if (!plat_dat)
1812                 return -ENODEV;
1813 
1814         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1815         if (!io)
1816                 return -EINVAL;
1817 
1818         irq = platform_get_irq(pdev, 0);
1819         if (irq < 0)
1820                 return irq;
1821 
1822         size = sizeof(struct at_dma);
1823         size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1824         atdma = kzalloc(size, GFP_KERNEL);
1825         if (!atdma)
1826                 return -ENOMEM;
1827 
1828         /* discover transaction capabilities */
1829         atdma->dma_common.cap_mask = plat_dat->cap_mask;
1830         atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1831 
1832         size = resource_size(io);
1833         if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1834                 err = -EBUSY;
1835                 goto err_kfree;
1836         }
1837 
1838         atdma->regs = ioremap(io->start, size);
1839         if (!atdma->regs) {
1840                 err = -ENOMEM;
1841                 goto err_release_r;
1842         }
1843 
1844         atdma->clk = clk_get(&pdev->dev, "dma_clk");
1845         if (IS_ERR(atdma->clk)) {
1846                 err = PTR_ERR(atdma->clk);
1847                 goto err_clk;
1848         }
1849         err = clk_prepare_enable(atdma->clk);
1850         if (err)
1851                 goto err_clk_prepare;
1852 
1853         /* force dma off, just in case */
1854         at_dma_off(atdma);
1855 
1856         err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1857         if (err)
1858                 goto err_irq;
1859 
1860         platform_set_drvdata(pdev, atdma);
1861 
1862         /* create a pool of consistent memory blocks for hardware descriptors */
1863         atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1864                         &pdev->dev, sizeof(struct at_desc),
1865                         4 /* word alignment */, 0);
1866         if (!atdma->dma_desc_pool) {
1867                 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1868                 err = -ENOMEM;
1869                 goto err_desc_pool_create;
1870         }
1871 
1872         /* create a pool of consistent memory blocks for memset blocks */
1873         atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1874                                              &pdev->dev, sizeof(int), 4, 0);
1875         if (!atdma->memset_pool) {
1876                 dev_err(&pdev->dev, "No memory for memset dma pool\n");
1877                 err = -ENOMEM;
1878                 goto err_memset_pool_create;
1879         }
1880 
1881         /* clear any pending interrupt */
1882         while (dma_readl(atdma, EBCISR))
1883                 cpu_relax();
1884 
1885         /* initialize channels related values */
1886         INIT_LIST_HEAD(&atdma->dma_common.channels);
1887         for (i = 0; i < plat_dat->nr_channels; i++) {
1888                 struct at_dma_chan      *atchan = &atdma->chan[i];
1889 
1890                 atchan->mem_if = AT_DMA_MEM_IF;
1891                 atchan->per_if = AT_DMA_PER_IF;
1892                 atchan->chan_common.device = &atdma->dma_common;
1893                 dma_cookie_init(&atchan->chan_common);
1894                 list_add_tail(&atchan->chan_common.device_node,
1895                                 &atdma->dma_common.channels);
1896 
1897                 atchan->ch_regs = atdma->regs + ch_regs(i);
1898                 spin_lock_init(&atchan->lock);
1899                 atchan->mask = 1 << i;
1900 
1901                 INIT_LIST_HEAD(&atchan->active_list);
1902                 INIT_LIST_HEAD(&atchan->queue);
1903                 INIT_LIST_HEAD(&atchan->free_list);
1904 
1905                 tasklet_init(&atchan->tasklet, atc_tasklet,
1906                                 (unsigned long)atchan);
1907                 atc_enable_chan_irq(atdma, i);
1908         }
1909 
1910         /* set base routines */
1911         atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1912         atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1913         atdma->dma_common.device_tx_status = atc_tx_status;
1914         atdma->dma_common.device_issue_pending = atc_issue_pending;
1915         atdma->dma_common.dev = &pdev->dev;
1916 
1917         /* set prep routines based on capability */
1918         if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1919                 atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1920 
1921         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1922                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1923 
1924         if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1925                 atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1926                 atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1927                 atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1928         }
1929 
1930         if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1931                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1932                 /* controller can do slave DMA: can trigger cyclic transfers */
1933                 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1934                 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1935                 atdma->dma_common.device_config = atc_config;
1936                 atdma->dma_common.device_pause = atc_pause;
1937                 atdma->dma_common.device_resume = atc_resume;
1938                 atdma->dma_common.device_terminate_all = atc_terminate_all;
1939                 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1940                 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1941                 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1942                 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1943         }
1944 
1945         dma_writel(atdma, EN, AT_DMA_ENABLE);
1946 
1947         dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s%s), %d channels\n",
1948           dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1949           dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1950           dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1951           plat_dat->nr_channels);
1952 
1953         err = dma_async_device_register(&atdma->dma_common);
             if (err) {
                     dev_err(&pdev->dev, "could not register dma device: %d\n", err);
                     goto err_dma_async_device_register;
             }
1954 
1955         /*
1956          * Do not return an error if the dmac node is not present, so as not
1957          * to break the existing way of requesting a channel with
1958          * dma_request_channel().
1959          */
1960         if (pdev->dev.of_node) {
1961                 err = of_dma_controller_register(pdev->dev.of_node,
1962                                                  at_dma_xlate, atdma);
1963                 if (err) {
1964                         dev_err(&pdev->dev, "could not register of_dma_controller\n");
1965                         goto err_of_dma_controller_register;
1966                 }
1967         }
1968 
1969         return 0;
1970 
1971 err_of_dma_controller_register:
1972         dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
1973         dma_pool_destroy(atdma->memset_pool);
1974 err_memset_pool_create:
1975         dma_pool_destroy(atdma->dma_desc_pool);
1976 err_desc_pool_create:
1977         free_irq(platform_get_irq(pdev, 0), atdma);
1978 err_irq:
1979         clk_disable_unprepare(atdma->clk);
1980 err_clk_prepare:
1981         clk_put(atdma->clk);
1982 err_clk:
1983         iounmap(atdma->regs);
1984         atdma->regs = NULL;
1985 err_release_r:
1986         release_mem_region(io->start, size);
1987 err_kfree:
1988         kfree(atdma);
1989         return err;
1990 }
1991 
1992 static int at_dma_remove(struct platform_device *pdev)
1993 {
1994         struct at_dma           *atdma = platform_get_drvdata(pdev);
1995         struct dma_chan         *chan, *_chan;
1996         struct resource         *io;
1997 
1998         at_dma_off(atdma);
1999         if (pdev->dev.of_node)
2000                 of_dma_controller_free(pdev->dev.of_node);
2001         dma_async_device_unregister(&atdma->dma_common);
2002 
2003         dma_pool_destroy(atdma->memset_pool);
2004         dma_pool_destroy(atdma->dma_desc_pool);
2005         free_irq(platform_get_irq(pdev, 0), atdma);
2006 
2007         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2008                         device_node) {
2009                 struct at_dma_chan      *atchan = to_at_dma_chan(chan);
2010 
2011                 /* Disable interrupts */
2012                 atc_disable_chan_irq(atdma, chan->chan_id);
2013 
2014                 tasklet_kill(&atchan->tasklet);
2015                 list_del(&chan->device_node);
2016         }
2017 
2018         clk_disable_unprepare(atdma->clk);
2019         clk_put(atdma->clk);
2020 
2021         iounmap(atdma->regs);
2022         atdma->regs = NULL;
2023 
2024         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2025         release_mem_region(io->start, resource_size(io));
2026 
2027         kfree(atdma);
2028 
2029         return 0;
2030 }
2031 
2032 static void at_dma_shutdown(struct platform_device *pdev)
2033 {
2034         struct at_dma   *atdma = platform_get_drvdata(pdev);
2035 
2036         at_dma_off(atdma);
2037         clk_disable_unprepare(atdma->clk);
2038 }
2039 
2040 static int at_dma_prepare(struct device *dev)
2041 {
2042         struct at_dma *atdma = dev_get_drvdata(dev);
2043         struct dma_chan *chan, *_chan;
2044 
2045         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2046                         device_node) {
2047                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2048                 /* wait for transaction completion (except in cyclic case) */
2049                 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2050                         return -EAGAIN;
2051         }
2052         return 0;
2053 }
2054 
2055 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2056 {
2057         struct dma_chan *chan = &atchan->chan_common;
2058 
2059         /* The channel should already have been paused by its user;
2060          * pause it here anyway if that has not been done. */
2061         if (!atc_chan_is_paused(atchan)) {
2062                 dev_warn(chan2dev(chan),
2063                 "cyclic channel not paused, should be done by channel user\n");
2064                 atc_pause(chan);
2065         }
2066 
2067         /* Preserve additional data for cyclic operations: the address of
2068          * the next descriptor in the cyclic list. */
2069         atchan->save_dscr = channel_readl(atchan, DSCR);
2070 
2071         vdbg_dump_regs(atchan);
2072 }
2073 
2074 static int at_dma_suspend_noirq(struct device *dev)
2075 {
2076         struct at_dma *atdma = dev_get_drvdata(dev);
2077         struct dma_chan *chan, *_chan;
2078 
2079         /* preserve data */
2080         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2081                         device_node) {
2082                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2083 
2084                 if (atc_chan_is_cyclic(atchan))
2085                         atc_suspend_cyclic(atchan);
2086                 atchan->save_cfg = channel_readl(atchan, CFG);
2087         }
2088         atdma->save_imr = dma_readl(atdma, EBCIMR);
2089 
2090         /* disable DMA controller */
2091         at_dma_off(atdma);
2092         clk_disable_unprepare(atdma->clk);
2093         return 0;
2094 }
2095 
2096 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2097 {
2098         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
2099 
2100         /* Restore channel status for the cyclic descriptor list:
2101          * resume from the descriptor that was next at suspend time. */
2102         channel_writel(atchan, SADDR, 0);
2103         channel_writel(atchan, DADDR, 0);
2104         channel_writel(atchan, CTRLA, 0);
2105         channel_writel(atchan, CTRLB, 0);
2106         channel_writel(atchan, DSCR, atchan->save_dscr);
2107         dma_writel(atdma, CHER, atchan->mask);
2108 
2109         /* The pause status must be cleared by the channel user;
2110          * we cannot take the initiative to do it here. */
2111 
2112         vdbg_dump_regs(atchan);
2113 }
2114 
2115 static int at_dma_resume_noirq(struct device *dev)
2116 {
2117         struct at_dma *atdma = dev_get_drvdata(dev);
2118         struct dma_chan *chan, *_chan;
2119 
2120         /* bring back DMA controller */
2121         clk_prepare_enable(atdma->clk);
2122         dma_writel(atdma, EN, AT_DMA_ENABLE);
2123 
2124         /* clear any pending interrupt */
2125         while (dma_readl(atdma, EBCISR))
2126                 cpu_relax();
2127 
2128         /* restore saved data */
2129         dma_writel(atdma, EBCIER, atdma->save_imr);
2130         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2131                         device_node) {
2132                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2133 
2134                 channel_writel(atchan, CFG, atchan->save_cfg);
2135                 if (atc_chan_is_cyclic(atchan))
2136                         atc_resume_cyclic(atchan);
2137         }
2138         return 0;
2139 }
2140 
2141 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2142         .prepare = at_dma_prepare,
2143         .suspend_noirq = at_dma_suspend_noirq,
2144         .resume_noirq = at_dma_resume_noirq,
2145 };
2146 
2147 static struct platform_driver at_dma_driver = {
2148         .remove         = at_dma_remove,
2149         .shutdown       = at_dma_shutdown,
2150         .id_table       = atdma_devtypes,
2151         .driver = {
2152                 .name   = "at_hdmac",
2153                 .pm     = &at_dma_dev_pm_ops,
2154                 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
2155         },
2156 };
2157 
2158 static int __init at_dma_init(void)
2159 {
2160         return platform_driver_probe(&at_dma_driver, at_dma_probe);
2161 }
2162 subsys_initcall(at_dma_init);
2163 
2164 static void __exit at_dma_exit(void)
2165 {
2166         platform_driver_unregister(&at_dma_driver);
2167 }
2168 module_exit(at_dma_exit);
2169 
2170 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2171 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2172 MODULE_LICENSE("GPL");
2173 MODULE_ALIAS("platform:at_hdmac");
