drivers/dma/ep93xx_dma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL                     0x0000
#define M2P_CONTROL_STALLINT            BIT(0)
#define M2P_CONTROL_NFBINT              BIT(1)
#define M2P_CONTROL_CH_ERROR_INT        BIT(3)
#define M2P_CONTROL_ENABLE              BIT(4)
#define M2P_CONTROL_ICE                 BIT(6)

#define M2P_INTERRUPT                   0x0004
#define M2P_INTERRUPT_STALL             BIT(0)
#define M2P_INTERRUPT_NFB               BIT(1)
#define M2P_INTERRUPT_ERROR             BIT(3)

#define M2P_PPALLOC                     0x0008
#define M2P_STATUS                      0x000c

#define M2P_MAXCNT0                     0x0020
#define M2P_BASE0                       0x0024
#define M2P_MAXCNT1                     0x0030
#define M2P_BASE1                       0x0034

#define M2P_STATE_IDLE                  0
#define M2P_STATE_STALL                 1
#define M2P_STATE_ON                    2
#define M2P_STATE_NEXT                  3

/* M2M registers */
#define M2M_CONTROL                     0x0000
#define M2M_CONTROL_DONEINT             BIT(2)
#define M2M_CONTROL_ENABLE              BIT(3)
#define M2M_CONTROL_START               BIT(4)
#define M2M_CONTROL_DAH                 BIT(11)
#define M2M_CONTROL_SAH                 BIT(12)
#define M2M_CONTROL_PW_SHIFT            9
#define M2M_CONTROL_PW_8                (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16               (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32               (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK             (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT            13
#define M2M_CONTROL_TM_TX               (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX               (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT              BIT(21)
#define M2M_CONTROL_RSS_SHIFT           22
#define M2M_CONTROL_RSS_SSPRX           (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX           (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE             (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK             BIT(24)
#define M2M_CONTROL_PWSC_SHIFT          25

#define M2M_INTERRUPT                   0x0004
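/* Mask covering the DONE (bit 1) and NFB (bit 2) interrupt status bits */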
#define M2M_INTERRUPT_MASK              6

#define M2M_STATUS                      0x000c
#define M2M_STATUS_CTL_SHIFT            1
#define M2M_STATUS_CTL_IDLE             (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL            (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD            (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR            (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT          (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK             (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT            4
#define M2M_STATUS_BUF_NO               (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON               (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT             (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK             (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE                 BIT(6)

#define M2M_BCR0                        0x0010
#define M2M_BCR1                        0x0014
#define M2M_SAR_BASE0                   0x0018
#define M2M_SAR_BASE1                   0x001c
#define M2M_DAR_BASE0                   0x002c
#define M2M_DAR_BASE1                   0x0030

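/*
 * A single hardware buffer is limited to 0xffff bytes; larger requests are
 * split into DMA_MAX_CHAN_BYTES sized chunks (see the prep functions below).
 */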
#define DMA_MAX_CHAN_BYTES              0xffff
#define DMA_MAX_CHAN_DESCRIPTORS        32

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
                                         enum dma_transfer_direction dir,
                                         struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
        u32                             src_addr;
        u32                             dst_addr;
        size_t                          size;
        bool                            complete;
        struct dma_async_tx_descriptor  txd;
        struct list_head                tx_list;
        struct list_head                node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: last slave configuration passed in via .device_config
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
        struct dma_chan                 chan;
        const struct ep93xx_dma_engine  *edma;
        void __iomem                    *regs;
        int                             irq;
        struct clk                      *clk;
        struct tasklet_struct           tasklet;
        /* protects the fields following */
        spinlock_t                      lock;
        unsigned long                   flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC            0

        int                             buffer;
        struct list_head                active;
        struct list_head                queue;
        struct list_head                free_list;
        u32                             runtime_addr;
        u32                             runtime_ctrl;
        struct dma_slave_config         slave_config;
};
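
/*
 * Illustrative sketch only (not part of the original driver): slave clients
 * typically hand the channel configuration to this driver by pointing
 * chan->private at a filled &struct ep93xx_dma_data from a dmaengine filter
 * function, along the lines of:
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct ep93xx_dma_data *data = filter_param;
 *
 *		if (data->direction != ep93xx_dma_chan_direction(chan))
 *			return false;
 *
 *		chan->private = data;
 *		return true;
 *	}
 *
 * my_dma_filter() is a hypothetical name; real clients may apply additional
 * checks (e.g. that the channel belongs to the expected DMA device).
 */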

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
        void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN       0
#define INTERRUPT_DONE          1
#define INTERRUPT_NEXT_BUFFER   2

        size_t                  num_channels;
        struct ep93xx_dma_chan  channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
        return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
                                  struct ep93xx_dma_desc *desc)
{
        BUG_ON(!list_empty(&edmac->active));

        list_add_tail(&desc->node, &edmac->active);

        /* Flatten the @desc->tx_list chain into @edmac->active list */
        while (!list_empty(&desc->tx_list)) {
                struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
                        struct ep93xx_dma_desc, node);

                /*
                 * We copy the callback parameters from the first descriptor
                 * to all the chained descriptors. This way we can call the
                 * callback without having to find out the first descriptor in
                 * the chain. Useful for cyclic transfers.
                 */
                d->txd.callback = desc->txd.callback;
                d->txd.callback_param = desc->txd.callback_param;

                list_move_tail(&d->node, &edmac->active);
        }
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
        return list_first_entry_or_null(&edmac->active,
                                        struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

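        /* Rotate the list so that the next descriptor becomes the new head */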
        list_rotate_left(&edmac->active);

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc)
                return false;

        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
        return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
        writel(control, edmac->regs + M2P_CONTROL);
        /*
         * EP93xx User's Guide states that we must perform a dummy read after
         * write to the control register.
         */
        readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control;

        writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

        control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);

        edmac->buffer = 0;

        return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
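        /* Bits 5:4 of M2P_STATUS hold the channel state (one of M2P_STATE_*) */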
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
        unsigned long flags;
        u32 control;

        spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
        spin_unlock_irqrestore(&edmac->lock, flags);

        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
                schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        m2p_set_control(edmac, 0);

        while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
                dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;
        u32 bus_addr;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
                return;
        }

        if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;

        if (edmac->buffer == 0) {
                writel(desc->size, edmac->regs + M2P_MAXCNT0);
                writel(bus_addr, edmac->regs + M2P_BASE0);
        } else {
                writel(desc->size, edmac->regs + M2P_MAXCNT1);
                writel(bus_addr, edmac->regs + M2P_BASE1);
        }

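        /* Toggle to the other hardware buffer for the next fill */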
        edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
        u32 control = readl(edmac->regs + M2P_CONTROL);

        m2p_fill_desc(edmac);
        control |= M2P_CONTROL_STALLINT;

        if (ep93xx_dma_advance_active(edmac)) {
                m2p_fill_desc(edmac);
                control |= M2P_CONTROL_NFBINT;
        }

        m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
        u32 control;

        if (irq_status & M2P_INTERRUPT_ERROR) {
                struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

                /* Clear the error interrupt */
                writel(1, edmac->regs + M2P_INTERRUPT);

                /*
                 * It seems that there is no easy way of reporting errors back
                 * to the client so we just report the error here and continue
                 * as usual.
                 *
                 * Revisit this when there is a mechanism to report back the
                 * errors.
                 */
                dev_err(chan2dev(edmac),
                        "DMA transfer failed! Details:\n"
                        "\tcookie       : %d\n"
                        "\tsrc_addr     : 0x%08x\n"
                        "\tdst_addr     : 0x%08x\n"
                        "\tsize         : %zu\n",
                        desc->txd.cookie, desc->src_addr, desc->dst_addr,
                        desc->size);
        }

        /*
         * Even the latest E2 silicon revision sometimes asserts the STALL
         * interrupt instead of NFB. Therefore we treat them equally, basing
         * the decision on the amount of data we still have to transfer.
         */
        if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
                return INTERRUPT_UNKNOWN;

        if (ep93xx_dma_advance_active(edmac)) {
                m2p_fill_desc(edmac);
                return INTERRUPT_NEXT_BUFFER;
        }

        /* Disable interrupts */
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);

        return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
        const struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = 0;

        if (!data) {
                /* This is memcpy channel, nothing to configure */
                writel(control, edmac->regs + M2M_CONTROL);
                return 0;
        }

        switch (data->port) {
        case EP93XX_DMA_SSP:
                /*
                 * This was found via experimenting - anything less than 5
                 * causes the channel to perform only a partial transfer which
                 * leads to problems since we don't get DONE interrupt then.
                 */
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;

                if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
                } else {
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                        control |= M2M_CONTROL_RSS_SSPRX;
                }
                break;

        case EP93XX_DMA_IDE:
                /*
                 * This IDE part is totally untested. Values below are taken
                 * from the EP93xx User's Guide and might not be correct.
                 */
                if (data->direction == DMA_MEM_TO_DEV) {
                        /* Worst case from the UG */
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                } else {
                        control = (2 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }

                control |= M2M_CONTROL_NO_HDSK;
                control |= M2M_CONTROL_RSS_IDE;
                control |= M2M_CONTROL_PW_16;
                break;

        default:
                return -EINVAL;
        }

        writel(control, edmac->regs + M2M_CONTROL);
        return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        /* Just disable the channel */
        writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
                return;
        }

        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
                writel(desc->size, edmac->regs + M2M_BCR0);
        } else {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
                writel(desc->size, edmac->regs + M2M_BCR1);
        }

        edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = readl(edmac->regs + M2M_CONTROL);

        /*
         * Since we allow clients to configure PW (peripheral width) we always
         * clear PW bits here and then set them according to what is given in
         * the runtime configuration.
         */
        control &= ~M2M_CONTROL_PW_MASK;
        control |= edmac->runtime_ctrl;

        m2m_fill_desc(edmac);
        control |= M2M_CONTROL_DONEINT;

        if (ep93xx_dma_advance_active(edmac)) {
                m2m_fill_desc(edmac);
                control |= M2M_CONTROL_NFBINT;
        }

        /*
         * Now we can finally enable the channel. For M2M channel this must be
         * done _after_ the BCRx registers are programmed.
         */
        control |= M2M_CONTROL_ENABLE;
        writel(control, edmac->regs + M2M_CONTROL);

        if (!data) {
                /*
                 * For memcpy channels the software trigger must be asserted
                 * in order to start the memcpy operation.
                 */
                control |= M2M_CONTROL_START;
                writel(control, edmac->regs + M2M_CONTROL);
        }
}

/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 status = readl(edmac->regs + M2M_STATUS);
        u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
        u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
        bool done = status & M2M_STATUS_DONE;
        bool last_done;
        u32 control;
        struct ep93xx_dma_desc *desc;

        /* Accept only DONE and NFB interrupts */
        if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
                return INTERRUPT_UNKNOWN;

        if (done) {
                /* Clear the DONE bit */
                writel(0, edmac->regs + M2M_INTERRUPT);
        }

        /*
         * Check whether we are done with descriptors or not. This, together
         * with DMA channel state, determines action to take in interrupt.
         */
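        /*
         * Note: txd.cookie is assigned only to the first descriptor of a
         * chain, so finding the cookie set on the head again means we have
         * rotated through the whole flattened chain.
         */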
        desc = ep93xx_dma_get_active(edmac);
        last_done = !desc || desc->txd.cookie;

        /*
         * Use M2M DMA Buffer FSM and Control FSM to check current state of
         * DMA channel. Using DONE and NFB bits from channel status register
         * or bits from channel interrupt register is not reliable.
         */
        if (!last_done &&
            (buf_fsm == M2M_STATUS_BUF_NO ||
             buf_fsm == M2M_STATUS_BUF_ON)) {
                /*
                 * Two buffers are ready for update when Buffer FSM is in
                 * DMA_NO_BUF state. Only one buffer can be prepared without
                 * disabling the channel or polling the DONE bit.
                 * To simplify things, always prepare only one buffer.
                 */
                if (ep93xx_dma_advance_active(edmac)) {
                        m2m_fill_desc(edmac);
                        if (done && !edmac->chan.private) {
                                /* Software trigger for memcpy channel */
                                control = readl(edmac->regs + M2M_CONTROL);
                                control |= M2M_CONTROL_START;
                                writel(control, edmac->regs + M2M_CONTROL);
                        }
                        return INTERRUPT_NEXT_BUFFER;
                } else {
                        last_done = true;
                }
        }

        /*
         * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
         * and Control FSM is in DMA_STALL state.
         */
        if (last_done &&
            buf_fsm == M2M_STATUS_BUF_NO &&
            ctl_fsm == M2M_STATUS_CTL_STALL) {
                /* Disable interrupts and the channel */
                control = readl(edmac->regs + M2M_CONTROL);
                control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
                            | M2M_CONTROL_ENABLE);
                writel(control, edmac->regs + M2M_CONTROL);
                return INTERRUPT_DONE;
        }

        /*
         * Nothing to do this time.
         */
        return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc, *_desc;
        struct ep93xx_dma_desc *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
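                /* Only reuse descriptors that the client has acknowledged */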
                if (async_tx_test_ack(&desc->txd)) {
                        list_del_init(&desc->node);

                        /* Re-initialize the descriptor */
                        desc->src_addr = 0;
                        desc->dst_addr = 0;
                        desc->size = 0;
                        desc->complete = false;
                        desc->txd.cookie = 0;
                        desc->txd.callback = NULL;
                        desc->txd.callback_param = NULL;

                        ret = desc;
                        break;
                }
        }
        spin_unlock_irqrestore(&edmac->lock, flags);
        return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
                                struct ep93xx_dma_desc *desc)
{
        if (desc) {
                unsigned long flags;

                spin_lock_irqsave(&edmac->lock, flags);
                list_splice_init(&desc->tx_list, &edmac->free_list);
                list_add(&desc->node, &edmac->free_list);
                spin_unlock_irqrestore(&edmac->lock, flags);
        }
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *new;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
                spin_unlock_irqrestore(&edmac->lock, flags);
                return;
        }

        /* Take the next descriptor from the pending queue */
        new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
        list_del_init(&new->node);

        ep93xx_dma_set_active(edmac, new);

        /* Push it to the hardware */
        edmac->edma->hw_submit(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
        struct dmaengine_desc_callback cb;
        LIST_HEAD(list);

        memset(&cb, 0, sizeof(cb));
        spin_lock_irq(&edmac->lock);
        /*
         * If dma_terminate_all() was called before we get to run, the active
         * list has become empty. If that happens we aren't supposed to do
         * anything more than call ep93xx_dma_advance_work().
         */
        desc = ep93xx_dma_get_active(edmac);
        if (desc) {
                if (desc->complete) {
                        /* mark descriptor complete for non cyclic case only */
                        if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                                dma_cookie_complete(&desc->txd);
                        list_splice_init(&edmac->active, &list);
                }
                dmaengine_desc_get_callback(&desc->txd, &cb);
        }
        spin_unlock_irq(&edmac->lock);

        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);

        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                dma_descriptor_unmap(&desc->txd);
                ep93xx_dma_desc_put(edmac, desc);
        }

        dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
        struct ep93xx_dma_chan *edmac = dev_id;
        struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;

        spin_lock(&edmac->lock);

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac),
                         "got interrupt while active list is empty\n");
                spin_unlock(&edmac->lock);
                return IRQ_NONE;
        }

        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
                desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;

        case INTERRUPT_NEXT_BUFFER:
                if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                        tasklet_schedule(&edmac->tasklet);
                break;

        default:
                dev_warn(chan2dev(edmac), "unknown interrupt!\n");
                ret = IRQ_NONE;
                break;
        }

        spin_unlock(&edmac->lock);
        return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
        struct ep93xx_dma_desc *desc;
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        cookie = dma_cookie_assign(tx);

        desc = container_of(tx, struct ep93xx_dma_desc, txd);

        /*
         * If nothing is currently processed, we push this descriptor
         * directly to the hardware. Otherwise we put the descriptor
         * to the pending queue.
         */
        if (list_empty(&edmac->active)) {
                ep93xx_dma_set_active(edmac, desc);
                edmac->edma->hw_submit(edmac);
        } else {
                list_add_tail(&desc->node, &edmac->queue);
        }

        spin_unlock_irqrestore(&edmac->lock, flags);
        return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_data *data = chan->private;
        const char *name = dma_chan_name(chan);
        int ret, i;

        /* Sanity check the channel parameters */
        if (!edmac->edma->m2m) {
                if (!data)
                        return -EINVAL;
                if (data->port < EP93XX_DMA_I2S1 ||
                    data->port > EP93XX_DMA_IRDA)
                        return -EINVAL;
                if (data->direction != ep93xx_dma_chan_direction(chan))
                        return -EINVAL;
        } else {
                if (data) {
                        switch (data->port) {
                        case EP93XX_DMA_SSP:
                        case EP93XX_DMA_IDE:
                                if (!is_slave_direction(data->direction))
                                        return -EINVAL;
                                break;
                        default:
                                return -EINVAL;
                        }
                }
        }

        if (data && data->name)
                name = data->name;

        ret = clk_enable(edmac->clk);
        if (ret)
                return ret;

        ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
        if (ret)
                goto fail_clk_disable;

        spin_lock_irq(&edmac->lock);
        dma_cookie_init(&edmac->chan);
        ret = edmac->edma->hw_setup(edmac);
        spin_unlock_irq(&edmac->lock);

        if (ret)
                goto fail_free_irq;

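        /* Preallocate a pool of reusable descriptors for this channel */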
        for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
                struct ep93xx_dma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "not enough descriptors\n");
                        break;
                }

                INIT_LIST_HEAD(&desc->tx_list);

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = ep93xx_dma_tx_submit;

                ep93xx_dma_desc_put(edmac, desc);
        }

        return i;

fail_free_irq:
        free_irq(edmac->irq, edmac);
fail_clk_disable:
        clk_disable(edmac->clk);

        return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *d;
        unsigned long flags;
        LIST_HEAD(list);

        BUG_ON(!list_empty(&edmac->active));
        BUG_ON(!list_empty(&edmac->queue));

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->edma->hw_shutdown(edmac);
        edmac->runtime_addr = 0;
        edmac->runtime_ctrl = 0;
        edmac->buffer = 0;
        list_splice_init(&edmac->free_list, &list);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, d, &list, node)
                kfree(desc);

        clk_disable(edmac->clk);
        free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                           dma_addr_t src, size_t len, unsigned long flags)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t bytes, offset;

        first = NULL;
        for (offset = 0; offset < len; offset += bytes) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

                desc->src_addr = src + offset;
                desc->dst_addr = dest + offset;
                desc->size = bytes;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;
fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                         unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags, void *context)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        struct scatterlist *sg;
        int i;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

        first = NULL;
        for_each_sg(sgl, sg, sg_len, i) {
                size_t len = sg_dma_len(sg);

                if (len > DMA_MAX_CHAN_BYTES) {
                        dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
                                 len);
                        goto fail;
                }

                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = sg_dma_address(sg);
                }
                desc->size = len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we keep submitting @period_len sized buffers and
 * calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                           size_t buf_len, size_t period_len,
                           enum dma_transfer_direction dir, unsigned long flags)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t offset = 0;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        if (period_len > DMA_MAX_CHAN_BYTES) {
                dev_warn(chan2dev(edmac), "too big period length %zu\n",
                         period_len);
                return NULL;
        }

        ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

        /* Split the buffer into period size chunks */
        first = NULL;
        for (offset = 0; offset < buf_len; offset += period_len) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = dma_addr + offset;
                }

                desc->size = period_len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

        if (edmac->edma->hw_synchronize)
                edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *_d;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&edmac->lock, flags);
        /* First we disable and flush the DMA channel */
        edmac->edma->hw_shutdown(edmac);
        clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
        list_splice_init(&edmac->active, &list);
        list_splice_init(&edmac->queue, &list);
        /*
         * We then re-enable the channel. This way we can continue submitting
         * the descriptors by just calling ->hw_submit() again.
         */
        edmac->edma->hw_setup(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, _d, &list, node)
                ep93xx_dma_desc_put(edmac, desc);

        return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
                                   struct dma_slave_config *config)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

        memcpy(&edmac->slave_config, config, sizeof(*config));

        return 0;
}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
                                         enum dma_transfer_direction dir,
                                         struct dma_slave_config *config)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        enum dma_slave_buswidth width;
        unsigned long flags;
        u32 addr, ctrl;

        if (!edmac->edma->m2m)
                return -EINVAL;

        switch (dir) {
        case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;

        case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;

        default:
                return -EINVAL;
        }

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                ctrl = 0;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                ctrl = M2M_CONTROL_PW_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                ctrl = M2M_CONTROL_PW_32;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->runtime_addr = addr;
        edmac->runtime_ctrl = ctrl;
        spin_unlock_irqrestore(&edmac->lock, flags);

        return 0;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *state)
{
        return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
        ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
        struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct ep93xx_dma_engine *edma;
        struct dma_device *dma_dev;
        size_t edma_size;
        int ret, i;

        edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
        edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
        if (!edma)
                return -ENOMEM;

        dma_dev = &edma->dma_dev;
1330         edma->m2m = platform_get_device_id(pdev)->driver_data;
1331         edma->num_channels = pdata->num_channels;
1332 
1333         INIT_LIST_HEAD(&dma_dev->channels);
1334         for (i = 0; i < pdata->num_channels; i++) {
1335                 const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1336                 struct ep93xx_dma_chan *edmac = &edma->channels[i];
1337 
1338                 edmac->chan.device = dma_dev;
1339                 edmac->regs = cdata->base;
1340                 edmac->irq = cdata->irq;
1341                 edmac->edma = edma;
1342 
1343                 edmac->clk = clk_get(NULL, cdata->name);
1344                 if (IS_ERR(edmac->clk)) {
1345                         dev_warn(&pdev->dev, "failed to get clock for %s\n",
1346                                  cdata->name);
1347                         continue;
1348                 }
1349 
1350                 spin_lock_init(&edmac->lock);
1351                 INIT_LIST_HEAD(&edmac->active);
1352                 INIT_LIST_HEAD(&edmac->queue);
1353                 INIT_LIST_HEAD(&edmac->free_list);
1354                 tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
1355                              (unsigned long)edmac);
1356 
1357                 list_add_tail(&edmac->chan.device_node,
1358                               &dma_dev->channels);
1359         }
1360 
1361         dma_cap_zero(dma_dev->cap_mask);
1362         dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1363         dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1364 
1365         dma_dev->dev = &pdev->dev;
1366         dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1367         dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1368         dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1369         dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1370         dma_dev->device_config = ep93xx_dma_slave_config;
1371         dma_dev->device_synchronize = ep93xx_dma_synchronize;
1372         dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1373         dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1374         dma_dev->device_tx_status = ep93xx_dma_tx_status;
1375 
1376         dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1377 
1378         if (edma->m2m) {
1379                 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1380                 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1381 
1382                 edma->hw_setup = m2m_hw_setup;
1383                 edma->hw_shutdown = m2m_hw_shutdown;
1384                 edma->hw_submit = m2m_hw_submit;
1385                 edma->hw_interrupt = m2m_hw_interrupt;
1386         } else {
1387                 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1388 
1389                 edma->hw_synchronize = m2p_hw_synchronize;
1390                 edma->hw_setup = m2p_hw_setup;
1391                 edma->hw_shutdown = m2p_hw_shutdown;
1392                 edma->hw_submit = m2p_hw_submit;
1393                 edma->hw_interrupt = m2p_hw_interrupt;
1394         }
1395 
1396         ret = dma_async_device_register(dma_dev);
1397         if (unlikely(ret)) {
1398                 for (i = 0; i < edma->num_channels; i++) {
1399                         struct ep93xx_dma_chan *edmac = &edma->channels[i];
1400                         if (!IS_ERR_OR_NULL(edmac->clk))
1401                                 clk_put(edmac->clk);
1402                 }
1403                 kfree(edma);
1404         } else {
1405                 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1406                          edma->m2m ? "M" : "P");
1407         }
1408 
1409         return ret;
1410 }
1411 
1412 static const struct platform_device_id ep93xx_dma_driver_ids[] = {
1413         { "ep93xx-dma-m2p", 0 },
1414         { "ep93xx-dma-m2m", 1 },
1415         { },
1416 };
1417 
1418 static struct platform_driver ep93xx_dma_driver = {
1419         .driver         = {
1420                 .name   = "ep93xx-dma",
1421         },
1422         .id_table       = ep93xx_dma_driver_ids,
1423 };
1424 
1425 static int __init ep93xx_dma_module_init(void)
1426 {
1427         return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
1428 }
1429 subsys_initcall(ep93xx_dma_module_init);
1430 
1431 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1432 MODULE_DESCRIPTION("EP93xx DMA driver");
1433 MODULE_LICENSE("GPL");
