drivers/dma/mpc512x_dma.c


DEFINITIONS

This source file includes the following definitions.
  1. dma_chan_to_mpc_dma_chan
  2. dma_chan_to_mpc_dma
  3. mpc_dma_execute
  4. mpc_dma_irq_process
  5. mpc_dma_irq
  6. mpc_dma_process_completed
  7. mpc_dma_tasklet
  8. mpc_dma_tx_submit
  9. mpc_dma_alloc_chan_resources
  10. mpc_dma_free_chan_resources
  11. mpc_dma_issue_pending
  12. mpc_dma_tx_status
  13. mpc_dma_prep_memcpy
  14. buswidth_to_dmatsize
  15. mpc_dma_prep_slave_sg
  16. is_buswidth_valid
  17. mpc_dma_device_config
  18. mpc_dma_device_terminate_all
  19. mpc_dma_probe
  20. mpc_dma_remove

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *     refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not have
 *     external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *     MPC512x), and 32 bytes are supported, and, consequently, source
 *     addresses and destination addresses must be aligned accordingly;
 *     furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *     (chunk size * maxburst)
 */
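
/*
 * Illustrative sketch (not part of this driver): a dmaengine client that
 * honours the constraints above could set up a peripheral-to-memory
 * transfer as below. The channel, FIFO address (fifo_paddr), buffer and
 * length are hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_paddr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		return -EINVAL;
 *	// on MPC512x, len must then be a multiple of 4 * 16 = 64 bytes
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, 0);
 *	if (!desc)
 *		return -EINVAL;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */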

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS     64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET      0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX     16
#define MPC512x_DMACHAN_MAX     64
#define MPC_DMA_CHANNELS        64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG      (1 << 31)
#define MPC_DMA_DMACR_ERGA      (1 << 3)
#define MPC_DMA_DMACR_ERCA      (1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD       (1 << 31)
#define MPC_DMA_DMAES_GPE       (1 << 15)
#define MPC_DMA_DMAES_CPE       (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE       (1 << 7)
#define MPC_DMA_DMAES_SOE       (1 << 6)
#define MPC_DMA_DMAES_DAE       (1 << 5)
#define MPC_DMA_DMAES_DOE       (1 << 4)
#define MPC_DMA_DMAES_NCE       (1 << 3)
#define MPC_DMA_DMAES_SGE       (1 << 2)
#define MPC_DMA_DMAES_SBE       (1 << 1)
#define MPC_DMA_DMAES_DBE       (1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE    (1 << 6)

#define MPC_DMA_TSIZE_1         0x00
#define MPC_DMA_TSIZE_2         0x01
#define MPC_DMA_TSIZE_4         0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;              /* DMA control register */
        u32 dmaes;              /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;            /* DMA enable request high(channels 63~32) */
        u32 dmaerql;            /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;            /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;            /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;             /* DMA set enable request */
        u8 dmacerq;             /* DMA clear enable request */
        u8 dmaseei;             /* DMA set enable error interrupt */
        u8 dmaceei;             /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;             /* DMA clear interrupt request */
        u8 dmacerr;             /* DMA clear error */
        u8 dmassrt;             /* DMA set start bit */
        u8 dmacdne;             /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;            /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;            /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;            /* DMA error high(ch63~32) */
        u32 dmaerrl;            /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;            /* DMA hw request status high(ch63~32) */
        u32 dmahrsl;            /* DMA hardware request status low(ch31~0) */
        union {
                u32 dmaihsa;    /* DMA interrupt high select AXE(ch63~32) */
                u32 dmagpor;    /* (General purpose register on MPC8308) */
        };
        u32 dmailsa;            /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];       /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
        /* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;              /* Source address */

        u32 smod:5;             /* Source address modulo */
        u32 ssize:3;            /* Source data transfer size */
        u32 dmod:5;             /* Destination address modulo */
        u32 dsize:3;            /* Destination data transfer size */
        u32 soff:16;            /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;             /* Inner "minor" byte count */
        u32 slast;              /* Last source address adjustment */
        u32 daddr;              /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;      /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;     /* Link channel for minor loop complete */
        u32 citer:9;            /* Current "major" iteration count */
        u32 doff:16;            /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;          /* Last Destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;     /* Link channel for major loop complete */
        u32 biter:9;            /* Beginning "major" iteration count */
        u32 bwc:2;              /* Bandwidth control */
        u32 major_linkch:6;     /* Link channel number */
        u32 done:1;             /* Channel done */
        u32 active:1;           /* Channel active */
        u32 major_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;             /* Enable scatter/gather processing */
        u32 d_req:1;            /* Disable request */
        u32 int_half:1;         /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;          /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;            /* Channel start */
};
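
/*
 * Note: the in-memory TCD above mirrors the 32-byte hardware TCD image;
 * mpc_dma_execute() copies it into the per-channel TCD registers (located
 * at MPC_DMA_TCD_OFFSET from the register base) with memcpy_toio().
 */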

struct mpc_dma_desc {
        struct dma_async_tx_descriptor  desc;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;
        int                             error;
        struct list_head                node;
        int                             will_access_peripheral;
};

struct mpc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;

        /* Settings for access to peripheral FIFO */
        dma_addr_t                      src_per_paddr;
        u32                             src_tcd_nunits;
        u8                              swidth;
        dma_addr_t                      dst_per_paddr;
        u32                             dst_tcd_nunits;
        u8                              dwidth;

        /* Lock for this structure */
        spinlock_t                      lock;
};

struct mpc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
        struct mpc_dma_chan             channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem     *regs;
        struct mpc_dma_tcd __iomem      *tcd;
        int                             irq;
        int                             irq2;
        uint                            error_status;
        int                             is_mpc8308;

        /* Lock for error_status field in this structure */
        spinlock_t                      error_status_lock;
};

#define DRV_NAME        "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}

/*
 * Convert struct dma_chan to struct mpc_dma; this relies on the channels[]
 * array being indexed by chan_id, which holds because the channels are
 * registered in array order in mpc_dma_probe()
 */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 *      a) mchan->lock is acquired,
 *      b) mchan->active list is empty,
 *      c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        while (!list_empty(&mchan->queued)) {
                mdesc = list_first_entry(&mchan->queued,
                                                struct mpc_dma_desc, node);
                /*
                 * Grab either several mem-to-mem transfer descriptors
                 * or one peripheral transfer descriptor,
                 * don't mix mem-to-mem and peripheral transfer descriptors
                 * within the same 'active' list.
                 */
                if (mdesc->will_access_peripheral) {
                        if (list_empty(&mchan->active))
                                list_move_tail(&mdesc->node, &mchan->active);
                        break;
                } else {
                        list_move_tail(&mdesc->node, &mchan->active);
                }
        }

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

        if (first != prev)
                mdma->tcd[cid].e_sg = 1;

        if (mdma->is_mpc8308) {
                /* MPC8308, no request lines, software initiated start */
                out_8(&mdma->regs->dmassrt, cid);
        } else if (first->will_access_peripheral) {
                /* Peripherals involved, start by external request signal */
                out_8(&mdma->regs->dmaserq, cid);
        } else {
                /* Memory to memory transfer, software initiated start */
                out_8(&mdma->regs->dmassrt, cid);
        }
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Execute queued descriptors */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        dmaengine_desc_get_callback_invoke(desc, NULL);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->chan.completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        uint es;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported the following error(s) on channel %u:\n",
                                                      MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);
        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                        &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev,
                                "Memory allocation error. Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                                        size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                                                        node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }

        mdesc->error = 0;
        mdesc->will_access_peripheral = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}
 673 
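/*
 * Map a bus width in bytes to the hardware transfer-size encoding by
 * taking log2: 1 -> 0, 2 -> 1, 4 -> 2, 16 -> 4, 32 -> 5, matching the
 * MPC_DMA_TSIZE_* values above. Callers must pass a power of two.
 */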
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
        u8 res;

        for (res = 0; buswidth > 1; buswidth /= 2)
                res++;
        return res;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        dma_addr_t per_paddr;
        u32 tcd_nunits;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;
        struct scatterlist *sg;
        size_t len;
        int iter, i;

        /* Currently there is no proper support for scatter/gather */
        if (sg_len != 1)
                return NULL;

        if (!is_slave_direction(direction))
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                spin_lock_irqsave(&mchan->lock, iflags);

                /* list_first_entry() never returns NULL on an empty list */
                mdesc = list_first_entry_or_null(&mchan->free,
                                                struct mpc_dma_desc, node);
                if (!mdesc) {
                        spin_unlock_irqrestore(&mchan->lock, iflags);
                        /* Try to free completed descriptors */
                        mpc_dma_process_completed(mdma);
                        return NULL;
                }

                list_del(&mdesc->node);

                if (direction == DMA_DEV_TO_MEM) {
                        per_paddr = mchan->src_per_paddr;
                        tcd_nunits = mchan->src_tcd_nunits;
                } else {
                        per_paddr = mchan->dst_per_paddr;
                        tcd_nunits = mchan->dst_tcd_nunits;
                }

                spin_unlock_irqrestore(&mchan->lock, iflags);

                if (per_paddr == 0 || tcd_nunits == 0)
                        goto err_prep;

                mdesc->error = 0;
                mdesc->will_access_peripheral = 1;

                /* Prepare Transfer Control Descriptor for this transaction */
                tcd = mdesc->tcd;

                memset(tcd, 0, sizeof(struct mpc_dma_tcd));

                if (direction == DMA_DEV_TO_MEM) {
                        tcd->saddr = per_paddr;
                        tcd->daddr = sg_dma_address(sg);

                        if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
                                goto err_prep;

                        tcd->soff = 0;
                        tcd->doff = mchan->dwidth;
                } else {
                        tcd->saddr = sg_dma_address(sg);
                        tcd->daddr = per_paddr;

                        if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
                                goto err_prep;

                        tcd->soff = mchan->swidth;
                        tcd->doff = 0;
                }

                tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
                tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

                if (mdma->is_mpc8308) {
                        tcd->nbytes = sg_dma_len(sg);
                        if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
                                goto err_prep;
                        /* No major loops for MPC8308 */
                        tcd->biter = 1;
                        tcd->citer = 1;
                } else {
                        len = sg_dma_len(sg);
                        tcd->nbytes = tcd_nunits * tcd->ssize;
                        if (!IS_ALIGNED(len, tcd->nbytes))
                                goto err_prep;

                        iter = len / tcd->nbytes;
                        if (iter >= 1 << 15) {
                                /* len is too big */
                                goto err_prep;
                        }
                        /* citer_linkch contains the high bits of iter */
                        tcd->biter = iter & 0x1ff;
                        tcd->biter_linkch = iter >> 9;
                        tcd->citer = tcd->biter;
                        tcd->citer_linkch = tcd->biter_linkch;
                }

                tcd->e_sg = 0;
                tcd->d_req = 1;

                /* Place descriptor in prepared list */
                spin_lock_irqsave(&mchan->lock, iflags);
                list_add_tail(&mdesc->node, &mchan->prepared);
                spin_unlock_irqrestore(&mchan->lock, iflags);
        }

        return &mdesc->desc;

err_prep:
        /* Put the descriptor back */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return NULL;
}

static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
        switch (buswidth) {
        case 16:
                if (is_mpc8308)
                        return false;
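                /* fall through: 16 bytes is valid on MPC512x */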
        case 1:
        case 2:
        case 4:
        case 32:
                break;
        default:
                return false;
        }

        return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
                                 struct dma_slave_config *cfg)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        unsigned long flags;

        /*
         * Software constraints:
         *  - only transfers between a peripheral device and memory are
         *     supported
         *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
         *     are supported, and, consequently, source addresses and
         *     destination addresses must be aligned accordingly; furthermore,
         *     for MPC512x SoCs, the transfer size must be aligned on (chunk
         *     size * maxburst)
         *  - during the transfer, the RAM address is incremented by the size
         *     of transfer chunk
         *  - the peripheral port's address is constant during the transfer.
         */
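        /*
         * For example, a 4-byte chunk size with maxburst of 16 requires
         * 4-byte aligned addresses and, on MPC512x, transfer lengths that
         * are multiples of 4 * 16 = 64 bytes.
         */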
 848 
        if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
            !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
                return -EINVAL;
        }

        if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
            !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
                return -EINVAL;

        spin_lock_irqsave(&mchan->lock, flags);

        mchan->src_per_paddr = cfg->src_addr;
        mchan->src_tcd_nunits = cfg->src_maxburst;
        mchan->swidth = cfg->src_addr_width;
        mchan->dst_per_paddr = cfg->dst_addr;
        mchan->dst_tcd_nunits = cfg->dst_maxburst;
        mchan->dwidth = cfg->dst_addr_width;

        /* Apply defaults */
        if (mchan->src_tcd_nunits == 0)
                mchan->src_tcd_nunits = 1;
        if (mchan->dst_tcd_nunits == 0)
                mchan->dst_tcd_nunits = 1;

        spin_unlock_irqrestore(&mchan->lock, flags);

        return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        unsigned long flags;

        /* Disable channel requests */
        spin_lock_irqsave(&mchan->lock, flags);

        out_8(&mdma->regs->dmacerq, chan->chan_id);
        list_splice_tail_init(&mchan->prepared, &mchan->free);
        list_splice_tail_init(&mchan->queued, &mchan->free);
        list_splice_tail_init(&mchan->active, &mchan->free);

        spin_unlock_irqrestore(&mchan->lock, flags);

        return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;
        u8 chancnt;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                retval = -ENOMEM;
                goto err;
        }

        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (!mdma->irq) {
                dev_err(dev, "Error mapping IRQ!\n");
                retval = -EINVAL;
                goto err;
        }

        if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
                mdma->is_mpc8308 = 1;
                mdma->irq2 = irq_of_parse_and_map(dn, 1);
                if (!mdma->irq2) {
                        dev_err(dev, "Error mapping IRQ2!\n");
                        retval = -EINVAL;
                        goto err_dispose1;
                }
        }

        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                goto err_dispose2;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                retval = -EBUSY;
                goto err_dispose2;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                retval = -ENOMEM;
                goto err_dispose2;
        }

        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                        + MPC_DMA_TCD_OFFSET);

        retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                retval = -EINVAL;
                goto err_dispose2;
        }

        if (mdma->is_mpc8308) {
                retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
                                                        DRV_NAME, mdma);
                if (retval) {
                        dev_err(dev, "Error requesting IRQ2!\n");
                        retval = -EINVAL;
                        goto err_free1;
                }
        }

        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
        dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
        dma->device_config = mpc_dma_device_config;
        dma->device_terminate_all = mpc_dma_device_terminate_all;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);

        if (mdma->is_mpc8308)
                chancnt = MPC8308_DMACHAN_MAX;
        else
                chancnt = MPC512x_DMACHAN_MAX;

        for (i = 0; i < chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                dma_cookie_init(&mchan->chan);

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }

        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

        /*
         * Configure DMA Engine:
         * - Dynamic clock,
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        if (mdma->is_mpc8308) {
                /* MPC8308 has 16 channels and lacks some registers */
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

                /* enable snooping */
                out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmaintl, 0xFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFF);
        } else {
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                                MPC_DMA_DMACR_ERGA |
                                                MPC_DMA_DMACR_ERCA);

                /* Disable hardware DMA requests */
                out_be32(&mdma->regs->dmaerqh, 0);
                out_be32(&mdma->regs->dmaerql, 0);

                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeih, 0);
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

                /* Route interrupts to IPIC */
                out_be32(&mdma->regs->dmaihsa, 0);
                out_be32(&mdma->regs->dmailsa, 0);
        }

        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval)
                goto err_free2;

        /* Register with OF helpers for DMA lookups (nonfatal) */
        if (dev->of_node) {
                retval = of_dma_controller_register(dev->of_node,
                                                of_dma_xlate_by_chan_id, mdma);
                if (retval)
                        dev_warn(dev, "Could not register for OF lookup\n");
        }

        return 0;

err_free2:
        if (mdma->is_mpc8308)
                free_irq(mdma->irq2, mdma);
err_free1:
        free_irq(mdma->irq, mdma);
err_dispose2:
        if (mdma->is_mpc8308)
                irq_dispose_mapping(mdma->irq2);
err_dispose1:
        irq_dispose_mapping(mdma->irq);
err:
        return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        if (dev->of_node)
                of_dma_controller_free(dev->of_node);
        dma_async_device_unregister(&mdma->dma);
        if (mdma->is_mpc8308) {
                free_irq(mdma->irq2, mdma);
                irq_dispose_mapping(mdma->irq2);
        }
        free_irq(mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);
        tasklet_kill(&mdma->tasklet);

        return 0;
}

static const struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        { .compatible = "fsl,mpc8308-dma", },
        {},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);

static struct platform_driver mpc_dma_driver = {
        .probe          = mpc_dma_probe,
        .remove         = mpc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = mpc_dma_match,
        },
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
