drivers/dma/altera-msgdma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN            U32_MAX
#define MSGDMA_DESC_NUM                 1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *                 bit 23:16 read burst
 *                 bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *          bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
        u32 read_addr_lo;
        u32 write_addr_lo;
        u32 len;
        u32 burst_seq_num;
        u32 stride;
        u32 read_addr_hi;
        u32 write_addr_hi;
        u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)       ((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP         BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP         BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS      BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES     BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP      BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN      BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ     BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ       BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ      GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE      BIT(24)
/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO              BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST        (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE       (MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST         (MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE       (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE       (MSGDMA_DESC_CTL_END_ON_EOP |   \
                                         MSGDMA_DESC_CTL_END_ON_LEN |   \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_EARLY_IRQ |    \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD           0x00000001
#define MSGDMA_DESC_STRIDE_WR           0x00010000
#define MSGDMA_DESC_STRIDE_RW           0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS               0x00    /* Read / Clear */
#define MSGDMA_CSR_CONTROL              0x04    /* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL        0x08    /* 31:16 - write fill level */
                                                /* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL      0x0c    /* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM           0x10    /* 31:16 - write seq number */
                                                /* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY                    BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY          BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL           BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY          BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL           BIT(4)
#define MSGDMA_CSR_STAT_STOPPED                 BIT(5)
#define MSGDMA_CSR_STAT_RESETTING               BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR          BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY        BIT(8)
#define MSGDMA_CSR_STAT_IRQ                     BIT(9)
#define MSGDMA_CSR_STAT_MASK                    GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ        GENMASK(8, 0)

#define DESC_EMPTY      (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
                         MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP                     BIT(0)
#define MSGDMA_CSR_CTL_RESET                    BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR              BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY            BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR              BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS               BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)         (((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)         ((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)       ((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)               (((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED   0x00
#define MSGDMA_RESP_STATUS              0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM  BIT(8)
#define MSGDMA_RESP_ERR_MASK    0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transfer list
 */
struct msgdma_sw_desc {
        struct dma_async_tx_descriptor async_tx;
        struct msgdma_extended_desc hw_desc;
        struct list_head node;
        struct list_head tx_list;
};

/**
 * struct msgdma_device - DMA device structure
 * @lock: protects the descriptor lists and hardware access
 * @dev: device backing this DMA engine
 * @irq_tasklet: tasklet that processes the response FIFO
 * @pending_list: descriptors submitted but not yet issued to hardware
 * @free_list: descriptors available for new transactions
 * @active_list: descriptors currently issued to hardware
 * @done_list: completed descriptors awaiting cleanup
 * @desc_free_cnt: number of descriptors on @free_list
 * @idle: true when the hardware descriptor FIFO has been drained
 * @dmadev: generic dmaengine device
 * @dmachan: the single channel exposed by this device
 * @hw_desq: bus address of the hardware descriptor queue
 * @sw_desq: backing array for the sw descriptors
 * @npendings: number of pending transfers
 * @slave_cfg: DMA slave configuration cached by msgdma_dma_config()
 * @irq: interrupt number
 * @csr: mSGDMA controller register space
 * @desc: mSGDMA descriptor FIFO register space
 * @resp: mSGDMA response FIFO register space
 */
struct msgdma_device {
        spinlock_t lock;
        struct device *dev;
        struct tasklet_struct irq_tasklet;
        struct list_head pending_list;
        struct list_head free_list;
        struct list_head active_list;
        struct list_head done_list;
        u32 desc_free_cnt;
        bool idle;

        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct msgdma_sw_desc *sw_desq;
        unsigned int npendings;

        struct dma_slave_config slave_cfg;

        int irq;

        /* mSGDMA controller */
        void __iomem *csr;

        /* mSGDMA descriptors */
        void __iomem *desc;

        /* mSGDMA response */
        void __iomem *resp;
};

#define to_mdev(chan)   container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)  container_of(tx, struct msgdma_sw_desc, async_tx)
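
/*
 * The driver registers a single DMA channel, which is embedded in struct
 * msgdma_device, so container_of() is sufficient to map the generic
 * dma_chan and async_tx pointers back to the device structure.
 */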

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
        spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list);

        return desc;
}

/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
                                   struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *child, *next;

        mdev->desc_free_cnt++;
        list_add_tail(&desc->node, &mdev->free_list);
        list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                mdev->desc_free_cnt++;
                list_move_tail(&child->node, &mdev->free_list);
        }
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to parse and free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
                                  struct list_head *list)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, list, node)
                msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
                               dma_addr_t dst, dma_addr_t src, size_t len,
                               u32 stride)
{
        /* Set lower 32bits of src & dst addresses in the descriptor */
        desc->read_addr_lo = lower_32_bits(src);
        desc->write_addr_lo = lower_32_bits(dst);

        /* Set upper 32bits of src & dst addresses in the descriptor */
        desc->read_addr_hi = upper_32_bits(src);
        desc->write_addr_hi = upper_32_bits(dst);

        desc->len = len;
        desc->stride = stride;
        desc->burst_seq_num = 0;        /* 0 will result in max burst length */

        /*
         * Don't set interrupt on xfer end yet, this will be done later
         * for the "last" descriptor
         */
        desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
                MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
        desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
        unsigned long flags;

        new = tx_to_desc(tx);
        spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);

        list_add_tail(&new->node, &mdev->pending_list);
        spin_unlock_irqrestore(&mdev->lock, flags);

        return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
                   dma_addr_t dma_src, size_t len, ulong flags)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *new, *first = NULL;
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
        unsigned long irqflags;

        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

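        /* Reserve all needed descriptors up front; bail out if too few are free */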
        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        do {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
                desc = &new->hw_desc;
                msgdma_desc_config(desc, dma_dst, dma_src, copy,
                                   MSGDMA_DESC_STRIDE_RW);
                len -= copy;
                dma_src += copy;
                dma_dst += copy;
                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);
        } while (len);

        msgdma_desc_config_eod(desc);
        async_tx_ack(&first->async_tx);
        first->async_tx.flags = flags;

        return &first->async_tx;
}

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Destination scatter list
 * @sg_len: Number of entries in destination scatter list
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                     unsigned int sg_len, enum dma_transfer_direction dir,
                     unsigned long flags, void *context)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct dma_slave_config *cfg = &mdev->slave_cfg;
        struct msgdma_sw_desc *new, *first = NULL;
        void *desc = NULL;
        size_t len, avail;
        dma_addr_t dma_dst, dma_src;
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
        unsigned long irqflags;

        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        avail = sg_dma_len(sgl);

        /* Run until we are out of scatterlist entries */
        while (true) {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                desc = &new->hw_desc;
                len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

                if (dir == DMA_MEM_TO_DEV) {
                        dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        dma_dst = cfg->dst_addr;
                        stride = MSGDMA_DESC_STRIDE_RD;
                } else {
                        dma_src = cfg->src_addr;
                        dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        stride = MSGDMA_DESC_STRIDE_WR;
                }
                msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
                avail -= len;

                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);

                /* Fetch the next scatterlist entry */
                if (avail == 0) {
                        if (sg_len == 0)
                                break;
                        sgl = sg_next(sgl);
                        if (sgl == NULL)
                                break;
                        sg_len--;
                        avail = sg_dma_len(sgl);
                }
        }

        msgdma_desc_config_eod(desc);
        first->async_tx.flags = flags;

        return &first->async_tx;
}
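
/*
 * Consumer-side sketch (illustrative only, not part of this driver): a
 * client drives this engine through the generic dmaengine API, which then
 * routes into the callbacks implemented above. The channel name "rx" is a
 * made-up example value:
 *
 *      chan = dma_request_chan(dev, "rx");
 *      dmaengine_slave_config(chan, &cfg);             // msgdma_dma_config()
 *      txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                    DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *      cookie = dmaengine_submit(txd);                 // msgdma_tx_submit()
 *      dma_async_issue_pending(chan);                  // msgdma_issue_pending()
 */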
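
/**
 * msgdma_dma_config - Cache the DMA slave configuration for later transfers
 * @dchan: DMA channel
 * @config: DMA slave configuration to store
 *
 * Return: Always '0'
 */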
static int msgdma_dma_config(struct dma_chan *dchan,
                             struct dma_slave_config *config)
{
        struct msgdma_device *mdev = to_mdev(dchan);

        memcpy(&mdev->slave_cfg, config, sizeof(*config));

        return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
        u32 val;
        int ret;

        /* Reset mSGDMA */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

        ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
                                 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
                                 1, 10000);
        if (ret)
                dev_err(mdev->dev, "DMA channel did not reset\n");

        /* Clear all status bits */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

        /* Enable the DMA controller including interrupts */
        iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
                  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

        mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
                            struct msgdma_sw_desc *desc)
{
        void __iomem *hw_desc = mdev->desc;

        /*
         * Check if the DESC FIFO is not full. If it is full, we need to wait
         * for at least one entry to become free again
         */
        while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
               MSGDMA_CSR_STAT_DESC_BUF_FULL)
                mdelay(1);

        /*
         * The descriptor needs to get copied into the descriptor FIFO
         * of the DMA controller. The descriptor will get flushed to the
         * FIFO, once the last word (control word) is written. Since we
         * are not 100% sure that memcpy() writes all words in the "correct"
         * order (address from low to high) on all architectures, we make
         * sure this control word is written last by writing it separately
         * and adding some write-barriers here.
         */
        memcpy((void __force *)hw_desc, &desc->hw_desc,
               sizeof(desc->hw_desc) - sizeof(u32));

        /* Write control word last to flush this descriptor into the FIFO */
        mdev->idle = false;
        wmb();
        iowrite32(desc->hw_desc.control, hw_desc +
                  offsetof(struct msgdma_extended_desc, control));
        wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
                                     struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *sdesc, *next;

        msgdma_copy_one(mdev, desc);

        list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
                msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        if (!mdev->idle)
                return;

        desc = list_first_entry_or_null(&mdev->pending_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;

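        /*
         * Move all pending transactions to the active list and push the
         * first one (including its chained children) into the hardware FIFO
         */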
        list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
        msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
        struct msgdma_device *mdev = to_mdev(chan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock(&mdev->lock);
                        callback(callback_param);
                        spin_lock(&mdev->lock);
                }

                /* Run any dependencies, then free the descriptor */
                msgdma_free_descriptor(mdev, desc);
        }
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        desc = list_first_entry_or_null(&mdev->active_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;
        list_del(&desc->node);
        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
        msgdma_free_desc_list(mdev, &mdev->active_list);
        msgdma_free_desc_list(mdev, &mdev->pending_list);
        msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
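        /* Free the descriptor array allocated in msgdma_alloc_chan_resources() */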
        kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *desc;
        int i;

        mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
        if (!mdev->sw_desq)
                return -ENOMEM;

        mdev->idle = true;
        mdev->desc_free_cnt = MSGDMA_DESC_NUM;

        INIT_LIST_HEAD(&mdev->free_list);

        for (i = 0; i < MSGDMA_DESC_NUM; i++) {
                desc = mdev->sw_desq + i;
                dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                desc->async_tx.tx_submit = msgdma_tx_submit;
                list_add_tail(&desc->node, &mdev->free_list);
        }

        return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Tasklet handler that completes finished transfers
 * @data: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_tasklet(unsigned long data)
{
        struct msgdma_device *mdev = (struct msgdma_device *)data;
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);

        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
        dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
                __func__, __LINE__, count);

        while (count--) {
                /*
                 * Read both longwords to purge this response from the FIFO.
                 * On Avalon-MM implementations, size and status do not
                 * have any real values, like transferred bytes or error
                 * bits, so we simply drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }

        spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
        struct msgdma_device *mdev = data;
        u32 status;

        status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
        if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
                /* Start next transfer if the DMA controller is idle */
                spin_lock(&mdev->lock);
                mdev->idle = true;
                msgdma_start_transfer(mdev);
                spin_unlock(&mdev->lock);
        }

        tasklet_schedule(&mdev->irq_tasklet);

        /* Clear interrupt in mSGDMA controller */
        iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

        return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
        if (!mdev)
                return;

        devm_free_irq(mdev->dev, mdev->irq, mdev);
        tasklet_kill(&mdev->irq_tasklet);
        list_del(&mdev->dmachan.device_node);
}

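/**
 * request_and_map - Request and ioremap a named platform MMIO resource
 * @pdev: Pointer to the platform_device structure
 * @name: Name of the memory resource to look up
 * @res: Output location for the resource found by @name
 * @ptr: Output location for the ioremapped register region
 *
 * Return: '0' on success and failure value on error
 */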
static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
{
        struct resource *region;
        struct device *device = &pdev->dev;

        *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (*res == NULL) {
                dev_err(device, "resource %s not defined\n", name);
                return -ENODEV;
        }

        region = devm_request_mem_region(device, (*res)->start,
                                         resource_size(*res), dev_name(device));
        if (region == NULL) {
                dev_err(device, "unable to request %s\n", name);
                return -EBUSY;
        }

        *ptr = devm_ioremap_nocache(device, region->start,
                                    resource_size(region));
        if (*ptr == NULL) {
                dev_err(device, "ioremap_nocache of %s failed!\n", name);
                return -ENOMEM;
        }

        return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
        struct msgdma_device *mdev;
        struct dma_device *dma_dev;
        struct resource *dma_res;
        int ret;

        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
        if (!mdev)
                return -ENOMEM;

        mdev->dev = &pdev->dev;

        /* Map CSR space */
        ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
        if (ret)
                return ret;

        /* Map (extended) descriptor space */
        ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
        if (ret)
                return ret;

        /* Map response space */
        ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, mdev);

        /* Get interrupt nr from platform data */
        mdev->irq = platform_get_irq(pdev, 0);
        if (mdev->irq < 0)
                return -ENXIO;

        ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
                               0, dev_name(&pdev->dev), mdev);
        if (ret)
                return ret;

        tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

        dma_cookie_init(&mdev->dmachan);

        spin_lock_init(&mdev->lock);

        INIT_LIST_HEAD(&mdev->active_list);
        INIT_LIST_HEAD(&mdev->pending_list);
        INIT_LIST_HEAD(&mdev->done_list);
        INIT_LIST_HEAD(&mdev->free_list);

        dma_dev = &mdev->dmadev;

        /* Set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
                BIT(DMA_MEM_TO_MEM);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        /* Init DMA link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* Set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = msgdma_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
        dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
        dma_dev->device_config = msgdma_dma_config;

        dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

        mdev->dmachan.device = dma_dev;
        list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

        /* Set DMA mask to 64 bits */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret)
                        goto fail;
        }

        msgdma_reset(mdev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto fail;

        dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

        return 0;

fail:
        msgdma_dev_remove(mdev);

        return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
        struct msgdma_device *mdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&mdev->dmadev);
        msgdma_dev_remove(mdev);

        dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

        return 0;
}

static struct platform_driver msgdma_driver = {
        .driver = {
                .name = "altera-msgdma",
        },
        .probe = msgdma_probe,
        .remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");
