drivers/dma/zx_dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Linaro.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"zx-dma"
#define DMA_ALIGN		4
#define DMA_MAX_SIZE		(0x10000 - 512)
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR			0x00
#define REG_ZX_DST_ADDR			0x04
#define REG_ZX_TX_X_COUNT		0x08
#define REG_ZX_TX_ZY_COUNT		0x0c
#define REG_ZX_SRC_ZY_STEP		0x10
#define REG_ZX_DST_ZY_STEP		0x14
#define REG_ZX_LLI_ADDR			0x1c
#define REG_ZX_CTRL			0x20
#define REG_ZX_TC_IRQ			0x800
#define REG_ZX_SRC_ERR_IRQ		0x804
#define REG_ZX_DST_ERR_IRQ		0x808
#define REG_ZX_CFG_ERR_IRQ		0x80c
#define REG_ZX_TC_IRQ_RAW		0x810
#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
#define REG_ZX_DST_ERR_IRQ_RAW		0x818
#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
#define REG_ZX_STATUS			0x820
#define REG_ZX_DMA_GRP_PRIO		0x824
#define REG_ZX_DMA_ARB			0x828

#define ZX_FORCE_CLOSE			BIT(31)
#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN		16
#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL		(3 << 4)
#define ZX_DST_FIFO_MODE		BIT(3)
#define ZX_SRC_FIFO_MODE		BIT(2)
#define ZX_SOFT_REQ			BIT(1)
#define ZX_CH_ENABLE			BIT(0)

#define ZX_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

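/*
 * Hardware burst-width encoding: each value is log2 of the bus width
 * in bytes, which is why zx_dma_burst_width() below can derive it
 * directly as ffs(width) - 1.
 */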
enum zx_dma_burst_width {
	ZX_DMA_WIDTH_8BIT	= 0,
	ZX_DMA_WIDTH_16BIT	= 1,
	ZX_DMA_WIDTH_32BIT	= 2,
	ZX_DMA_WIDTH_64BIT	= 3,
};

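/*
 * One hardware link-list item (LLI). The controller loads these fields
 * into the per-channel registers and then follows 'lli' to the bus
 * address of the next item; a zero 'lli' terminates the chain (see the
 * "end of link" handling in the prep routines). The trailing padding
 * keeps each item the same size as the 0x40-byte per-channel register
 * window (see zx_dma_probe()), and the 32-byte alignment matches the
 * dma_pool created there.
 */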
struct zx_desc_hw {
	u32 saddr;
	u32 daddr;
	u32 src_x;
	u32 src_zy;
	u32 src_zy_step;
	u32 dst_zy_step;
	u32 reserved1;
	u32 lli;
	u32 ctr;
	u32 reserved[7]; /* pack as hardware registers region size */
} __aligned(32);

struct zx_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct zx_desc_hw	*desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
	struct dma_slave_config slave_cfg;
	int			id; /* Request phy chan id */
	u32			ccfg;
	u32			cyclic;
	struct virt_dma_chan	vc;
	struct zx_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
};

struct zx_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct zx_dma_chan	*vchan;
	struct zx_dma_desc_sw	*ds_run;
	struct zx_dma_desc_sw	*ds_done;
};

struct zx_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock; /* lock for ch and phy */
	struct list_head	chan_pending;
	struct zx_dma_phy	*phy;
	struct zx_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	int			irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
	return container_of(chan, struct zx_dma_chan, vc.chan);
}

static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
	u32 val = 0;

	val = readl_relaxed(phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	val |= ZX_FORCE_CLOSE;
	writel_relaxed(val, phy->base + REG_ZX_CTRL);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
	return readl_relaxed(d->base + REG_ZX_STATUS);
}

static void zx_dma_init_state(struct zx_dma_dev *d)
{
	/* set same priority */
	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
	/* clear all irq */
	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static int zx_dma_start_txd(struct zx_dma_chan *c)
{
	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct zx_dma_desc_sw *ds =
			container_of(vd, struct zx_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);
		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		zx_dma_set_desc(c->phy, ds->desc_hw);
		return 0;
	}
	c->phy->ds_done = NULL;
	c->phy->ds_run = NULL;
	return -EAGAIN;
}

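/*
 * Channel scheduler. The first pass releases physical channels whose
 * current descriptor has completed and which have nothing further
 * queued; the second pass binds pending virtual channels to their
 * physical channel (each request maps to the fixed slot d->phy[c->id])
 * and starts the next descriptor on every channel allocated here.
 */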
static void zx_dma_task(struct zx_dma_dev *d)
{
	struct zx_dma_phy *p;
	struct zx_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;
	unsigned long flags;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irqsave(&c->vc.lock, flags);
		p = c->phy;
		if (p && p->ds_done && zx_dma_start_txd(c)) {
			/* No current txd associated with this channel */
			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
			/* Mark this channel free */
			c->phy = NULL;
			p->vchan = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irqsave(&d->lock, flags);
	while (!list_empty(&d->chan_pending)) {
		c = list_first_entry(&d->chan_pending,
				     struct zx_dma_chan, node);
		p = &d->phy[c->id];
		if (!p->vchan) {
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << c->id;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
		} else {
			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
		}
	}
	spin_unlock_irqrestore(&d->lock, flags);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irqsave(&c->vc.lock, flags);
				zx_dma_start_txd(c);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
		}
	}
}

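/*
 * Interrupt handler: for each channel with a pending transfer-complete
 * bit, either run the cyclic period callback or complete the cookie,
 * then ack the handled bits in the raw-status registers. Source,
 * destination and configuration errors are reported but not recovered.
 */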
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
	struct zx_dma_phy *p;
	struct zx_dma_chan *c;
	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
	u32 i, irq_chan = 0, task = 0;

	while (tc) {
		i = __ffs(tc);
		tc &= ~BIT(i);
		p = &d->phy[i];
		c = p->vchan;
		if (c) {
			unsigned long flags;

			spin_lock_irqsave(&c->vc.lock, flags);
			if (c->cyclic) {
				vchan_cyclic_callback(&p->ds_run->vd);
			} else {
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				task = 1;
			}
			spin_unlock_irqrestore(&c->vc.lock, flags);
			irq_chan |= BIT(i);
		}
	}

	if (serr || derr || cfg)
		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
			 serr, derr, cfg);

	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

	if (task)
		zx_dma_task(d);
	return IRQ_HANDLED;
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

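/*
 * Residue accounting: a cookie still sitting on the issued list has its
 * whole size outstanding; for the descriptor currently on the hardware,
 * read back the LLI pointer and sum the src_x byte counts of the items
 * the controller has not yet consumed.
 */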
static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *state)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct zx_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = 0;
		clli = zx_dma_get_curr_lli(p);
		index = (clli - ds->desc_hw_lli) /
				sizeof(struct zx_desc_hw) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].src_x;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void zx_dma_issue_pending(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;
	int issue = 0;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			/* if new channel, add chan_pending */
			list_add_tail(&c->node, &d->chan_pending);
			issue = 1;
			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
		}
		spin_unlock(&d->lock);
	} else {
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	if (issue)
		zx_dma_task(d);
}

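/*
 * Fill LLI slot 'num' and pre-link it to the following slot. The prep
 * routines overwrite the final slot's 'lli' afterwards: zeroed to
 * terminate the chain, or pointed back at the first item for cyclic
 * transfers.
 */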
static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if ((num + 1) < ds->desc_num)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct zx_desc_hw);
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].src_x = len;
	ds->desc_hw[num].ctr = ccfg;
}

static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
						     struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		return ZX_DMA_WIDTH_32BIT;
	}
}

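/*
 * Translate the cached dma_slave_config into a channel control word.
 * Memory-to-memory uses the software request at full 32-bit width; the
 * slave directions take the width from the device side and apply it to
 * both ends (see the comment below on trailing partial bursts), with
 * maxburst clamped to the 16-beat hardware limit.
 */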
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
	struct dma_slave_config *cfg = &c->slave_cfg;
	enum zx_dma_burst_width src_width;
	enum zx_dma_burst_width dst_width;
	u32 maxburst = 0;

	switch (dir) {
	case DMA_MEM_TO_MEM:
		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
		break;
	case DMA_MEM_TO_DEV:
		c->dev_addr = cfg->dst_addr;
		/*
		 * The dst transfer length is derived from the src width,
		 * the length and the dst width, so we must make sure it
		 * cannot exceed the maximum transfer size. A trailing
		 * single transaction that does not fill a full burst
		 * also requires identical src/dst data widths.
		 */
		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
		maxburst = cfg->dst_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(dst_width)
			| ZX_DST_BURST_WIDTH(dst_width);
		break;
	case DMA_DEV_TO_MEM:
		c->dev_addr = cfg->src_addr;
		src_width = zx_dma_burst_width(cfg->src_addr_width);
		maxburst = cfg->src_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(src_width)
			| ZX_DST_BURST_WIDTH(src_width);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

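/*
 * Copies longer than DMA_MAX_SIZE (0x10000 - 512 = 65024 bytes) are
 * split across several LLIs. For example, a 100 KiB copy (102400
 * bytes) needs DIV_ROUND_UP(102400, 65024) = 2 items: one of 65024
 * bytes followed by one of 37376 bytes.
 */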
static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	if (zx_pre_config(c, DMA_MEM_TO_MEM))
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	ds->size = len;
	num = 0;

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	c->cyclic = 0;
	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

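/*
 * Slave scatter-gather: the LLI count is the number of sg entries plus
 * the extra items needed for any entry longer than DMA_MAX_SIZE; the
 * device-side address is the FIFO address cached by zx_pre_config().
 */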
static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (!sgl)
		return NULL;

	if (zx_pre_config(c, dir))
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	num = 0;
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

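/*
 * Cyclic transfers use one LLI per period, each raising an interrupt
 * (ZX_IRQ_ENABLE_ALL is OR'd into every control word), with the last
 * item linked back to the first so the hardware loops until the
 * channel is terminated.
 */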
static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	dma_addr_t src = 0, dst = 0;
	int num_periods = buf_len / period_len;
	int buf = 0, num = 0;

	if (period_len > DMA_MAX_SIZE) {
		dev_err(chan->device->dev, "maximum period size exceeded\n");
		return NULL;
	}

	if (zx_pre_config(c, dir))
		return NULL;

	ds = zx_alloc_desc_resource(num_periods, chan);
	if (!ds)
		return NULL;
	c->cyclic = 1;

	while (buf < buf_len) {
		if (dir == DMA_MEM_TO_DEV) {
			src = dma_addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = dma_addr;
		}
		zx_dma_fill_desc(ds, dst, src, period_len, num++,
				 c->ccfg | ZX_IRQ_ENABLE_ALL);
		dma_addr += period_len;
		buf += period_len;
	}

	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
	ds->size = buf_len;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

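/*
 * zx_dma_config() below only caches the slave configuration; it takes
 * effect when a transfer is prepared, via zx_pre_config(). A minimal
 * sketch of the client side using only generic dmaengine calls (the
 * FIFO address, burst and width values are illustrative, not from this
 * driver):
 *
 *	// fifo_phys_addr is a hypothetical device FIFO bus address
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */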
static int zx_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct zx_dma_chan *c = to_zx_chan(chan);

	if (!cfg)
		return -EINVAL;

	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

	return 0;
}

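/*
 * terminate_all: take the channel off the pending list so the scheduler
 * cannot pick it up, force-close the physical channel if one is bound,
 * and free all queued descriptors once the lock is dropped.
 */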
static int zx_dma_terminate_all(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	struct zx_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		zx_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		p->ds_run = NULL;
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int zx_dma_transfer_pause(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static int zx_dma_transfer_resume(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val |= ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
	struct zx_dma_desc_sw *ds =
		container_of(vd, struct zx_dma_desc_sw, vd);
	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static const struct of_device_id zx6702_dma_dt_ids[] = {
	{ .compatible = "zte,zx296702-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

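/*
 * The translation below expects a single dma-cell: the request number,
 * which is range-checked against the "dma-requests" property and then
 * used directly as the physical channel index (d->phy[c->id]). A
 * hypothetical client binding, for illustration only:
 *
 *	dmas = <&dma 5>;	// request line 5
 *	dma-names = "tx";
 */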
static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
					       struct of_dma *ofdma)
{
	struct zx_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];
	struct dma_chan *chan;
	struct zx_dma_chan *c;

	if (request >= d->dma_requests)
		return NULL;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan) {
		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
		return NULL;
	}
	c = to_zx_chan(chan);
	c->id = request;
	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
		 c->id, &c->vc);
	return chan;
}

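/*
 * Probe: map the registers, read the "dma-channels" and "dma-requests"
 * properties, create the LLI dma_pool and per-channel state, enable the
 * clock before the first register access, then register the dmaengine
 * device and the OF translation hook.
 */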
static int zx_dma_probe(struct platform_device *op)
{
	struct zx_dma_dev *d;
	struct resource *iores;
	int i, ret = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32(op->dev.of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32(op->dev.of_node,
			     "dma-requests", &d->dma_requests);
	if (!d->dma_requests || !d->dma_channels)
		return -EINVAL;

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	d->irq = platform_get_irq(op, 0);
	if (d->irq < 0)
		return d->irq;

	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
			       0, DRIVER_NAME, d);
	if (ret)
		return ret;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
			LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
	if (!d->phy)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct zx_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
	d->slave.device_tx_status = zx_dma_tx_status;
	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = zx_dma_issue_pending;
	d->slave.device_config = zx_dma_config;
	d->slave.device_terminate_all = zx_dma_terminate_all;
	d->slave.device_pause = zx_dma_transfer_pause;
	d->slave.device_resume = zx_dma_transfer_resume;
	d->slave.copy_align = DMA_ALIGN;
	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
			| BIT(DMA_DEV_TO_MEM);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
	if (!d->chans)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct zx_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = zx_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		goto zx_dma_out;
	}

	zx_dma_init_state(d);

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	platform_set_drvdata(op, d);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto clk_dis;

	ret = of_dma_controller_register(op->dev.of_node,
					 zx_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	dev_info(&op->dev, "initialized\n");
	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
clk_dis:
	clk_disable_unprepare(d->clk);
zx_dma_out:
	return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
	struct zx_dma_chan *c, *cn;
	struct zx_dma_dev *d = platform_get_drvdata(op);

	/* explicitly free the irq */
	devm_free_irq(&op->dev, d->irq, d);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free(op->dev.of_node);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
	}
	clk_disable_unprepare(d->clk);
	dmam_pool_destroy(d->pool);

	return 0;
}

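/*
 * System sleep: suspend is refused while any physical channel is still
 * active; resume re-enables the clock and re-runs zx_dma_init_state()
 * to restore the interrupt and arbitration setup.
 */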
#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = zx_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "channel(s) still running (stat 0x%x), refusing to suspend\n",
			 stat);
		return -EBUSY;
	}
	clk_disable_unprepare(d->clk);
	return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	zx_dma_init_state(d);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &zx_dma_pmops,
		.of_match_table = zx6702_dma_dt_ids,
	},
	.probe		= zx_dma_probe,
	.remove		= zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
MODULE_LICENSE("GPL v2");
