drivers/dma/pch_dma.c


DEFINITIONS

This source file includes the following definitions:
  1. to_pd_desc
  2. to_pd_chan
  3. to_pd
  4. chan2dev
  5. chan2parent
  6. pdc_first_active
  7. pdc_first_queued
  8. pdc_enable_irq
  9. pdc_set_dir
  10. pdc_set_mode
  11. pdc_get_status0
  12. pdc_get_status2
  13. pdc_is_idle
  14. pdc_dostart
  15. pdc_chain_complete
  16. pdc_complete_all
  17. pdc_handle_error
  18. pdc_advance_work
  19. pd_tx_submit
  20. pdc_alloc_desc
  21. pdc_desc_get
  22. pdc_desc_put
  23. pd_alloc_chan_resources
  24. pd_free_chan_resources
  25. pd_tx_status
  26. pd_issue_pending
  27. pd_prep_slave_sg
  28. pd_device_terminate_all
  29. pdc_tasklet
  30. pd_irq
  31. pch_dma_save_regs
  32. pch_dma_restore_regs
  33. pch_dma_suspend
  34. pch_dma_resume
  35. pch_dma_probe
  36. pch_dma_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Topcliff PCH DMA controller driver
   4  * Copyright (c) 2010 Intel Corporation
   5  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
   6  */
   7 
   8 #include <linux/dmaengine.h>
   9 #include <linux/dma-mapping.h>
  10 #include <linux/init.h>
  11 #include <linux/pci.h>
  12 #include <linux/slab.h>
  13 #include <linux/interrupt.h>
  14 #include <linux/module.h>
  15 #include <linux/pch_dma.h>
  16 
  17 #include "dmaengine.h"
  18 
  19 #define DRV_NAME "pch-dma"
  20 
  21 #define DMA_CTL0_DISABLE                0x0
  22 #define DMA_CTL0_SG                     0x1
  23 #define DMA_CTL0_ONESHOT                0x2
  24 #define DMA_CTL0_MODE_MASK_BITS         0x3
  25 #define DMA_CTL0_DIR_SHIFT_BITS         2
  26 #define DMA_CTL0_BITS_PER_CH            4
  27 
  28 #define DMA_CTL2_START_SHIFT_BITS       8
  29 #define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
  30 
  31 #define DMA_STATUS_IDLE                 0x0
  32 #define DMA_STATUS_DESC_READ            0x1
  33 #define DMA_STATUS_WAIT                 0x2
  34 #define DMA_STATUS_ACCESS               0x3
  35 #define DMA_STATUS_BITS_PER_CH          2
  36 #define DMA_STATUS_MASK_BITS            0x3
  37 #define DMA_STATUS_SHIFT_BITS           16
  38 #define DMA_STATUS_IRQ(x)               (0x1 << (x))
  39 #define DMA_STATUS0_ERR(x)              (0x1 << ((x) + 8))
  40 #define DMA_STATUS2_ERR(x)              (0x1 << (x))
  41 
  42 #define DMA_DESC_WIDTH_SHIFT_BITS       12
  43 #define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
  44 #define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
  45 #define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
  46 #define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
  47 #define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
  48 #define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
  49 #define DMA_DESC_END_WITHOUT_IRQ        0x0
  50 #define DMA_DESC_END_WITH_IRQ           0x1
  51 #define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
  52 #define DMA_DESC_FOLLOW_WITH_IRQ        0x3
  53 
  54 #define MAX_CHAN_NR                     12
  55 
  56 #define DMA_MASK_CTL0_MODE      0x33333333
  57 #define DMA_MASK_CTL2_MODE      0x00003333
  58 
  59 static unsigned int init_nr_desc_per_channel = 64;
  60 module_param(init_nr_desc_per_channel, uint, 0644);
  61 MODULE_PARM_DESC(init_nr_desc_per_channel,
  62                  "initial descriptors per channel (default: 64)");
  63 
  64 struct pch_dma_desc_regs {
  65         u32     dev_addr;
  66         u32     mem_addr;
  67         u32     size;
  68         u32     next;
  69 };
  70 
  71 struct pch_dma_regs {
  72         u32     dma_ctl0;
  73         u32     dma_ctl1;
  74         u32     dma_ctl2;
  75         u32     dma_ctl3;
  76         u32     dma_sts0;
  77         u32     dma_sts1;
  78         u32     dma_sts2;
  79         u32     reserved3;
  80         struct pch_dma_desc_regs desc[MAX_CHAN_NR];
  81 };
  82 
  83 struct pch_dma_desc {
  84         struct pch_dma_desc_regs regs;
  85         struct dma_async_tx_descriptor txd;
  86         struct list_head        desc_node;
  87         struct list_head        tx_list;
  88 };
  89 
  90 struct pch_dma_chan {
  91         struct dma_chan         chan;
  92         void __iomem *membase;
  93         enum dma_transfer_direction dir;
  94         struct tasklet_struct   tasklet;
  95         unsigned long           err_status;
  96 
  97         spinlock_t              lock;
  98 
  99         struct list_head        active_list;
 100         struct list_head        queue;
 101         struct list_head        free_list;
 102         unsigned int            descs_allocated;
 103 };
 104 
 105 #define PDC_DEV_ADDR    0x00
 106 #define PDC_MEM_ADDR    0x04
 107 #define PDC_SIZE        0x08
 108 #define PDC_NEXT        0x0C
 109 
 110 #define channel_readl(pdc, name) \
 111         readl((pdc)->membase + PDC_##name)
 112 #define channel_writel(pdc, name, val) \
 113         writel((val), (pdc)->membase + PDC_##name)
 114 
 115 struct pch_dma {
 116         struct dma_device       dma;
 117         void __iomem *membase;
 118         struct dma_pool         *pool;
 119         struct pch_dma_regs     regs;
 120         struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
 121         struct pch_dma_chan     channels[MAX_CHAN_NR];
 122 };
 123 
 124 #define PCH_DMA_CTL0    0x00
 125 #define PCH_DMA_CTL1    0x04
 126 #define PCH_DMA_CTL2    0x08
 127 #define PCH_DMA_CTL3    0x0C
 128 #define PCH_DMA_STS0    0x10
 129 #define PCH_DMA_STS1    0x14
 130 #define PCH_DMA_STS2    0x18
 131 
 132 #define dma_readl(pd, name) \
 133         readl((pd)->membase + PCH_DMA_##name)
 134 #define dma_writel(pd, name, val) \
 135         writel((val), (pd)->membase + PCH_DMA_##name)
 136 
 137 static inline
 138 struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
 139 {
 140         return container_of(txd, struct pch_dma_desc, txd);
 141 }
 142 
 143 static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
 144 {
 145         return container_of(chan, struct pch_dma_chan, chan);
 146 }
 147 
 148 static inline struct pch_dma *to_pd(struct dma_device *ddev)
 149 {
 150         return container_of(ddev, struct pch_dma, dma);
 151 }
 152 
 153 static inline struct device *chan2dev(struct dma_chan *chan)
 154 {
 155         return &chan->dev->device;
 156 }
 157 
 158 static inline struct device *chan2parent(struct dma_chan *chan)
 159 {
 160         return chan->dev->device.parent;
 161 }
 162 
 163 static inline
 164 struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
 165 {
 166         return list_first_entry(&pd_chan->active_list,
 167                                 struct pch_dma_desc, desc_node);
 168 }
 169 
 170 static inline
 171 struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
 172 {
 173         return list_first_entry(&pd_chan->queue,
 174                                 struct pch_dma_desc, desc_node);
 175 }
 176 
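/*
 * Enable or disable the completion interrupt for this channel by setting or
 * clearing its bit in the CTL2 register (channels 0-7 map to bits 0-7,
 * channels 8-11 to bits 16-19).
 */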
 177 static void pdc_enable_irq(struct dma_chan *chan, int enable)
 178 {
 179         struct pch_dma *pd = to_pd(chan->device);
 180         u32 val;
 181         int pos;
 182 
 183         if (chan->chan_id < 8)
 184                 pos = chan->chan_id;
 185         else
 186                 pos = chan->chan_id + 8;
 187 
 188         val = dma_readl(pd, CTL2);
 189 
 190         if (enable)
 191                 val |= 0x1 << pos;
 192         else
 193                 val &= ~(0x1 << pos);
 194 
 195         dma_writel(pd, CTL2, val);
 196 
 197         dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
 198                 chan->chan_id, val);
 199 }
 200 
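/*
 * Program this channel's transfer direction bit in CTL0 (channels 0-7) or
 * CTL3 (channels 8-11): set for memory-to-device, cleared otherwise.  The
 * mode fields of all other channels are written back as the
 * DMA_MASK_CTL0_MODE/DMA_MASK_CTL2_MODE pattern so the read-modify-write
 * does not disturb their settings.
 */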
 201 static void pdc_set_dir(struct dma_chan *chan)
 202 {
 203         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 204         struct pch_dma *pd = to_pd(chan->device);
 205         u32 val;
 206         u32 mask_mode;
 207         u32 mask_ctl;
 208 
 209         if (chan->chan_id < 8) {
 210                 val = dma_readl(pd, CTL0);
 211 
 212                 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
 213                                         (DMA_CTL0_BITS_PER_CH * chan->chan_id);
 214                 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 215                                        (DMA_CTL0_BITS_PER_CH * chan->chan_id));
 216                 val &= mask_mode;
 217                 if (pd_chan->dir == DMA_MEM_TO_DEV)
 218                         val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 219                                        DMA_CTL0_DIR_SHIFT_BITS);
 220                 else
 221                         val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 222                                          DMA_CTL0_DIR_SHIFT_BITS));
 223 
 224                 val |= mask_ctl;
 225                 dma_writel(pd, CTL0, val);
 226         } else {
 227                 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 228                 val = dma_readl(pd, CTL3);
 229 
 230                 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
 231                                                 (DMA_CTL0_BITS_PER_CH * ch);
 232                 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 233                                                  (DMA_CTL0_BITS_PER_CH * ch));
 234                 val &= mask_mode;
 235                 if (pd_chan->dir == DMA_MEM_TO_DEV)
 236                         val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 237                                        DMA_CTL0_DIR_SHIFT_BITS);
 238                 else
 239                         val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 240                                          DMA_CTL0_DIR_SHIFT_BITS));
 241                 val |= mask_ctl;
 242                 dma_writel(pd, CTL3, val);
 243         }
 244 
 245         dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
 246                 chan->chan_id, val);
 247 }
 248 
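/*
 * Write the requested mode (DMA_CTL0_DISABLE/DMA_CTL0_SG/DMA_CTL0_ONESHOT)
 * into this channel's mode field in CTL0 or CTL3, preserving the channel's
 * direction bit and applying the same mask pattern to the other channels
 * as pdc_set_dir().
 */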
 249 static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 250 {
 251         struct pch_dma *pd = to_pd(chan->device);
 252         u32 val;
 253         u32 mask_ctl;
 254         u32 mask_dir;
 255 
 256         if (chan->chan_id < 8) {
 257                 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 258                            (DMA_CTL0_BITS_PER_CH * chan->chan_id));
 259                 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
 260                                  DMA_CTL0_DIR_SHIFT_BITS);
 261                 val = dma_readl(pd, CTL0);
 262                 val &= mask_dir;
 263                 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
 264                 val |= mask_ctl;
 265                 dma_writel(pd, CTL0, val);
 266         } else {
 267                 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 268                 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 269                                                  (DMA_CTL0_BITS_PER_CH * ch));
 270                 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
 271                                  DMA_CTL0_DIR_SHIFT_BITS);
 272                 val = dma_readl(pd, CTL3);
 273                 val &= mask_dir;
 274                 val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
 275                 val |= mask_ctl;
 276                 dma_writel(pd, CTL3, val);
 277         }
 278 
 279         dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 280                 chan->chan_id, val);
 281 }
 282 
 283 static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
 284 {
 285         struct pch_dma *pd = to_pd(pd_chan->chan.device);
 286         u32 val;
 287 
 288         val = dma_readl(pd, STS0);
 289         return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
 290                         DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
 291 }
 292 
 293 static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
 294 {
 295         struct pch_dma *pd = to_pd(pd_chan->chan.device);
 296         u32 val;
 297 
 298         val = dma_readl(pd, STS2);
 299         return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
 300                         DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
 301 }
 302 
 303 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 304 {
 305         u32 sts;
 306 
 307         if (pd_chan->chan.chan_id < 8)
 308                 sts = pdc_get_status0(pd_chan);
 309         else
 310                 sts = pdc_get_status2(pd_chan);
 311 
 312 
 313         if (sts == DMA_STATUS_IDLE)
 314                 return true;
 315         else
 316                 return false;
 317 }
 318 
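/*
 * Start a descriptor on an idle channel.  A single descriptor is written
 * directly into the channel registers and run in one-shot mode; a chained
 * descriptor list is started in scatter-gather mode by pointing NEXT at the
 * first descriptor's bus address.
 */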
 319 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
 320 {
 321         if (!pdc_is_idle(pd_chan)) {
 322                 dev_err(chan2dev(&pd_chan->chan),
 323                         "BUG: Attempt to start non-idle channel\n");
 324                 return;
 325         }
 326 
 327         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
 328                 pd_chan->chan.chan_id, desc->regs.dev_addr);
 329         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
 330                 pd_chan->chan.chan_id, desc->regs.mem_addr);
 331         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
 332                 pd_chan->chan.chan_id, desc->regs.size);
 333         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
 334                 pd_chan->chan.chan_id, desc->regs.next);
 335 
 336         if (list_empty(&desc->tx_list)) {
 337                 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
 338                 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
 339                 channel_writel(pd_chan, SIZE, desc->regs.size);
 340                 channel_writel(pd_chan, NEXT, desc->regs.next);
 341                 pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
 342         } else {
 343                 channel_writel(pd_chan, NEXT, desc->txd.phys);
 344                 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
 345         }
 346 }
 347 
 348 static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
 349                                struct pch_dma_desc *desc)
 350 {
 351         struct dma_async_tx_descriptor *txd = &desc->txd;
 352         struct dmaengine_desc_callback cb;
 353 
 354         dmaengine_desc_get_callback(txd, &cb);
 355         list_splice_init(&desc->tx_list, &pd_chan->free_list);
 356         list_move(&desc->desc_node, &pd_chan->free_list);
 357 
 358         dmaengine_desc_callback_invoke(&cb, NULL);
 359 }
 360 
 361 static void pdc_complete_all(struct pch_dma_chan *pd_chan)
 362 {
 363         struct pch_dma_desc *desc, *_d;
 364         LIST_HEAD(list);
 365 
 366         BUG_ON(!pdc_is_idle(pd_chan));
 367 
 368         if (!list_empty(&pd_chan->queue))
 369                 pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
 370 
 371         list_splice_init(&pd_chan->active_list, &list);
 372         list_splice_init(&pd_chan->queue, &pd_chan->active_list);
 373 
 374         list_for_each_entry_safe(desc, _d, &list, desc_node)
 375                 pdc_chain_complete(pd_chan, desc);
 376 }
 377 
 378 static void pdc_handle_error(struct pch_dma_chan *pd_chan)
 379 {
 380         struct pch_dma_desc *bad_desc;
 381 
 382         bad_desc = pdc_first_active(pd_chan);
 383         list_del(&bad_desc->desc_node);
 384 
 385         list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
 386 
 387         if (!list_empty(&pd_chan->active_list))
 388                 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
 389 
 390         dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
 391         dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
 392                  bad_desc->txd.cookie);
 393 
 394         pdc_chain_complete(pd_chan, bad_desc);
 395 }
 396 
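/*
 * Called with the channel idle and the lock held: if at most one descriptor
 * is still on the active list, complete them all and start whatever was
 * queued; otherwise retire the finished descriptor and start the next one.
 */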
 397 static void pdc_advance_work(struct pch_dma_chan *pd_chan)
 398 {
 399         if (list_empty(&pd_chan->active_list) ||
 400                 list_is_singular(&pd_chan->active_list)) {
 401                 pdc_complete_all(pd_chan);
 402         } else {
 403                 pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
 404                 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
 405         }
 406 }
 407 
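/*
 * tx_submit hook: start the descriptor immediately if the channel has
 * nothing in flight, otherwise append it to the pending queue.
 */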
 408 static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 409 {
 410         struct pch_dma_desc *desc = to_pd_desc(txd);
 411         struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
 412 
 413         spin_lock(&pd_chan->lock);
 414 
 415         if (list_empty(&pd_chan->active_list)) {
 416                 list_add_tail(&desc->desc_node, &pd_chan->active_list);
 417                 pdc_dostart(pd_chan, desc);
 418         } else {
 419                 list_add_tail(&desc->desc_node, &pd_chan->queue);
 420         }
 421 
 422         spin_unlock(&pd_chan->lock);
 423         return 0;
 424 }
 425 
 426 static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 427 {
 428         struct pch_dma_desc *desc = NULL;
 429         struct pch_dma *pd = to_pd(chan->device);
 430         dma_addr_t addr;
 431 
 432         desc = dma_pool_zalloc(pd->pool, flags, &addr);
 433         if (desc) {
 434                 INIT_LIST_HEAD(&desc->tx_list);
 435                 dma_async_tx_descriptor_init(&desc->txd, chan);
 436                 desc->txd.tx_submit = pd_tx_submit;
 437                 desc->txd.flags = DMA_CTRL_ACK;
 438                 desc->txd.phys = addr;
 439         }
 440 
 441         return desc;
 442 }
 443 
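/*
 * Take the first ACKed descriptor off the free list, or fall back to
 * allocating a new one from the DMA pool with GFP_ATOMIC.
 */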
 444 static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 445 {
 446         struct pch_dma_desc *desc, *_d;
 447         struct pch_dma_desc *ret = NULL;
 448         int i = 0;
 449 
 450         spin_lock(&pd_chan->lock);
 451         list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 452                 i++;
 453                 if (async_tx_test_ack(&desc->txd)) {
 454                         list_del(&desc->desc_node);
 455                         ret = desc;
 456                         break;
 457                 }
 458                 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 459         }
 460         spin_unlock(&pd_chan->lock);
 461         dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 462 
 463         if (!ret) {
 464                 ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
 465                 if (ret) {
 466                         spin_lock(&pd_chan->lock);
 467                         pd_chan->descs_allocated++;
 468                         spin_unlock(&pd_chan->lock);
 469                 } else {
 470                         dev_err(chan2dev(&pd_chan->chan),
 471                                 "failed to alloc desc\n");
 472                 }
 473         }
 474 
 475         return ret;
 476 }
 477 
 478 static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 479                          struct pch_dma_desc *desc)
 480 {
 481         if (desc) {
 482                 spin_lock(&pd_chan->lock);
 483                 list_splice_init(&desc->tx_list, &pd_chan->free_list);
 484                 list_add(&desc->desc_node, &pd_chan->free_list);
 485                 spin_unlock(&pd_chan->lock);
 486         }
 487 }
 488 
 489 static int pd_alloc_chan_resources(struct dma_chan *chan)
 490 {
 491         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 492         struct pch_dma_desc *desc;
 493         LIST_HEAD(tmp_list);
 494         int i;
 495 
 496         if (!pdc_is_idle(pd_chan)) {
 497                 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
 498                 return -EIO;
 499         }
 500 
 501         if (!list_empty(&pd_chan->free_list))
 502                 return pd_chan->descs_allocated;
 503 
 504         for (i = 0; i < init_nr_desc_per_channel; i++) {
 505                 desc = pdc_alloc_desc(chan, GFP_KERNEL);
 506 
 507                 if (!desc) {
 508                         dev_warn(chan2dev(chan),
 509                                 "Only allocated %d initial descriptors\n", i);
 510                         break;
 511                 }
 512 
 513                 list_add_tail(&desc->desc_node, &tmp_list);
 514         }
 515 
 516         spin_lock_irq(&pd_chan->lock);
 517         list_splice(&tmp_list, &pd_chan->free_list);
 518         pd_chan->descs_allocated = i;
 519         dma_cookie_init(chan);
 520         spin_unlock_irq(&pd_chan->lock);
 521 
 522         pdc_enable_irq(chan, 1);
 523 
 524         return pd_chan->descs_allocated;
 525 }
 526 
 527 static void pd_free_chan_resources(struct dma_chan *chan)
 528 {
 529         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 530         struct pch_dma *pd = to_pd(chan->device);
 531         struct pch_dma_desc *desc, *_d;
 532         LIST_HEAD(tmp_list);
 533 
 534         BUG_ON(!pdc_is_idle(pd_chan));
 535         BUG_ON(!list_empty(&pd_chan->active_list));
 536         BUG_ON(!list_empty(&pd_chan->queue));
 537 
 538         spin_lock_irq(&pd_chan->lock);
 539         list_splice_init(&pd_chan->free_list, &tmp_list);
 540         pd_chan->descs_allocated = 0;
 541         spin_unlock_irq(&pd_chan->lock);
 542 
 543         list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
 544                 dma_pool_free(pd->pool, desc, desc->txd.phys);
 545 
 546         pdc_enable_irq(chan, 0);
 547 }
 548 
 549 static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 550                                     struct dma_tx_state *txstate)
 551 {
 552         return dma_cookie_status(chan, cookie, txstate);
 553 }
 554 
 555 static void pd_issue_pending(struct dma_chan *chan)
 556 {
 557         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 558 
 559         if (pdc_is_idle(pd_chan)) {
 560                 spin_lock(&pd_chan->lock);
 561                 pdc_advance_work(pd_chan);
 562                 spin_unlock(&pd_chan->lock);
 563         }
 564 }
 565 
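/*
 * Build the descriptor chain for a slave scatter/gather transfer.  Each sg
 * entry records the peripheral register address (taken from the pch_dma_slave
 * data in chan->private), the buffer's DMA address and the length encoded
 * together with the access width; descriptors are linked through their bus
 * addresses and the last one is marked END, with or without an interrupt
 * depending on DMA_PREP_INTERRUPT.
 */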
 566 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 567                         struct scatterlist *sgl, unsigned int sg_len,
 568                         enum dma_transfer_direction direction, unsigned long flags,
 569                         void *context)
 570 {
 571         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 572         struct pch_dma_slave *pd_slave = chan->private;
 573         struct pch_dma_desc *first = NULL;
 574         struct pch_dma_desc *prev = NULL;
 575         struct pch_dma_desc *desc = NULL;
 576         struct scatterlist *sg;
 577         dma_addr_t reg;
 578         int i;
 579 
 580         if (unlikely(!sg_len)) {
 581                 dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
 582                 return NULL;
 583         }
 584 
 585         if (direction == DMA_DEV_TO_MEM)
 586                 reg = pd_slave->rx_reg;
 587         else if (direction == DMA_MEM_TO_DEV)
 588                 reg = pd_slave->tx_reg;
 589         else
 590                 return NULL;
 591 
 592         pd_chan->dir = direction;
 593         pdc_set_dir(chan);
 594 
 595         for_each_sg(sgl, sg, sg_len, i) {
 596                 desc = pdc_desc_get(pd_chan);
 597 
 598                 if (!desc)
 599                         goto err_desc_get;
 600 
 601                 desc->regs.dev_addr = reg;
 602                 desc->regs.mem_addr = sg_dma_address(sg);
 603                 desc->regs.size = sg_dma_len(sg);
 604                 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 605 
 606                 switch (pd_slave->width) {
 607                 case PCH_DMA_WIDTH_1_BYTE:
 608                         if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
 609                                 goto err_desc_get;
 610                         desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
 611                         break;
 612                 case PCH_DMA_WIDTH_2_BYTES:
 613                         if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
 614                                 goto err_desc_get;
 615                         desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
 616                         break;
 617                 case PCH_DMA_WIDTH_4_BYTES:
 618                         if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
 619                                 goto err_desc_get;
 620                         desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
 621                         break;
 622                 default:
 623                         goto err_desc_get;
 624                 }
 625 
 626                 if (!first) {
 627                         first = desc;
 628                 } else {
 629                         prev->regs.next |= desc->txd.phys;
 630                         list_add_tail(&desc->desc_node, &first->tx_list);
 631                 }
 632 
 633                 prev = desc;
 634         }
 635 
 636         if (flags & DMA_PREP_INTERRUPT)
 637                 desc->regs.next = DMA_DESC_END_WITH_IRQ;
 638         else
 639                 desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
 640 
 641         first->txd.cookie = -EBUSY;
 642         desc->txd.flags = flags;
 643 
 644         return &first->txd;
 645 
 646 err_desc_get:
 647         dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
 648         pdc_desc_put(pd_chan, first);
 649         return NULL;
 650 }
 651 
 652 static int pd_device_terminate_all(struct dma_chan *chan)
 653 {
 654         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 655         struct pch_dma_desc *desc, *_d;
 656         LIST_HEAD(list);
 657 
 658         spin_lock_irq(&pd_chan->lock);
 659 
 660         pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
 661 
 662         list_splice_init(&pd_chan->active_list, &list);
 663         list_splice_init(&pd_chan->queue, &list);
 664 
 665         list_for_each_entry_safe(desc, _d, &list, desc_node)
 666                 pdc_chain_complete(pd_chan, desc);
 667 
 668         spin_unlock_irq(&pd_chan->lock);
 669 
 670         return 0;
 671 }
 672 
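/*
 * Per-channel bottom half, scheduled from the interrupt handler: once the
 * channel is idle, either handle a flagged error or advance to the next
 * descriptor.
 */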
 673 static void pdc_tasklet(unsigned long data)
 674 {
 675         struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
 676         unsigned long flags;
 677 
 678         if (!pdc_is_idle(pd_chan)) {
 679                 dev_err(chan2dev(&pd_chan->chan),
 680                         "BUG: handle non-idle channel in tasklet\n");
 681                 return;
 682         }
 683 
 684         spin_lock_irqsave(&pd_chan->lock, flags);
 685         if (test_and_clear_bit(0, &pd_chan->err_status))
 686                 pdc_handle_error(pd_chan);
 687         else
 688                 pdc_advance_work(pd_chan);
 689         spin_unlock_irqrestore(&pd_chan->lock, flags);
 690 }
 691 
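/*
 * Shared interrupt handler: read STS0/STS2, flag any error and schedule the
 * tasklet for every channel that raised an interrupt, then write the status
 * values back to acknowledge them.
 */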
 692 static irqreturn_t pd_irq(int irq, void *devid)
 693 {
 694         struct pch_dma *pd = (struct pch_dma *)devid;
 695         struct pch_dma_chan *pd_chan;
 696         u32 sts0;
 697         u32 sts2;
 698         int i;
 699         int ret0 = IRQ_NONE;
 700         int ret2 = IRQ_NONE;
 701 
 702         sts0 = dma_readl(pd, STS0);
 703         sts2 = dma_readl(pd, STS2);
 704 
 705         dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
 706 
 707         for (i = 0; i < pd->dma.chancnt; i++) {
 708                 pd_chan = &pd->channels[i];
 709 
 710                 if (i < 8) {
 711                         if (sts0 & DMA_STATUS_IRQ(i)) {
 712                                 if (sts0 & DMA_STATUS0_ERR(i))
 713                                         set_bit(0, &pd_chan->err_status);
 714 
 715                                 tasklet_schedule(&pd_chan->tasklet);
 716                                 ret0 = IRQ_HANDLED;
 717                         }
 718                 } else {
 719                         if (sts2 & DMA_STATUS_IRQ(i - 8)) {
 720                                 if (sts2 & DMA_STATUS2_ERR(i))
 721                                         set_bit(0, &pd_chan->err_status);
 722 
 723                                 tasklet_schedule(&pd_chan->tasklet);
 724                                 ret2 = IRQ_HANDLED;
 725                         }
 726                 }
 727         }
 728 
 729         /* clear interrupt bits in status register */
 730         if (ret0)
 731                 dma_writel(pd, STS0, sts0);
 732         if (ret2)
 733                 dma_writel(pd, STS2, sts2);
 734 
 735         return ret0 | ret2;
 736 }
 737 
 738 #ifdef  CONFIG_PM
 739 static void pch_dma_save_regs(struct pch_dma *pd)
 740 {
 741         struct pch_dma_chan *pd_chan;
 742         struct dma_chan *chan, *_c;
 743         int i = 0;
 744 
 745         pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
 746         pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
 747         pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
 748         pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
 749 
 750         list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 751                 pd_chan = to_pd_chan(chan);
 752 
 753                 pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
 754                 pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
 755                 pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
 756                 pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
 757 
 758                 i++;
 759         }
 760 }
 761 
 762 static void pch_dma_restore_regs(struct pch_dma *pd)
 763 {
 764         struct pch_dma_chan *pd_chan;
 765         struct dma_chan *chan, *_c;
 766         int i = 0;
 767 
 768         dma_writel(pd, CTL0, pd->regs.dma_ctl0);
 769         dma_writel(pd, CTL1, pd->regs.dma_ctl1);
 770         dma_writel(pd, CTL2, pd->regs.dma_ctl2);
 771         dma_writel(pd, CTL3, pd->regs.dma_ctl3);
 772 
 773         list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 774                 pd_chan = to_pd_chan(chan);
 775 
 776                 channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
 777                 channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
 778                 channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
 779                 channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
 780 
 781                 i++;
 782         }
 783 }
 784 
 785 static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
 786 {
 787         struct pch_dma *pd = pci_get_drvdata(pdev);
 788 
 789         if (pd)
 790                 pch_dma_save_regs(pd);
 791 
 792         pci_save_state(pdev);
 793         pci_disable_device(pdev);
 794         pci_set_power_state(pdev, pci_choose_state(pdev, state));
 795 
 796         return 0;
 797 }
 798 
 799 static int pch_dma_resume(struct pci_dev *pdev)
 800 {
 801         struct pch_dma *pd = pci_get_drvdata(pdev);
 802         int err;
 803 
 804         pci_set_power_state(pdev, PCI_D0);
 805         pci_restore_state(pdev);
 806 
 807         err = pci_enable_device(pdev);
 808         if (err) {
 809                 dev_dbg(&pdev->dev, "failed to enable device\n");
 810                 return err;
 811         }
 812 
 813         if (pd)
 814                 pch_dma_restore_regs(pd);
 815 
 816         return 0;
 817 }
 818 #endif
 819 
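/*
 * PCI probe: enable the device, map BAR 1, request the shared IRQ, create the
 * descriptor pool, set up the per-channel state and register the controller
 * with the dmaengine core as a DMA_SLAVE/DMA_PRIVATE provider.
 */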
 820 static int pch_dma_probe(struct pci_dev *pdev,
 821                                    const struct pci_device_id *id)
 822 {
 823         struct pch_dma *pd;
 824         struct pch_dma_regs *regs;
 825         unsigned int nr_channels;
 826         int err;
 827         int i;
 828 
 829         nr_channels = id->driver_data;
 830         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 831         if (!pd)
 832                 return -ENOMEM;
 833 
 834         pci_set_drvdata(pdev, pd);
 835 
 836         err = pci_enable_device(pdev);
 837         if (err) {
 838                 dev_err(&pdev->dev, "Cannot enable PCI device\n");
 839                 goto err_free_mem;
 840         }
 841 
 842         if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 843                 dev_err(&pdev->dev, "Cannot find proper base address\n");
 844                 err = -ENODEV;
 845                 goto err_disable_pdev;
 846         }
 847 
 848         err = pci_request_regions(pdev, DRV_NAME);
 849         if (err) {
 850                 dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
 851                 goto err_disable_pdev;
 852         }
 853 
 854         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 855         if (err) {
 856                 dev_err(&pdev->dev, "Cannot set proper DMA config\n");
 857                 goto err_free_res;
 858         }
 859 
 860         regs = pd->membase = pci_iomap(pdev, 1, 0);
 861         if (!pd->membase) {
 862                 dev_err(&pdev->dev, "Cannot map MMIO registers\n");
 863                 err = -ENOMEM;
 864                 goto err_free_res;
 865         }
 866 
 867         pci_set_master(pdev);
 868         pd->dma.dev = &pdev->dev;
 869 
 870         err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
 871         if (err) {
 872                 dev_err(&pdev->dev, "Failed to request IRQ\n");
 873                 goto err_iounmap;
 874         }
 875 
 876         pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
 877                                    sizeof(struct pch_dma_desc), 4, 0);
 878         if (!pd->pool) {
 879                 dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
 880                 err = -ENOMEM;
 881                 goto err_free_irq;
 882         }
 883 
 884 
 885         INIT_LIST_HEAD(&pd->dma.channels);
 886 
 887         for (i = 0; i < nr_channels; i++) {
 888                 struct pch_dma_chan *pd_chan = &pd->channels[i];
 889 
 890                 pd_chan->chan.device = &pd->dma;
 891                 dma_cookie_init(&pd_chan->chan);
 892 
 893                 pd_chan->membase = &regs->desc[i];
 894 
 895                 spin_lock_init(&pd_chan->lock);
 896 
 897                 INIT_LIST_HEAD(&pd_chan->active_list);
 898                 INIT_LIST_HEAD(&pd_chan->queue);
 899                 INIT_LIST_HEAD(&pd_chan->free_list);
 900 
 901                 tasklet_init(&pd_chan->tasklet, pdc_tasklet,
 902                              (unsigned long)pd_chan);
 903                 list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
 904         }
 905 
 906         dma_cap_zero(pd->dma.cap_mask);
 907         dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
 908         dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);
 909 
 910         pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
 911         pd->dma.device_free_chan_resources = pd_free_chan_resources;
 912         pd->dma.device_tx_status = pd_tx_status;
 913         pd->dma.device_issue_pending = pd_issue_pending;
 914         pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
 915         pd->dma.device_terminate_all = pd_device_terminate_all;
 916 
 917         err = dma_async_device_register(&pd->dma);
 918         if (err) {
 919                 dev_err(&pdev->dev, "Failed to register DMA device\n");
 920                 goto err_free_pool;
 921         }
 922 
 923         return 0;
 924 
 925 err_free_pool:
 926         dma_pool_destroy(pd->pool);
 927 err_free_irq:
 928         free_irq(pdev->irq, pd);
 929 err_iounmap:
 930         pci_iounmap(pdev, pd->membase);
 931 err_free_res:
 932         pci_release_regions(pdev);
 933 err_disable_pdev:
 934         pci_disable_device(pdev);
 935 err_free_mem:
 936         kfree(pd);
 937         return err;
 938 }
 939 
 940 static void pch_dma_remove(struct pci_dev *pdev)
 941 {
 942         struct pch_dma *pd = pci_get_drvdata(pdev);
 943         struct pch_dma_chan *pd_chan;
 944         struct dma_chan *chan, *_c;
 945 
 946         if (pd) {
 947                 dma_async_device_unregister(&pd->dma);
 948 
 949                 free_irq(pdev->irq, pd);
 950 
 951                 list_for_each_entry_safe(chan, _c, &pd->dma.channels,
 952                                          device_node) {
 953                         pd_chan = to_pd_chan(chan);
 954 
 955                         tasklet_kill(&pd_chan->tasklet);
 956                 }
 957 
 958                 dma_pool_destroy(pd->pool);
 959                 pci_iounmap(pdev, pd->membase);
 960                 pci_release_regions(pdev);
 961                 pci_disable_device(pdev);
 962                 kfree(pd);
 963         }
 964 }
 965 
 966 /* PCI Device ID of DMA device */
 967 #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
 968 #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
 969 #define PCI_DEVICE_ID_ML7213_DMA1_8CH   0x8026
 970 #define PCI_DEVICE_ID_ML7213_DMA2_8CH   0x802B
 971 #define PCI_DEVICE_ID_ML7213_DMA3_4CH   0x8034
 972 #define PCI_DEVICE_ID_ML7213_DMA4_12CH  0x8032
 973 #define PCI_DEVICE_ID_ML7223_DMA1_4CH   0x800B
 974 #define PCI_DEVICE_ID_ML7223_DMA2_4CH   0x800E
 975 #define PCI_DEVICE_ID_ML7223_DMA3_4CH   0x8017
 976 #define PCI_DEVICE_ID_ML7223_DMA4_4CH   0x803B
 977 #define PCI_DEVICE_ID_ML7831_DMA1_8CH   0x8810
 978 #define PCI_DEVICE_ID_ML7831_DMA2_4CH   0x8815
 979 
 980 static const struct pci_device_id pch_dma_id_table[] = {
 981         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
 982         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
 983         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
 984         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
 985         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
 986         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
 987         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
 988         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
 989         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
 990         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
 991         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
 992         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
 993         { 0, },
 994 };
 995 
 996 static struct pci_driver pch_dma_driver = {
 997         .name           = DRV_NAME,
 998         .id_table       = pch_dma_id_table,
 999         .probe          = pch_dma_probe,
1000         .remove         = pch_dma_remove,
1001 #ifdef CONFIG_PM
1002         .suspend        = pch_dma_suspend,
1003         .resume         = pch_dma_resume,
1004 #endif
1005 };
1006 
1007 module_pci_driver(pch_dma_driver);
1008 
1009 MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
1010                    "DMA controller driver");
1011 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
1012 MODULE_LICENSE("GPL v2");
1013 MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
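/*
 * Illustrative client usage (not part of this driver): a minimal sketch of
 * how a peripheral driver on the same IOH might obtain and use one of these
 * channels through the dmaengine slave API.  The filter function, the
 * pch_dma_slave field values, the scatterlist and the completion callback
 * below are hypothetical; only the calling sequence is meant to be shown.
 *
 *	static bool pch_client_filter(struct dma_chan *chan, void *slave)
 *	{
 *		struct pch_dma_slave *param = slave;
 *
 *		if (param->dma_dev == chan->device->dev) {
 *			chan->private = param;	// consumed by pd_prep_slave_sg()
 *			return true;
 *		}
 *		return false;
 *	}
 *
 *	struct pch_dma_slave slave = {
 *		.dma_dev = &dma_pdev->dev,	// the pch_dma PCI function
 *		.tx_reg  = fifo_bus_addr,	// bus address of the device FIFO
 *		.width   = PCH_DMA_WIDTH_1_BYTE,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pch_client_filter, &slave);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = tx_done;	// hypothetical completion hook
 *		desc->callback_param = priv;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */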
