root/drivers/dma/hsu/hsu.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. hsu_chan_disable
  2. hsu_chan_enable
  3. hsu_dma_chan_start
  4. hsu_dma_stop_channel
  5. hsu_dma_start_channel
  6. hsu_dma_start_transfer
  7. hsu_dma_get_status
  8. hsu_dma_do_irq
  9. hsu_dma_alloc_desc
  10. hsu_dma_desc_free
  11. hsu_dma_prep_slave_sg
  12. hsu_dma_issue_pending
  13. hsu_dma_active_desc_size
  14. hsu_dma_tx_status
  15. hsu_dma_slave_config
  16. hsu_dma_pause
  17. hsu_dma_resume
  18. hsu_dma_terminate_all
  19. hsu_dma_free_chan_resources
  20. hsu_dma_synchronize
  21. hsu_dma_probe
  22. hsu_dma_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Core driver for the High Speed UART DMA
   4  *
   5  * Copyright (C) 2015 Intel Corporation
   6  * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
   7  *
   8  * Partially based on the bits found in drivers/tty/serial/mfd.c.
   9  */
  10 
  11 /*
  12  * DMA channel allocation:
  13  * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
  14  *    Write (UART RX).
  15  * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
  16  *    port 3, and so on.
  17  */
  18 
  19 #include <linux/delay.h>
  20 #include <linux/dmaengine.h>
  21 #include <linux/dma-mapping.h>
  22 #include <linux/init.h>
  23 #include <linux/module.h>
  24 #include <linux/slab.h>
  25 
  26 #include "hsu.h"
  27 
  28 #define HSU_DMA_BUSWIDTHS                               \
  29         BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       |       \
  30         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          |       \
  31         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         |       \
  32         BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)         |       \
  33         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)         |       \
  34         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)         |       \
  35         BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
  36 
/* Halt the channel by clearing the Channel Control Register. */
static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}
  41 
  42 static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
  43 {
  44         u32 cr = HSU_CH_CR_CHA;
  45 
  46         if (hsuc->direction == DMA_MEM_TO_DEV)
  47                 cr &= ~HSU_CH_CR_CHD;
  48         else if (hsuc->direction == DMA_DEV_TO_MEM)
  49                 cr |= HSU_CH_CR_CHD;
  50 
  51         hsu_chan_writel(hsuc, HSU_CH_CR, cr);
  52 }
  53 
/*
 * Program burst size, transfer width and up to HSU_DMA_CHAN_NR_DESC
 * hardware descriptors from the current software descriptor, then enable
 * the channel.  Called with the channel's vchan lock held.
 */
static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	/* Burst size and transfer width come from the device-side config */
	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	/* Quiesce the channel before reprogramming it */
	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/*
	 * Only for the last descriptor in the chain.
	 * NOTE(review): when more than HSU_DMA_CHAN_NR_DESC entries remain,
	 * count - 1 exceeds the index of the last descriptor actually
	 * programmed above — confirm the CHSOD/CHDI bit positions saturate
	 * or that this is benign on the hardware.
	 */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}
  96 
/* Fully stop the channel: deactivate it, then mask all of its interrupts. */
static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}
 102 
/* (Re)program the channel for the next chunk of the active descriptor. */
static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}
 107 
 108 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
 109 {
 110         struct virt_dma_desc *vdesc;
 111 
 112         /* Get the next descriptor */
 113         vdesc = vchan_next_desc(&hsuc->vchan);
 114         if (!vdesc) {
 115                 hsuc->desc = NULL;
 116                 return;
 117         }
 118 
 119         list_del(&vdesc->node);
 120         hsuc->desc = to_hsu_dma_desc(vdesc);
 121 
 122         /* Start the channel with a new descriptor */
 123         hsu_dma_start_channel(hsuc);
 124 }
 125 
/*
 *	hsu_dma_get_status() - get DMA channel status
 *	@chip: HSUART DMA chip
 *	@nr: DMA channel number
 *	@status: pointer for DMA Channel Status Register value
 *
 *	Description:
 *	The function reads and clears the DMA Channel Status Register, checks
 *	if it was a timeout interrupt and returns a corresponding value.
 *
 *	Caller should provide a valid pointer for the DMA Channel Status
 *	Register value that will be returned in @status.
 *
 *	Return:
 *	1 for DMA timeout status, 0 for other DMA status, or error code for
 *	invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * No matter what situation, need read clear the IRQ status
	 * There is a bug, see Errata 5, HSD 2900918
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending; DESCE/CDESC bits are masked out */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* Timeout IRQ, need wait some time, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of Descriptor Time Out, Channel Error
	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
	 * bits and if sr is still non-zero, it must be channel error or
	 * descriptor done which are higher priority than timeout and handled
	 * in hsu_dma_do_irq(). Else, it must be a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);
 187 
/*
 *	hsu_dma_do_irq() - DMA interrupt handler
 *	@chip: HSUART DMA chip
 *	@nr: DMA channel number
 *	@status: Channel Status Register value
 *
 *	Description:
 *	This function handles Channel Error and Descriptor Done interrupts.
 *	This function should be called after determining that the DMA interrupt
 *	is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
 *
 *	Return:
 *	0 for invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return 0;

	hsuc = &chip->hsu->chan[nr];

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			/* Channel error: mark the descriptor failed */
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			/* More sg entries left: reprogram the next batch */
			hsu_dma_start_channel(hsuc);
		} else {
			/* Descriptor fully done: complete it, start the next */
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
 232 
 233 static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
 234 {
 235         struct hsu_dma_desc *desc;
 236 
 237         desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
 238         if (!desc)
 239                 return NULL;
 240 
 241         desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
 242         if (!desc->sg) {
 243                 kfree(desc);
 244                 return NULL;
 245         }
 246 
 247         return desc;
 248 }
 249 
/* virt-dma ->desc_free callback: release the sg table and the descriptor. */
static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}
 257 
 258 static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
 259                 struct dma_chan *chan, struct scatterlist *sgl,
 260                 unsigned int sg_len, enum dma_transfer_direction direction,
 261                 unsigned long flags, void *context)
 262 {
 263         struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
 264         struct hsu_dma_desc *desc;
 265         struct scatterlist *sg;
 266         unsigned int i;
 267 
 268         desc = hsu_dma_alloc_desc(sg_len);
 269         if (!desc)
 270                 return NULL;
 271 
 272         for_each_sg(sgl, sg, sg_len, i) {
 273                 desc->sg[i].addr = sg_dma_address(sg);
 274                 desc->sg[i].len = sg_dma_len(sg);
 275 
 276                 desc->length += sg_dma_len(sg);
 277         }
 278 
 279         desc->nents = sg_len;
 280         desc->direction = direction;
 281         /* desc->active = 0 by kzalloc */
 282         desc->status = DMA_IN_PROGRESS;
 283 
 284         return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
 285 }
 286 
 287 static void hsu_dma_issue_pending(struct dma_chan *chan)
 288 {
 289         struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
 290         unsigned long flags;
 291 
 292         spin_lock_irqsave(&hsuc->vchan.lock, flags);
 293         if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
 294                 hsu_dma_start_transfer(hsuc);
 295         spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 296 }
 297 
 298 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 299 {
 300         struct hsu_dma_desc *desc = hsuc->desc;
 301         size_t bytes = 0;
 302         int i;
 303 
 304         for (i = desc->active; i < desc->nents; i++)
 305                 bytes += desc->sg[i].len;
 306 
 307         i = HSU_DMA_CHAN_NR_DESC - 1;
 308         do {
 309                 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 310         } while (--i >= 0);
 311 
 312         return bytes;
 313 }
 314 
 315 static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
 316         dma_cookie_t cookie, struct dma_tx_state *state)
 317 {
 318         struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
 319         struct virt_dma_desc *vdesc;
 320         enum dma_status status;
 321         size_t bytes;
 322         unsigned long flags;
 323 
 324         status = dma_cookie_status(chan, cookie, state);
 325         if (status == DMA_COMPLETE)
 326                 return status;
 327 
 328         spin_lock_irqsave(&hsuc->vchan.lock, flags);
 329         vdesc = vchan_find_desc(&hsuc->vchan, cookie);
 330         if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
 331                 bytes = hsu_dma_active_desc_size(hsuc);
 332                 dma_set_residue(state, bytes);
 333                 status = hsuc->desc->status;
 334         } else if (vdesc) {
 335                 bytes = to_hsu_dma_desc(vdesc)->length;
 336                 dma_set_residue(state, bytes);
 337         }
 338         spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 339 
 340         return status;
 341 }
 342 
 343 static int hsu_dma_slave_config(struct dma_chan *chan,
 344                                 struct dma_slave_config *config)
 345 {
 346         struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
 347 
 348         memcpy(&hsuc->config, config, sizeof(hsuc->config));
 349 
 350         return 0;
 351 }
 352 
/*
 * Pause the in-flight transfer by deactivating the channel.  The hardware
 * descriptor registers remain programmed, so hsu_dma_resume() only needs
 * to re-enable the channel.
 */
static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	/* Only an actively running descriptor can be paused */
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}
 367 
/*
 * Resume a previously paused transfer.  The descriptor status is restored
 * before the channel is re-enabled with its still-programmed context.
 */
static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	/* Only a paused descriptor can be resumed */
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}
 382 
/*
 * Abort all activity on the channel: stop the hardware, free the active
 * descriptor, and hand every queued descriptor back to virt-dma to be
 * freed outside the lock.
 */
static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		/* The active descriptor is off the vchan lists; free it here */
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	/* Free the collected descriptors after dropping the lock */
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}
 403 
/* Release all virt-dma resources associated with the channel. */
static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
 408 
 409 static void hsu_dma_synchronize(struct dma_chan *chan)
 410 {
 411         struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
 412 
 413         vchan_synchronize(&hsuc->vchan);
 414 }
 415 
/*
 *	hsu_dma_probe() - initialize the HSU DMA controller
 *	@chip: HSUART DMA chip
 *
 *	Allocates the hsu_dma context and per-channel state (the number of
 *	channels is derived from the chip's IO space length), initializes
 *	the virt-dma channels and registers the dmaengine device.
 *
 *	Return: 0 on success or a negative error code.
 */
int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		/* Even channels: DMA read (UART TX); odd: DMA write (UART RX) */
		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
	hsu->dma.device_synchronize = hsu_dma_synchronize;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	/* A single hardware descriptor cannot exceed the DxTSR field width */
	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
 481 
 482 int hsu_dma_remove(struct hsu_dma_chip *chip)
 483 {
 484         struct hsu_dma *hsu = chip->hsu;
 485         unsigned short i;
 486 
 487         dma_async_device_unregister(&hsu->dma);
 488 
 489         for (i = 0; i < hsu->nr_channels; i++) {
 490                 struct hsu_dma_chan *hsuc = &hsu->chan[i];
 491 
 492                 tasklet_kill(&hsuc->vchan.task);
 493         }
 494 
 495         return 0;
 496 }
 497 EXPORT_SYMBOL_GPL(hsu_dma_remove);
 498 
 499 MODULE_LICENSE("GPL v2");
 500 MODULE_DESCRIPTION("High Speed UART DMA core driver");
 501 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");

/* [<][>][^][v][top][bottom][index][help] */