root/arch/sh/drivers/dma/dma-sh.c


DEFINITIONS

This source file includes the following definitions.
  1. dma_find_base
  2. dma_base_addr
  3. get_dmte_irq
  4. get_dmte_irq
  5. calc_xmit_shift
  6. dma_tei
  7. sh_dmac_request_dma
  8. sh_dmac_free_dma
  9. sh_dmac_configure_channel
  10. sh_dmac_enable_dma
  11. sh_dmac_disable_dma
  12. sh_dmac_xfer_dma
  13. sh_dmac_get_dma_residue
  14. dmaor_reset
  15. get_dma_error_irq
  16. get_dma_error_irq
  17. dma_err
  18. dmae_irq_init
  19. dmae_irq_free
  20. dmae_irq_init
  21. dmae_irq_free
  22. sh_dmac_init
  23. sh_dmac_exit

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/dma-register.h>
#include <cpu/dma-register.h>
#include <cpu/dma.h>

/*
 * Define the default configuration for dual address memory-memory transfer.
 * RS_AUTO (the 0x400 value) selects auto-request, external->external.
 */
#define RS_DUAL (DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))

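/*
 * Channels 0-5 are served by the first on-chip DMAC; parts that define
 * SH_DMAC_BASE1 have a second DMAC serving channels 6 and up.
 */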
static unsigned long dma_find_base(unsigned int chan)
{
        unsigned long base = SH_DMAC_BASE0;

#ifdef SH_DMAC_BASE1
        if (chan >= 6)
                base = SH_DMAC_BASE1;
#endif

        return base;
}

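/*
 * Per-channel register blocks are 0x10 bytes apart. Channels 4 and 5
 * sit past the DMA operation register placed inside the channel
 * register space, so their offsets gain an extra 0x10 (e.g. channel 4
 * lives at base + 0x50, not base + 0x40).
 */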
static unsigned long dma_base_addr(unsigned int chan)
{
        unsigned long base = dma_find_base(chan);

        /* Normalize to a channel index within the controller */
        if (chan >= 6)
                chan -= 6;
        if (chan >= 4)
                base += 0x10;

        return base + (chan * 0x10);
}

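/*
 * Transfer-end (DMTE) IRQ lookup. With CONFIG_SH_DMA_IRQ_MULTI the
 * vectors are muxed per controller, so every channel in a group shares
 * the group's first vector; otherwise each channel has its own entry
 * in dmte_irq_map[].
 */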
#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dmte_irq(unsigned int chan)
{
        return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else

static unsigned int dmte_irq_map[] = {
        DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,

#ifdef DMTE4_IRQ
        DMTE4_IRQ, DMTE4_IRQ + 1,
#endif

#ifdef DMTE6_IRQ
        DMTE6_IRQ, DMTE6_IRQ + 1,
#endif

#ifdef DMTE8_IRQ
        DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};

static inline unsigned int get_dmte_irq(unsigned int chan)
{
        return dmte_irq_map[chan];
}
#endif

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel, since we know that the transfer will take:
 *
 *      info->count >> ts_shift[transmit_size]
 *
 * iterations to complete.
 */
static unsigned int ts_shift[] = TS_SHIFT;

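/*
 * For example, a channel programmed for 32-bit transfer units has a
 * shift of 2, so a 64-byte request is counted as 64 >> 2 = 16 units.
 */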
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
        u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
                ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

        return ts_shift[cnt];
}

/*
 * The transfer-end interrupt must read the CHCR register in order to
 * clear the active hardware interrupt condition.
 * Beyond that, it needs to wake any waiting process, which should handle
 * setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
        struct dma_channel *chan = dev_id;
        u32 chcr;

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);

        if (!(chcr & CHCR_TE))
                return IRQ_NONE;

        chcr &= ~(CHCR_IE | CHCR_DE);
        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        wake_up(&chan->wait_queue);

        return IRQ_HANDLED;
}

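/*
 * Only TEI-capable channels have a transfer-end interrupt to hook up;
 * for everything else there is nothing to request.
 */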
static int sh_dmac_request_dma(struct dma_channel *chan)
{
        if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
                return 0;

        return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
                           chan->dev_id, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
        free_irq(get_dmte_irq(chan->chan), chan);
}

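/*
 * Program CHCR for the channel. A requested CHCR_IE is recorded as
 * DMA_TEI_CAPABLE and deferred to sh_dmac_enable_dma(), so the
 * interrupt is only enabled once a transfer is actually started.
 */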
static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
        if (!chcr)
                chcr = RS_DUAL | CHCR_IE;

        if (chcr & CHCR_IE) {
                chcr &= ~CHCR_IE;
                chan->flags |= DMA_TEI_CAPABLE;
        } else {
                chan->flags &= ~DMA_TEI_CAPABLE;
        }

        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        chan->flags |= DMA_CONFIGURED;
        return 0;
}

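/*
 * Start the channel: set CHCR_DE (plus CHCR_IE for TEI-capable
 * channels) and then unmask the transfer-end interrupt.
 */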
static void sh_dmac_enable_dma(struct dma_channel *chan)
{
        int irq;
        u32 chcr;

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        chcr |= CHCR_DE;

        if (chan->flags & DMA_TEI_CAPABLE)
                chcr |= CHCR_IE;

        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        if (chan->flags & DMA_TEI_CAPABLE) {
                irq = get_dmte_irq(chan->chan);
                enable_irq(irq);
        }
}

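/*
 * Stop the channel: mask the transfer-end interrupt first, then clear
 * the enable, transfer-end and interrupt-enable bits in CHCR.
 */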
static void sh_dmac_disable_dma(struct dma_channel *chan)
{
        int irq;
        u32 chcr;

        if (chan->flags & DMA_TEI_CAPABLE) {
                irq = get_dmte_irq(chan->chan);
                disable_irq(irq);
        }

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
}

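/*
 * Kick off a transfer: fall back to the default configuration if the
 * channel has none, quiesce it, program SAR/DAR/TCR, and re-enable.
 */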
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
        /*
         * If we haven't pre-configured the channel with special flags, use
         * the defaults.
         */
        if (unlikely(!(chan->flags & DMA_CONFIGURED)))
                sh_dmac_configure_channel(chan, 0);

        sh_dmac_disable_dma(chan);

        /*
         * Single-address mode usage note!
         *
         * It's important that we don't accidentally write any value to SAR/DAR
         * (this includes 0) that hasn't been directly specified by the user if
         * we're in single-address mode.
         *
         * In this case, only one address can be defined; anything else will
         * result in a DMA address error interrupt (at least on the SH-4),
         * which will subsequently halt the transfer.
         *
         * Channel 2 on the Dreamcast is a special case, as this is used for
         * cascading to the PVR2 DMAC. In this case, we still need to write
         * SAR and DAR, regardless of value, in order for cascading to work.
         */
        if (chan->sar || (mach_is_dreamcast() &&
                          chan->chan == PVR2_CASCADE_CHAN))
                __raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
        if (chan->dar || (mach_is_dreamcast() &&
                          chan->chan == PVR2_CASCADE_CHAN))
                __raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));

        __raw_writel(chan->count >> calc_xmit_shift(chan),
                (dma_base_addr(chan->chan) + TCR));

        sh_dmac_enable_dma(chan);

        return 0;
}

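/*
 * TCR counts the transfer units still outstanding, so the residue in
 * bytes is that count scaled back up by the transmit-size shift. A
 * channel that is no longer enabled has completed, hence zero residue.
 */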
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
        if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
                return 0;

        return __raw_readl(dma_base_addr(chan->chan) + TCR)
                 << calc_xmit_shift(chan);
}

/*
 * DMAOR handling
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7723)  || \
    defined(CONFIG_CPU_SUBTYPE_SH7724)  || \
    defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR        2
#else
#define NR_DMAOR        1
#endif

/*
 * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
 * channels 0 - 5, DMAOR1 6 - 11 (optional).
 */
#define dmaor_read_reg(n)               __raw_readw(dma_find_base((n)*6))
#define dmaor_write_reg(n, data)        __raw_writew(data, dma_find_base((n)*6))

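/*
 * Clear any latched NMI/address-error flags, re-apply the init value,
 * and verify that the error bits stayed clear.
 */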
static inline int dmaor_reset(int no)
{
        unsigned long dmaor = dmaor_read_reg(no);

        /* Try to clear the error flags first, in case they are set */
        dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
        dmaor_write_reg(no, dmaor);

        dmaor |= DMAOR_INIT;
        dmaor_write_reg(no, dmaor);

        /* See if we got an error again */
        if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
                printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
                return -EINVAL;
        }

        return 0;
}

/*
 * DMAE handling
 */
#ifdef CONFIG_CPU_SH4

#if defined(DMAE1_IRQ)
#define NR_DMAE         2
#else
#define NR_DMAE         1
#endif

static const char *dmae_name[] = {
        "DMAC Address Error0",
        "DMAC Address Error1"
};

#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dma_error_irq(int n)
{
        return get_dmte_irq(n * 6);
}
#else

static unsigned int dmae_irq_map[] = {
        DMAE0_IRQ,

#ifdef DMAE1_IRQ
        DMAE1_IRQ,
#endif
};

static inline unsigned int get_dma_error_irq(int n)
{
        return dmae_irq_map[n];
}
#endif

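/*
 * Address-error handler: reset every DMAOR to clear the error state,
 * then mask the offending IRQ so a stuck condition can't loop forever.
 */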
static irqreturn_t dma_err(int irq, void *dummy)
{
        int i;

        for (i = 0; i < NR_DMAOR; i++)
                dmaor_reset(i);

        disable_irq(irq);

        return IRQ_HANDLED;
}

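/*
 * Register (and later release) the address-error interrupts for each
 * DMAE unit; parts without CONFIG_CPU_SH4 use the empty stubs below.
 */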
static int dmae_irq_init(void)
{
        int n;

        for (n = 0; n < NR_DMAE; n++) {
                int i = request_irq(get_dma_error_irq(n), dma_err,
                                    IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
                if (unlikely(i < 0)) {
                        printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
                        return i;
                }
        }

        return 0;
}

static void dmae_irq_free(void)
{
        int n;

        for (n = 0; n < NR_DMAE; n++)
                free_irq(get_dma_error_irq(n), NULL);
}
#else
static inline int dmae_irq_init(void)
{
        return 0;
}

static void dmae_irq_free(void)
{
}
#endif

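/*
 * Glue between the generic SH DMA API and the channel operations
 * implemented above.
 */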
static struct dma_ops sh_dmac_ops = {
        .request        = sh_dmac_request_dma,
        .free           = sh_dmac_free_dma,
        .get_residue    = sh_dmac_get_dma_residue,
        .xfer           = sh_dmac_xfer_dma,
        .configure      = sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
        .name           = "sh_dmac",
        .nr_channels    = CONFIG_NR_ONCHIP_DMA_CHANNELS,
        .ops            = &sh_dmac_ops,
        .flags          = DMAC_CHANNELS_TEI_CAPABLE,
};

static int __init sh_dmac_init(void)
{
        struct dma_info *info = &sh_dmac_info;
        int i, rc;

        /*
         * Initialize DMAE, for parts that support it.
         */
        rc = dmae_irq_init();
        if (unlikely(rc != 0))
                return rc;

        /*
         * Initialize DMAOR, and clean up any error flags that may have
         * been set.
         */
        for (i = 0; i < NR_DMAOR; i++) {
                rc = dmaor_reset(i);
                if (unlikely(rc != 0))
                        return rc;
        }

        return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
        dmae_irq_free();
        unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL v2");
