root/arch/mips/lantiq/xway/dma.c


DEFINITIONS

This source file includes the following definitions:
  1. ltq_dma_enable_irq
  2. ltq_dma_disable_irq
  3. ltq_dma_ack_irq
  4. ltq_dma_open
  5. ltq_dma_close
  6. ltq_dma_alloc
  7. ltq_dma_alloc_tx
  8. ltq_dma_alloc_rx
  9. ltq_dma_free
  10. ltq_dma_init_port
  11. ltq_dma_init
  12. dma_init

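A minimal usage sketch of this API from a client driver's point of view
(the channel number and the device pointer below are placeholder
assumptions, not values taken from this file):

    struct ltq_dma_channel ch = {
            .nr  = 5,              /* hypothetical channel number */
            .dev = &pdev->dev,     /* the client's struct device */
    };

    ltq_dma_alloc_rx(&ch);         /* allocate the ring, RX direction */
    ltq_dma_open(&ch);             /* switch the channel on */
    ltq_dma_enable_irq(&ch);       /* unmask its interrupt */
    /* ... process descriptors; ack with ltq_dma_ack_irq() ... */
    ltq_dma_close(&ch);            /* channel off, interrupt masked */
    ltq_dma_free(&ch);             /* release the descriptor ring */
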
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 John Crispin <john@phrozen.org>
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <lantiq_soc.h>
#include <xway_dma.h>

#define LTQ_DMA_ID              0x08
#define LTQ_DMA_CTRL            0x10
#define LTQ_DMA_CPOLL           0x14
#define LTQ_DMA_CS              0x18
#define LTQ_DMA_CCTRL           0x1C
#define LTQ_DMA_CDBA            0x20
#define LTQ_DMA_CDLEN           0x24
#define LTQ_DMA_CIS             0x28
#define LTQ_DMA_CIE             0x2C
#define LTQ_DMA_PS              0x40
#define LTQ_DMA_PCTRL           0x44
#define LTQ_DMA_IRNEN           0xf4

#define DMA_DESCPT              BIT(3)          /* descriptor complete irq */
#define DMA_TX                  BIT(8)          /* TX channel direction */
#define DMA_CHAN_ON             BIT(0)          /* channel on / off bit */
#define DMA_PDEN                BIT(6)          /* enable packet drop */
#define DMA_CHAN_RST            BIT(1)          /* channel reset bit */
#define DMA_RESET               BIT(0)          /* reset the dma engine */
#define DMA_IRQ_ACK             0x7e            /* ack all channel irq flags */
#define DMA_POLL                BIT(31)         /* turn on channel polling */
#define DMA_CLK_DIV4            BIT(6)          /* polling clock divider */
#define DMA_2W_BURST            BIT(1)          /* 2 word burst length */
#define DMA_MAX_CHANNEL         20              /* the soc has 20 channels */
#define DMA_ETOP_ENDIANNESS     (0xf << 8)      /* endianness swap etop channels */
#define DMA_WEIGHT              (BIT(17) | BIT(16)) /* default channel weight */

#define ltq_dma_r32(x)                  ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y)               ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z)       ltq_w32_mask(x, y, \
                                                ltq_dma_membase + (z))

static void __iomem *ltq_dma_membase;
static DEFINE_SPINLOCK(ltq_dma_lock);

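/* unmask the given channel's interrupt in the global IRNEN register */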
void
ltq_dma_enable_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);

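/* mask the given channel's interrupt in the global IRNEN register */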
void
ltq_dma_disable_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);

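/* acknowledge all pending interrupt flags of the given channel */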
void
ltq_dma_ack_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);

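/* switch the given channel on */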
void
ltq_dma_open(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);

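/* switch the given channel off and mask its interrupt */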
void
ltq_dma_close(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_close);

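/* allocate a coherent descriptor ring for a channel and reset the channel */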
static void
ltq_dma_alloc(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ch->desc = 0;
        ch->desc_base = dma_alloc_coherent(ch->dev,
                                           LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                           &ch->phys, GFP_ATOMIC);

        /* select the channel, program the ring base and length, reset it */
        spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
        ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
        ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        wmb();
        ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
        /* the reset bit self-clears once the channel reset has completed */
        while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
                ;
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}

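/* set up a channel's descriptor ring for the TX direction */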
void
ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ltq_dma_alloc(ch);

        spin_lock_irqsave(&ltq_dma_lock, flags);
        /* reselect the channel; the lock was dropped after ltq_dma_alloc() */
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);

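/* set up a channel's descriptor ring for the RX direction */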
void
ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ltq_dma_alloc(ch);

        spin_lock_irqsave(&ltq_dma_lock, flags);
        /* reselect the channel; the lock was dropped after ltq_dma_alloc() */
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
        spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);

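/* tear down a channel and free its descriptor ring */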
void
ltq_dma_free(struct ltq_dma_channel *ch)
{
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
        dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                          ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);

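/* select a port and apply its port specific configuration */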
void
ltq_dma_init_port(int p)
{
        ltq_dma_w32(p, LTQ_DMA_PS);
        switch (p) {
        case DMA_PORT_ETOP:
                /*
                 * Tell the DMA engine to swap the endianness of data frames and
                 * drop packets if the channel arbitration fails.
                 */
                ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
                                 LTQ_DMA_PCTRL);
                break;

        case DMA_PORT_DEU:
                ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2),
                            LTQ_DMA_PCTRL);
                break;

        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);

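/* probe callback: map the register window, reset the engine and all channels */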
static int
ltq_dma_init(struct platform_device *pdev)
{
        struct clk *clk;
        struct resource *res;
        unsigned int id;
        int i;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ltq_dma_membase))
                panic("Failed to remap dma resource");

        /* power up and reset the dma engine */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                panic("Failed to get dma clock");

        clk_enable(clk);
        ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);

        /* disable all interrupts */
        ltq_dma_w32(0, LTQ_DMA_IRNEN);

        /* reset/configure each channel */
        for (i = 0; i < DMA_MAX_CHANNEL; i++) {
                ltq_dma_w32(i, LTQ_DMA_CS);
                ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
                ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
                ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        }

        /* the ID register encodes the hw revision, port and channel counts */
        id = ltq_dma_r32(LTQ_DMA_ID);
        dev_info(&pdev->dev,
                "Init done - hw rev: %X, ports: %d, channels: %d\n",
                id & 0x1f, (id >> 16) & 0xf, id >> 20);

        return 0;
}

static const struct of_device_id dma_match[] = {
        { .compatible = "lantiq,dma-xway" },
        {},
};

static struct platform_driver dma_driver = {
        .probe = ltq_dma_init,
        .driver = {
                .name = "dma-xway",
                .of_match_table = dma_match,
        },
};

int __init
dma_init(void)
{
        return platform_driver_register(&dma_driver);
}

postcore_initcall(dma_init);
