drivers/dma/mv_xor.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE                (MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE                64
#define MV_XOR_THRESHOLD                1
#define MV_XOR_MAX_CHANNELS             2

#define MV_XOR_MIN_BYTE_COUNT           SZ_128
#define MV_XOR_MAX_BYTE_COUNT           (SZ_16M - 1)

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR          0
#define XOR_OPERATION_MODE_MEMCPY       2
#define XOR_OPERATION_MODE_IN_DESC      7
#define XOR_DESCRIPTOR_SWAP             BIT(14)
#define XOR_DESC_SUCCESS                0x40000000

#define XOR_DESC_OPERATION_XOR          (0 << 24)
#define XOR_DESC_OPERATION_CRC32C       (1 << 24)
#define XOR_DESC_OPERATION_MEMCPY       (2 << 24)

#define XOR_DESC_DMA_OWNED              BIT(31)
#define XOR_DESC_EOD_INT_EN             BIT(31)

#define XOR_CURR_DESC(chan)     (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)     (chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)    (chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)  (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)    (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)        (chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)       (chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)        (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)    (chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)    (chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)     (chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)   (chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)    (chan->mmr_base + 0x60)

#define XOR_INT_END_OF_DESC     BIT(0)
#define XOR_INT_END_OF_CHAIN    BIT(1)
#define XOR_INT_STOPPED         BIT(2)
#define XOR_INT_PAUSED          BIT(3)
#define XOR_INT_ERR_DECODE      BIT(4)
#define XOR_INT_ERR_RDPROT      BIT(5)
#define XOR_INT_ERR_WRPROT      BIT(6)
#define XOR_INT_ERR_OWN         BIT(7)
#define XOR_INT_ERR_PAR         BIT(8)
#define XOR_INT_ERR_MBUS        BIT(9)

#define XOR_INTR_ERRORS         (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
                                 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
                                 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE     (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
                                 XOR_INT_STOPPED     | XOR_INTR_ERRORS)

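/*
 * Illustrative sketch only, not part of the original header: how the
 * per-channel offsets above would typically be used with plain MMIO
 * accessors. The 16-bits-per-channel packing of the shared cause/mask
 * registers is an assumption here (note that XOR_INTR_CAUSE/XOR_INTR_MASK,
 * unlike the other registers, take no per-channel offset); the example
 * function names are made up, see mv_xor.c for the driver's real accessors.
 */
#if 0
static void mv_chan_example_unmask_interrupts(struct mv_xor_chan *chan)
{
        u32 val = readl_relaxed(XOR_INTR_MASK(chan));

        /* Assumed layout: channel idx owns bits [idx * 16, idx * 16 + 15]. */
        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
        writel_relaxed(val, XOR_INTR_MASK(chan));
}

static void mv_chan_example_activate(struct mv_xor_chan *chan)
{
        /*
         * Use writel() rather than writel_relaxed() so that queued
         * descriptor writes are flushed before the channel is kicked.
         */
        writel(BIT(0), XOR_ACTIVATION(chan));
}
#endif
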
#define WINDOW_BASE(w)          (0x50 + ((w) << 2))
#define WINDOW_SIZE(w)          (0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)    (0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)      (0xA0 + ((chan) << 2))

#define WINDOW_COUNT            8

struct mv_xor_device {
        void __iomem         *xor_base;
        void __iomem         *xor_high_base;
        struct clk           *clk;
        struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
        int                  xor_type;

        u32                  win_start[WINDOW_COUNT];
        u32                  win_end[WINDOW_COUNT];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped high register base
 * @idx: the index of the xor channel
 * @irq: channel interrupt line
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by HW but still need to be acked
 * @dma_desc_pool: DMA address of the descriptor pool
 * @dma_desc_pool_virt: virtual address of the descriptor pool
 * @pool_size: size in bytes of the descriptor pool
 * @dmadev: dmaengine device registered for this channel
 * @dmachan: dmaengine channel backing this XOR channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: new mode of driver, each op is written to the descriptor
 * @dummy_src: dummy source buffer used for DMA_INTERRUPT operations
 * @dummy_dst: dummy destination buffer used for DMA_INTERRUPT operations
 * @dummy_src_addr: DMA address of @dummy_src
 * @dummy_dst_addr: DMA address of @dummy_dst
 * @saved_config_reg: XOR_CONFIG value saved across suspend/resume
 * @saved_int_mask_reg: XOR_INTR_MASK value saved across suspend/resume
 * @xordev: pointer to the parent mv_xor_device
 */
struct mv_xor_chan {
        int                     pending;
        spinlock_t              lock; /* protects the descriptor slot pool */
        void __iomem            *mmr_base;
        void __iomem            *mmr_high_base;
        unsigned int            idx;
        int                     irq;
        struct list_head        chain;
        struct list_head        free_slots;
        struct list_head        allocated_slots;
        struct list_head        completed_slots;
        dma_addr_t              dma_desc_pool;
        void                    *dma_desc_pool_virt;
        size_t                  pool_size;
        struct dma_device       dmadev;
        struct dma_chan         dmachan;
        int                     slots_allocated;
        struct tasklet_struct   irq_tasklet;
        int                     op_in_desc;
        char                    dummy_src[MV_XOR_MIN_BYTE_COUNT];
        char                    dummy_dst[MV_XOR_MIN_BYTE_COUNT];
        dma_addr_t              dummy_src_addr, dummy_dst_addr;
        u32                     saved_config_reg, saved_int_mask_reg;

        struct mv_xor_device    *xordev;
};

/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: dmaengine transaction type carried by this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
        struct list_head        node;
        struct list_head        sg_tx_list;
        enum dma_transaction_type       type;
        void                    *hw_desc;
        u16                     idx;
        struct dma_async_tx_descriptor  async_tx;
};

/*
 * This structure describes the 64-byte XOR hardware descriptor. The
 * mv_phy_src_idx() macro must be used when indexing the values of the
 * phy_src_addr[] array. This is because the 'descriptor swap' feature,
 * used on big-endian systems, swaps descriptor data within blocks of
 * 8 bytes. Two consecutive entries of the phy_src_addr[] array are
 * therefore swapped on big-endian, which explains the different
 * mv_phy_src_idx() implementations below.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
        u32 status;             /* descriptor execution status */
        u32 crc32_result;       /* result of CRC-32 calculation */
        u32 desc_command;       /* type of operation to be carried out */
        u32 phy_next_desc;      /* next descriptor address pointer */
        u32 byte_count;         /* size of src/dst blocks in bytes */
        u32 phy_dest_addr;      /* destination block address */
        u32 phy_src_addr[8];    /* source block addresses */
        u32 reserved0;
        u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
        u32 crc32_result;       /* result of CRC-32 calculation */
        u32 status;             /* descriptor execution status */
        u32 phy_next_desc;      /* next descriptor address pointer */
        u32 desc_command;       /* type of operation to be carried out */
        u32 phy_dest_addr;      /* destination block address */
        u32 byte_count;         /* size of src/dst blocks in bytes */
        u32 phy_src_addr[8];    /* source block addresses */
        u32 reserved1;
        u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif
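
/*
 * Illustrative sketch only, not part of the original header: code filling
 * phy_src_addr[] must go through mv_phy_src_idx() so that, with descriptor
 * swap active on big-endian, each address still lands in the correct 32-bit
 * word of its 8-byte swapped block (index 0 maps to 1, 1 to 0, 2 to 3, and
 * so on). The helper name is made up for the example; the real driver does
 * the equivalent in mv_xor.c.
 */
#if 0
static void mv_desc_example_set_srcs(struct mv_xor_desc *hw_desc,
                                     const dma_addr_t *src, int src_cnt)
{
        int i;

        /* The hardware descriptor holds at most 8 source addresses. */
        for (i = 0; i < src_cnt && i < 8; i++)
                hw_desc->phy_src_addr[mv_phy_src_idx(i)] = src[i];
}
#endif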

#define to_mv_sw_desc(addr_hw_desc)             \
        container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)       \
        ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

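/*
 * Illustrative sketch only, not part of the original header: a slot pool
 * sized MV_XOR_POOL_SIZE suggests the pool is carved into MV_XOR_SLOT_SIZE
 * byte slots, so slot idx would live at pool base + idx * MV_XOR_SLOT_SIZE
 * in both the CPU and the DMA view of a single coherent allocation. The
 * helper below is a made-up name and an assumption, not the driver's API.
 */
#if 0
static void mv_xor_example_slot_addrs(struct mv_xor_chan *mv_chan, u16 idx,
                                      void **hw_desc, dma_addr_t *hw_addr)
{
        /* CPU-visible address of the hardware descriptor for this slot. */
        *hw_desc = (char *)mv_chan->dma_desc_pool_virt +
                   idx * MV_XOR_SLOT_SIZE;
        /* Matching bus address handed to the XOR engine. */
        *hw_addr = mv_chan->dma_desc_pool + idx * MV_XOR_SLOT_SIZE;
}
#endif
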
#endif /* MV_XOR_H */
