root/drivers/dma/ioat/dma.h


DEFINITIONS

This source file includes the following definitions:
  1. to_ioat_chan
  2. __dump_desc_dbg
  3. ioat_chan_by_index
  4. ioat_chansts
  5. ioat_chansts_to_addr
  6. ioat_chanerr
  7. ioat_suspend
  8. ioat_reset
  9. ioat_reset_pending
  10. is_ioat_active
  11. is_ioat_idle
  12. is_ioat_halted
  13. is_ioat_suspended
  14. is_ioat_bug
  15. ioat_ring_size
  16. ioat_ring_active
  17. ioat_ring_pending
  18. ioat_ring_space
  19. ioat_xferlen_to_descs
  20. ioat_get_ring_ent
  21. ioat_set_chainaddr

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
   4  */
   5 #ifndef IOATDMA_H
   6 #define IOATDMA_H
   7 
   8 #include <linux/dmaengine.h>
   9 #include <linux/init.h>
  10 #include <linux/dmapool.h>
  11 #include <linux/cache.h>
  12 #include <linux/pci_ids.h>
  13 #include <linux/circ_buf.h>
  14 #include <linux/interrupt.h>
  15 #include "registers.h"
  16 #include "hw.h"
  17 
  18 #define IOAT_DMA_VERSION  "5.00"
  19 
  20 #define IOAT_DMA_DCA_ANY_CPU            ~0
  21 
  22 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
  23 #define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
  24 #define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
  25 
  26 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
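/*
 * Each channel's registers occupy an 0x80-byte window above the device
 * MMIO base, so the byte offset of a channel's reg_base divided by 0x80
 * identifies the channel.  For example (hypothetical offset), a channel
 * whose reg_base sits 0x100 bytes above ioat_dma->reg_base gives
 * chan_num() == 2.
 */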
  27 
  28 /* ioat hardware assumes at least two sources for raid operations */
  29 #define src_cnt_to_sw(x) ((x) + 2)
  30 #define src_cnt_to_hw(x) ((x) - 2)
  31 #define ndest_to_sw(x) ((x) + 1)
  32 #define ndest_to_hw(x) ((x) - 1)
  33 #define src16_cnt_to_sw(x) ((x) + 9)
  34 #define src16_cnt_to_hw(x) ((x) - 9)
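/*
 * The descriptor source/destination count fields are stored with a fixed
 * bias: the standard RAID descriptors omit the two sources the hardware
 * always assumes, and the 16-source variants use a bias of 9.  As plain
 * arithmetic, src_cnt_to_hw(5) == 3 and src16_cnt_to_hw(12) == 3.
 */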
  35 
  36 /*
  37  * workaround for IOAT ver.3.0 null descriptor issue
  38  * (channel returns error when size is 0)
  39  */
  40 #define NULL_DESC_BUFFER_SIZE 1
  41 
  42 enum ioat_irq_mode {
  43         IOAT_NOIRQ = 0,
  44         IOAT_MSIX,
  45         IOAT_MSI,
  46         IOAT_INTX
  47 };
  48 
  49 /**
   50  * struct ioatdma_device - internal representation of an IOAT device
  51  * @pdev: PCI-Express device
  52  * @reg_base: MMIO register space base address
  53  * @completion_pool: DMA buffers for completion ops
  54  * @sed_hw_pool: DMA super descriptor pools
  55  * @dma_dev: embedded struct dma_device
  56  * @version: version of ioatdma device
  57  * @msix_entries: irq handlers
  58  * @idx: per channel data
  59  * @dca: direct cache access context
  60  * @irq_mode: interrupt mode (INTX, MSI, MSIX)
  61  * @cap: read DMA capabilities register
  62  */
  63 struct ioatdma_device {
  64         struct pci_dev *pdev;
  65         void __iomem *reg_base;
  66         struct dma_pool *completion_pool;
  67 #define MAX_SED_POOLS   5
  68         struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
  69         struct dma_device dma_dev;
  70         u8 version;
  71 #define IOAT_MAX_CHANS 4
  72         struct msix_entry msix_entries[IOAT_MAX_CHANS];
  73         struct ioatdma_chan *idx[IOAT_MAX_CHANS];
  74         struct dca_provider *dca;
  75         enum ioat_irq_mode irq_mode;
  76         u32 cap;
  77 
  78         /* shadow version for CB3.3 chan reset errata workaround */
  79         u64 msixtba0;
  80         u64 msixdata0;
  81         u32 msixpba;
  82 };
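/*
 * Illustrative use of the container_of() helpers above (chan here is a
 * hypothetical struct dma_chan * handed out by the dmaengine core):
 *
 *	struct ioatdma_device *ioat_dma = to_ioatdma_device(chan->device);
 *	struct device *dev = &ioat_dma->pdev->dev;
 *
 * to_ioatdma_device() works because dma_dev is embedded in this struct;
 * to_dev() and to_pdev() follow the same chain from an ioatdma_chan.
 */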
  83 
  84 struct ioat_descs {
  85         void *virt;
  86         dma_addr_t hw;
  87 };
  88 
  89 struct ioatdma_chan {
  90         struct dma_chan dma_chan;
  91         void __iomem *reg_base;
  92         dma_addr_t last_completion;
  93         spinlock_t cleanup_lock;
  94         unsigned long state;
  95         #define IOAT_CHAN_DOWN 0
  96         #define IOAT_COMPLETION_ACK 1
  97         #define IOAT_RESET_PENDING 2
  98         #define IOAT_KOBJ_INIT_FAIL 3
  99         #define IOAT_RUN 5
 100         #define IOAT_CHAN_ACTIVE 6
 101         struct timer_list timer;
 102         #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 103         #define IDLE_TIMEOUT msecs_to_jiffies(2000)
 104         #define RESET_DELAY msecs_to_jiffies(100)
 105         struct ioatdma_device *ioat_dma;
 106         dma_addr_t completion_dma;
 107         u64 *completion;
 108         struct tasklet_struct cleanup_task;
 109         struct kobject kobj;
 110 
 111 /* ioat v2 / v3 channel attributes
  112  * @xfercap_log: log2 of channel max transfer length (for fast division)
 113  * @head: allocated index
 114  * @issued: hardware notification point
 115  * @tail: cleanup index
 116  * @dmacount: identical to 'head' except for occasionally resetting to zero
 117  * @alloc_order: log2 of the number of allocated descriptors
 118  * @produce: number of descriptors to produce at submit time
 119  * @ring: software ring buffer implementation of hardware ring
 120  * @prep_lock: serializes descriptor preparation (producers)
 121  */
 122         size_t xfercap_log;
 123         u16 head;
 124         u16 issued;
 125         u16 tail;
 126         u16 dmacount;
 127         u16 alloc_order;
 128         u16 produce;
 129         struct ioat_ring_ent **ring;
 130         spinlock_t prep_lock;
 131         struct ioat_descs descs[2];
 132         int desc_chunks;
 133         int intr_coalesce;
 134         int prev_intr_coalesce;
 135 };
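/*
 * Locking overview, as suggested by the field documentation above:
 * prep_lock serializes producers preparing and submitting descriptors
 * (which advance head and issued), while cleanup_lock covers the
 * completion path that advances tail, so submission and cleanup can run
 * concurrently on the same channel.
 */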
 136 
 137 struct ioat_sysfs_entry {
 138         struct attribute attr;
 139         ssize_t (*show)(struct dma_chan *, char *);
 140         ssize_t (*store)(struct dma_chan *, const char *, size_t);
 141 };
 142 
 143 /**
 144  * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 145  * @hw: hardware SED
 146  * @dma: dma address for the SED
  147  * @parent: pointer to the dma descriptor that is the parent
 148  * @hw_pool: descriptor pool index
 149  */
 150 struct ioat_sed_ent {
 151         struct ioat_sed_raw_descriptor *hw;
 152         dma_addr_t dma;
 153         struct ioat_ring_ent *parent;
 154         unsigned int hw_pool;
 155 };
 156 
 157 /**
 158  * struct ioat_ring_ent - wrapper around hardware descriptor
 159  * @hw: hardware DMA descriptor (for memcpy)
 160  * @xor: hardware xor descriptor
 161  * @xor_ex: hardware xor extension descriptor
 162  * @pq: hardware pq descriptor
 163  * @pq_ex: hardware pq extension descriptor
 164  * @pqu: hardware pq update descriptor
 165  * @raw: hardware raw (un-typed) descriptor
 166  * @txd: the generic software descriptor for all engines
 167  * @len: total transaction length for unmap
 168  * @result: asynchronous result of validate operations
 169  * @id: identifier for debug
 170  * @sed: pointer to super extended descriptor sw desc
 171  */
 172 
 173 struct ioat_ring_ent {
 174         union {
 175                 struct ioat_dma_descriptor *hw;
 176                 struct ioat_xor_descriptor *xor;
 177                 struct ioat_xor_ext_descriptor *xor_ex;
 178                 struct ioat_pq_descriptor *pq;
 179                 struct ioat_pq_ext_descriptor *pq_ex;
 180                 struct ioat_pq_update_descriptor *pqu;
 181                 struct ioat_raw_descriptor *raw;
 182         };
 183         size_t len;
 184         struct dma_async_tx_descriptor txd;
 185         enum sum_check_flags *result;
 186         #ifdef DEBUG
 187         int id;
 188         #endif
 189         struct ioat_sed_ent *sed;
 190 };
 191 
 192 extern const struct sysfs_ops ioat_sysfs_ops;
 193 extern struct ioat_sysfs_entry ioat_version_attr;
 194 extern struct ioat_sysfs_entry ioat_cap_attr;
 195 extern int ioat_pending_level;
 196 extern int ioat_ring_alloc_order;
 197 extern struct kobj_type ioat_ktype;
 198 extern struct kmem_cache *ioat_cache;
 199 extern int ioat_ring_max_alloc_order;
 200 extern struct kmem_cache *ioat_sed_cache;
 201 
 202 static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
 203 {
 204         return container_of(c, struct ioatdma_chan, dma_chan);
 205 }
 206 
 207 /* wrapper around hardware descriptor format + additional software fields */
 208 #ifdef DEBUG
 209 #define set_desc_id(desc, i) ((desc)->id = (i))
 210 #define desc_id(desc) ((desc)->id)
 211 #else
 212 #define set_desc_id(desc, i)
 213 #define desc_id(desc) (0)
 214 #endif
 215 
 216 static inline void
 217 __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 218                 struct dma_async_tx_descriptor *tx, int id)
 219 {
 220         struct device *dev = to_dev(ioat_chan);
 221 
 222         dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
 223                 " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
 224                 (unsigned long long) tx->phys,
 225                 (unsigned long long) hw->next, tx->cookie, tx->flags,
 226                 hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
 227 }
 228 
 229 #define dump_desc_dbg(c, d) \
 230         ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 231 
 232 static inline struct ioatdma_chan *
 233 ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 234 {
 235         return ioat_dma->idx[index];
 236 }
 237 
 238 static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 239 {
 240         return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
 241 }
 242 
 243 static inline u64 ioat_chansts_to_addr(u64 status)
 244 {
 245         return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 246 }
 247 
 248 static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 249 {
 250         return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 251 }
 252 
 253 static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 254 {
 255         u8 ver = ioat_chan->ioat_dma->version;
 256 
 257         writeb(IOAT_CHANCMD_SUSPEND,
 258                ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 259 }
 260 
 261 static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 262 {
 263         u8 ver = ioat_chan->ioat_dma->version;
 264 
 265         writeb(IOAT_CHANCMD_RESET,
 266                ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 267 }
 268 
 269 static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 270 {
 271         u8 ver = ioat_chan->ioat_dma->version;
 272         u8 cmd;
 273 
 274         cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 275         return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 276 }
 277 
 278 static inline bool is_ioat_active(unsigned long status)
 279 {
 280         return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
 281 }
 282 
 283 static inline bool is_ioat_idle(unsigned long status)
 284 {
 285         return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
 286 }
 287 
 288 static inline bool is_ioat_halted(unsigned long status)
 289 {
 290         return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
 291 }
 292 
 293 static inline bool is_ioat_suspended(unsigned long status)
 294 {
 295         return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
 296 }
 297 
 298 /* channel was fatally programmed */
 299 static inline bool is_ioat_bug(unsigned long err)
 300 {
 301         return !!err;
 302 }
 303 
 304 #define IOAT_MAX_ORDER 16
 305 #define IOAT_MAX_DESCS 65536
 306 #define IOAT_DESCS_PER_2M 32768
 307 
 308 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
 309 {
 310         return 1 << ioat_chan->alloc_order;
 311 }
 312 
 313 /* count of descriptors in flight with the engine */
 314 static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
 315 {
 316         return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
 317                         ioat_ring_size(ioat_chan));
 318 }
 319 
 320 /* count of descriptors pending submission to hardware */
 321 static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
 322 {
 323         return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
 324                         ioat_ring_size(ioat_chan));
 325 }
 326 
 327 static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
 328 {
 329         return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
 330 }
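/*
 * Worked example with hypothetical index values: with alloc_order == 4
 * the ring holds 1 << 4 == 16 descriptors.  If tail == 3, issued == 8 and
 * head == 11, then ioat_ring_active() == CIRC_CNT(11, 3, 16) == 8
 * descriptors are outstanding, ioat_ring_pending() == CIRC_CNT(11, 8, 16)
 * == 3 of them have not yet been pushed to hardware, and
 * ioat_ring_space() == 16 - 8 == 8 slots remain for new submissions.
 */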
 331 
 332 static inline u16
 333 ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
 334 {
 335         u16 num_descs = len >> ioat_chan->xfercap_log;
 336 
 337         num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
 338         return num_descs;
 339 }
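/*
 * Example with a hypothetical xfercap: with xfercap_log == 20 (a 1 MiB
 * per-descriptor limit), a 2.5 MiB transfer needs len >> 20 == 2 full
 * descriptors plus one for the 0.5 MiB remainder, so this returns 3.
 */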
 340 
 341 static inline struct ioat_ring_ent *
 342 ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
 343 {
 344         return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
 345 }
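/*
 * head, issued and tail are free-running u16 counters; because the ring
 * size is a power of two, masking with (ioat_ring_size() - 1) maps any of
 * them to a valid slot, so callers may pass indices that have wrapped.
 */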
 346 
 347 static inline void
 348 ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
 349 {
 350         writel(addr & 0x00000000FFFFFFFF,
 351                ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
 352         writel(addr >> 32,
 353                ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 354 }
 355 
 356 /* IOAT Prep functions */
 357 struct dma_async_tx_descriptor *
 358 ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 359                            dma_addr_t dma_src, size_t len, unsigned long flags);
 360 struct dma_async_tx_descriptor *
 361 ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
 362 struct dma_async_tx_descriptor *
 363 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 364                unsigned int src_cnt, size_t len, unsigned long flags);
 365 struct dma_async_tx_descriptor *
 366 ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 367                     unsigned int src_cnt, size_t len,
 368                     enum sum_check_flags *result, unsigned long flags);
 369 struct dma_async_tx_descriptor *
 370 ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 371               unsigned int src_cnt, const unsigned char *scf, size_t len,
 372               unsigned long flags);
 373 struct dma_async_tx_descriptor *
 374 ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 375                   unsigned int src_cnt, const unsigned char *scf, size_t len,
 376                   enum sum_check_flags *pqres, unsigned long flags);
 377 struct dma_async_tx_descriptor *
 378 ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 379                  unsigned int src_cnt, size_t len, unsigned long flags);
 380 struct dma_async_tx_descriptor *
 381 ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 382                      unsigned int src_cnt, size_t len,
 383                      enum sum_check_flags *result, unsigned long flags);
 384 
 385 /* IOAT Operation functions */
 386 irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
 387 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
 388 struct ioat_ring_ent **
 389 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
 390 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
 391 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
 392 int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
 393 enum dma_status
 394 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 395                 struct dma_tx_state *txstate);
 396 void ioat_cleanup_event(unsigned long data);
 397 void ioat_timer_event(struct timer_list *t);
 398 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
 399 void ioat_issue_pending(struct dma_chan *chan);
 400 
 401 /* IOAT Init functions */
 402 bool is_bwd_ioat(struct pci_dev *pdev);
 403 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 404 void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
 405 void ioat_kobject_del(struct ioatdma_device *ioat_dma);
 406 int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
 407 void ioat_stop(struct ioatdma_chan *ioat_chan);
 408 #endif /* IOATDMA_H */
