root/drivers/dma/virt-dma.h

DEFINITIONS

This source file includes the following definitions:
  1. to_virt_chan
  2. vchan_tx_prep
  3. vchan_issue_pending
  4. vchan_cookie_complete
  5. vchan_vdesc_fini
  6. vchan_cyclic_callback
  7. vchan_terminate_vdesc
  8. vchan_next_desc
  9. vchan_get_all_descriptors
  10. vchan_free_chan_resources
  11. vchan_synchronize

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
        struct dma_async_tx_descriptor tx;
        struct dmaengine_result tx_result;
        /* protected by vc.lock */
        struct list_head node;
};

struct virt_dma_chan {
        struct dma_chan chan;
        struct tasklet_struct task;
        void (*desc_free)(struct virt_dma_desc *);

        spinlock_t lock;

        /* protected by vc.lock */
        struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;

        struct virt_dma_desc *cyclic;
        struct virt_dma_desc *vd_terminated;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
        return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
        struct virt_dma_desc *vd, unsigned long tx_flags)
{
        unsigned long flags;

        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
        vd->tx.desc_free = vchan_tx_desc_free;

        vd->tx_result.result = DMA_TRANS_NOERROR;
        vd->tx_result.residue = 0;

        spin_lock_irqsave(&vc->lock, flags);
        list_add_tail(&vd->node, &vc->desc_allocated);
        spin_unlock_irqrestore(&vc->lock, flags);

        return &vd->tx;
}
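
/*
 * Illustrative use (the foo_* names are hypothetical): a driver's
 * device_prep_* callback allocates its own descriptor, which embeds a
 * struct virt_dma_desc, and hands it back through vchan_tx_prep():
 *
 *      struct foo_desc {
 *              struct virt_dma_desc vd;
 *              dma_addr_t src, dst;
 *              size_t len;
 *      };
 *
 *      static struct dma_async_tx_descriptor *
 *      foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *                          dma_addr_t src, size_t len, unsigned long flags)
 *      {
 *              struct virt_dma_chan *vc = to_virt_chan(chan);
 *              struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *              if (!d)
 *                      return NULL;
 *              d->src = src;
 *              d->dst = dst;
 *              d->len = len;
 *              return vchan_tx_prep(vc, &d->vd, flags);
 *      }
 */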

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
        list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
        return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
        dma_cookie_t cookie;

        cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
                 vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);

        tasklet_schedule(&vc->task);
}

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        if (dmaengine_desc_test_reuse(&vd->tx))
                list_add(&vd->node, &vc->desc_allocated);
        else
                vc->desc_free(vd);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        vc->cyclic = vd;
        tasklet_schedule(&vc->task);
}

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        /* free up stuck descriptor */
        if (vc->vd_terminated)
                vchan_vdesc_fini(vc->vd_terminated);

        vc->vd_terminated = vd;
        if (vc->cyclic == vd)
                vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
        return list_first_entry_or_null(&vc->desc_issued,
                                        struct virt_dma_desc, node);
}

/**
 * vchan_get_all_descriptors - obtain all descriptors queued on a channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued and completed descriptors from
 * the internal lists, and provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        struct list_head *head)
{
        list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
}

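/**
 * vchan_free_chan_resources - free all descriptors held by a channel
 * @vc: virtual channel to clean up
 *
 * Collects the allocated, submitted, issued and completed descriptors,
 * clears their reuse flag and frees them via vchan_dma_desc_free_list().
 */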
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
        struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
        list_for_each_entry(vd, &head, node)
                dmaengine_desc_clear_reuse(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are
 * scheduled after the invocation of this function has started.
 *
 * Also frees the terminated descriptor, if one is still pending, to prevent
 * a memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
        unsigned long flags;

        tasklet_kill(&vc->task);

        spin_lock_irqsave(&vc->lock, flags);
        if (vc->vd_terminated) {
                vchan_vdesc_fini(vc->vd_terminated);
                vc->vd_terminated = NULL;
        }
        spin_unlock_irqrestore(&vc->lock, flags);
}

#endif
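
The helpers above are meant to be used from a DMA controller driver that embeds
struct virt_dma_chan in its own channel structure and struct virt_dma_desc in
its own descriptor. The sketch below is illustrative only: the foo_* names are
hypothetical, hardware programming is reduced to comments, and error handling
is omitted. Under those assumptions it shows the typical issue_pending,
interrupt-completion and terminate_all call flow.

/* foo.c: hypothetical driver built on the virt-dma helpers (sketch only) */

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "virt-dma.h"

struct foo_desc {
        struct virt_dma_desc vd;        /* must be embedded */
        /* hardware-specific fields: source, destination, length, ... */
};

struct foo_chan {
        struct virt_dma_chan vc;        /* embeds the virtual channel */
        struct foo_desc *cur;           /* descriptor currently on the hardware */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
{
        return container_of(chan, struct foo_chan, vc.chan);
}

/* Take the next issued descriptor off the list and program the hardware. */
static void foo_start_next(struct foo_chan *fc)
{
        struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);

        if (!vd) {
                fc->cur = NULL;
                return;
        }
        list_del(&vd->node);
        fc->cur = container_of(vd, struct foo_desc, vd);
        /* ... write the descriptor parameters to the controller ... */
}

/* ->device_issue_pending: start the hardware if it is currently idle. */
static void foo_issue_pending(struct dma_chan *chan)
{
        struct foo_chan *fc = to_foo_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fc->vc.lock, flags);
        if (vchan_issue_pending(&fc->vc) && !fc->cur)
                foo_start_next(fc);
        spin_unlock_irqrestore(&fc->vc.lock, flags);
}

/* Interrupt handler: report completion and kick off the next descriptor. */
static irqreturn_t foo_irq(int irq, void *data)
{
        struct foo_chan *fc = data;
        unsigned long flags;

        spin_lock_irqsave(&fc->vc.lock, flags);
        if (fc->cur) {
                /* already unlinked from desc_issued in foo_start_next() */
                vchan_cookie_complete(&fc->cur->vd);
                foo_start_next(fc);
        }
        spin_unlock_irqrestore(&fc->vc.lock, flags);
        return IRQ_HANDLED;
}

/* ->device_terminate_all: stop the hardware and free everything queued. */
static int foo_terminate_all(struct dma_chan *chan)
{
        struct foo_chan *fc = to_foo_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fc->vc.lock, flags);
        /* ... quiesce the controller channel (hardware specific) ... */
        if (fc->cur) {
                vchan_terminate_vdesc(&fc->cur->vd);
                fc->cur = NULL;
        }
        vchan_get_all_descriptors(&fc->vc, &head);
        spin_unlock_irqrestore(&fc->vc.lock, flags);

        vchan_dma_desc_free_list(&fc->vc, &head);
        return 0;
}

/* ->device_synchronize maps directly onto vchan_synchronize(). */
static void foo_synchronize(struct dma_chan *chan)
{
        vchan_synchronize(&to_foo_chan(chan)->vc);
}

A real driver would additionally call vchan_init() for each channel at probe
time and point vc.desc_free at a routine that frees its struct foo_desc;
descriptors themselves are created in the device_prep_* callbacks as sketched
after vchan_tx_prep() above.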
