crypto/async_tx/async_tx.c


DEFINITIONS

This source file includes the following definitions:
  1. async_tx_init
  2. async_tx_exit
  3. __async_tx_find_channel
  4. async_tx_channel_switch
  5. async_tx_submit
  6. async_trigger_callback
  7. async_tx_quiesce

// SPDX-License-Identifier: GPL-2.0-only
/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *      Dan Williams <dan.j.williams@intel.com>
 *
 *      with architecture considerations by:
 *      Neil Brown <neilb@suse.de>
 *      Jeff Garzik <jeff@garzik.org>
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
        async_dmaengine_get();

        printk(KERN_INFO "async_tx: api initialized (async)\n");

        return 0;
}

static void __exit async_tx_exit(void)
{
        async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *      the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
                        enum dma_transaction_type tx_type)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        /* see if we can keep the chain on one channel */
        if (depend_tx &&
            dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
                return depend_tx->chan;
        return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
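
/*
 * Usage sketch (illustrative; not part of the original file): front ends
 * reach this helper through the async_tx_find_channel() wrapper declared
 * in <linux/async_tx.h>, e.g.
 *
 *      chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1,
 *                                   src_list, src_cnt, len);
 *
 * Staying on the parent's channel keeps the dependency chain local and
 * avoids the interrupt-descriptor detour in async_tx_channel_switch().
 */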

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *      pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                        struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = depend_tx->chan;
        struct dma_device *device = chan->device;
        /* sentinel: ~0 means tx has not been chained to depend_tx yet */
        struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

        /* first check to see if we can still append to depend_tx */
        txd_lock(depend_tx);
        if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
                txd_chain(depend_tx, tx);
                intr_tx = NULL;
        }
        txd_unlock(depend_tx);

        /* attached dependency, flush the parent channel */
        if (!intr_tx) {
                device->device_issue_pending(chan);
                return;
        }

        /* see if we can schedule an interrupt
         * otherwise poll for completion
         */
        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                intr_tx = device->device_prep_dma_interrupt(chan, 0);
        else
                intr_tx = NULL;

        if (intr_tx) {
                intr_tx->callback = NULL;
                intr_tx->callback_param = NULL;
                /* safe to chain outside the lock since we know we are
                 * not submitted yet
                 */
                txd_chain(intr_tx, tx);

                /* check if we need to append */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        txd_chain(depend_tx, intr_tx);
                        async_tx_ack(intr_tx);
                        intr_tx = NULL;
                }
                txd_unlock(depend_tx);

                if (intr_tx) {
                        txd_clear_parent(intr_tx);
                        intr_tx->tx_submit(intr_tx);
                        async_tx_ack(intr_tx);
                }
                device->device_issue_pending(chan);
        } else {
                if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
        }
}
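
/*
 * Illustration (summarizing the function above; not from the original
 * source): after a successful channel switch the dependency chain is
 *
 *      depend_tx (chan A) --> intr_tx (chan A) --> tx (chan B)
 *
 * where intr_tx exists only to raise DMA_INTERRUPT when chan A finishes,
 * at which point dma_run_dependencies() submits tx to its new channel.
 * Without DMA_INTERRUPT support the code above degrades to polling
 * depend_tx to completion before submitting tx directly.
 */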

/**
 * submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
        ASYNC_TX_SUBMITTED,
        ASYNC_TX_CHANNEL_SWITCH,
        ASYNC_TX_DIRECT_SUBMIT,
};

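/**
 * async_tx_submit - attach @tx to its dependency, if any, and submit it
 * @chan: channel selected to run @tx
 * @tx: the new operation
 * @submit: submission and completion parameters, including the dependency
 *
 * (Documentation derived from the code below.)  The routing decision is:
 *
 *      depend_tx parent?   same channel?   disposition
 *      yes                 yes             ASYNC_TX_SUBMITTED (chained under lock)
 *      yes                 no              ASYNC_TX_CHANNEL_SWITCH
 *      no                  yes             ASYNC_TX_DIRECT_SUBMIT
 *      no                  no              ASYNC_TX_CHANNEL_SWITCH
 */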
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
                struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        tx->callback = submit->cb_fn;
        tx->callback_param = submit->cb_param;

        if (depend_tx) {
                enum submit_disposition s;

                /* sanity check the dependency chain:
                 * 1/ if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 * 2/ dependencies are 1:1 i.e. two transactions can
                 * not depend on the same parent
                 */
                BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
                       txd_parent(tx));

                /* the lock prevents async_tx_run_dependencies from missing
                 * the setting of ->next when ->parent != NULL
                 */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        /* we have a parent so we can not submit directly
                         * if we are staying on the same channel: append
                         * else: channel switch
                         */
                        if (depend_tx->chan == chan) {
                                txd_chain(depend_tx, tx);
                                s = ASYNC_TX_SUBMITTED;
                        } else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                } else {
                        /* we do not have a parent so we may be able to submit
                         * directly if we are staying on the same channel
                         */
                        if (depend_tx->chan == chan)
                                s = ASYNC_TX_DIRECT_SUBMIT;
                        else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                }
                txd_unlock(depend_tx);

                switch (s) {
                case ASYNC_TX_SUBMITTED:
                        break;
                case ASYNC_TX_CHANNEL_SWITCH:
                        async_tx_channel_switch(depend_tx, tx);
                        break;
                case ASYNC_TX_DIRECT_SUBMIT:
                        txd_clear_parent(tx);
                        tx->tx_submit(tx);
                        break;
                }
        } else {
                txd_clear_parent(tx);
                tx->tx_submit(tx);
        }

        if (submit->flags & ASYNC_TX_ACK)
                async_tx_ack(tx);

        if (depend_tx)
                async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
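
/*
 * Usage sketch (illustrative; modeled loosely on the async_memcpy() front
 * end in crypto/async_tx/async_memcpy.c, details may differ):
 *
 *      struct async_submit_ctl submit;
 *      struct dma_chan *chan;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
 *                        cb_fn, cb_param, NULL);
 *      chan = async_tx_find_channel(&submit, DMA_MEMCPY, &dest, 1,
 *                                   &src, 1, len);
 *      tx = chan ? chan->device->device_prep_dma_memcpy(chan, dma_dest,
 *                                                       dma_src, len, 0)
 *                : NULL;
 *      if (tx)
 *              async_tx_submit(chan, tx, &submit);
 *      else
 *              async_tx_quiesce(&submit.depend_tx);    (then copy via CPU)
 */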

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *tx;
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        if (depend_tx) {
                chan = depend_tx->chan;
                device = chan->device;

                /* see if we can schedule an interrupt
                 * otherwise poll for completion
                 */
                if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                        device = NULL;

                tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
        } else
                tx = NULL;

        if (tx) {
                pr_debug("%s: (async)\n", __func__);

                async_tx_submit(chan, tx, submit);
        } else {
                pr_debug("%s: (sync)\n", __func__);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                async_tx_sync_epilog(submit);
        }

        return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
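
/*
 * Usage sketch (illustrative; resembles how md/raid5 runs a completion
 * routine once the preceding stripe operations finish):
 *
 *      init_async_submit(&submit, ASYNC_TX_ACK, tx,
 *                        ops_complete, stripe, NULL);
 *      async_trigger_callback(&submit);
 *
 * When no DMA_INTERRUPT-capable channel is available, the dependency is
 * quiesced and the callback runs synchronously via async_tx_sync_epilog().
 */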

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
        if (*tx) {
                /* if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 */
                BUG_ON(async_tx_test_ack(*tx));
                if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for transaction\n",
                              __func__);
                async_tx_ack(*tx);
                *tx = NULL;
        }
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
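
/*
 * Usage sketch (illustrative): a front end falling back to a synchronous
 * implementation quiesces its dependency first so the CPU never races
 * in-flight DMA, e.g.
 *
 *      async_tx_quiesce(&submit->depend_tx);
 *      do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
 *
 * (do_sync_xor() stands in here for any synchronous fallback; the actual
 * helper of that name lives in crypto/async_tx/async_xor.c.)
 */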

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
