root/drivers/iio/buffer/industrialio-buffer-dma.c

DEFINITIONS

This source file includes the following definitions.
  1. iio_buffer_block_release
  2. iio_buffer_block_get
  3. iio_buffer_block_put
  4. iio_dma_buffer_cleanup_worker
  5. iio_buffer_block_release_atomic
  6. iio_buffer_block_put_atomic
  7. iio_buffer_to_queue
  8. iio_dma_buffer_alloc_block
  9. _iio_dma_buffer_block_done
  10. iio_dma_buffer_block_done
  11. iio_dma_buffer_block_list_abort
  12. iio_dma_block_reusable
  13. iio_dma_buffer_request_update
  14. iio_dma_buffer_submit_block
  15. iio_dma_buffer_enable
  16. iio_dma_buffer_disable
  17. iio_dma_buffer_enqueue
  18. iio_dma_buffer_dequeue
  19. iio_dma_buffer_read
  20. iio_dma_buffer_data_available
  21. iio_dma_buffer_set_bytes_per_datum
  22. iio_dma_buffer_set_length
  23. iio_dma_buffer_init
  24. iio_dma_buffer_exit
  25. iio_dma_buffer_release

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright 2013-2015 Analog Devices Inc.
   4  *  Author: Lars-Peter Clausen <lars@metafoo.de>
   5  */
   6 
   7 #include <linux/slab.h>
   8 #include <linux/kernel.h>
   9 #include <linux/module.h>
  10 #include <linux/device.h>
  11 #include <linux/workqueue.h>
  12 #include <linux/mutex.h>
  13 #include <linux/sched.h>
  14 #include <linux/poll.h>
  15 #include <linux/iio/buffer.h>
  16 #include <linux/iio/buffer_impl.h>
  17 #include <linux/iio/buffer-dma.h>
  18 #include <linux/dma-mapping.h>
  19 #include <linux/sizes.h>
  20 
  21 /*
  22  * For DMA buffers the storage is sub-divided into so-called blocks. Each block
  23  * has its own memory buffer. The size of the block is the granularity at which
  24  * memory is exchanged between the hardware and the application. Increasing the
  25  * basic unit of data exchange from one sample to one block decreases the
  26  * management overhead that is associated with each sample. E.g. if we say the
  27  * management overhead for one exchange is x and the unit of exchange is one
  28  * sample, the overhead will be x for each sample. Whereas when using a block
  29  * which contains n samples the overhead per sample is reduced to x/n. This
  30  * makes it possible to achieve much higher sample rates than what can be
  31  * sustained with the one-sample approach.
  32  *
  33  * Blocks are exchanged between the DMA controller and the application by
  34  * means of two queues: the incoming queue and the outgoing queue. Blocks on the
  35  * incoming queue are waiting for the DMA controller to pick them up and fill
  36  * them with data. Blocks on the outgoing queue have been filled with data and
  37  * are waiting for the application to dequeue them and read the data.
  38  *
  39  * A block can be in one of the following states:
  40  *  * Owned by the application. In this state the application can read data from
  41  *    the block.
  42  *  * On the incoming list: Blocks on the incoming list are queued up to be
  43  *    processed by the DMA controller.
  44  *  * Owned by the DMA controller: The DMA controller is processing the block
  45  *    and filling it with data.
  46  *  * On the outgoing list: Blocks on the outgoing list have been successfully
  47  *    processed by the DMA controller and contain data. They can be dequeued by
  48  *    the application.
  49  *  * Dead: A block that is dead has been marked to be freed. It might still
  50  *    be owned by either the application or the DMA controller at the moment,
  51  *    but once the owner is done processing it, the block will be freed instead
  52  *    of being put on either the incoming or the outgoing queue.
  53  *
  54  * In addition to this, blocks are reference counted and the memory associated
  55  * with both the block structure as well as the storage memory for the block
  56  * will be freed when the last reference to the block is dropped. This means a
  57  * block must not be accessed without holding a reference.
  58  *
  59  * The iio_dma_buffer implementation provides a generic infrastructure for
  60  * managing the blocks.
  61  *
  62  * A driver for a specific piece of hardware that has DMA capabilities needs to
  63  * implement the submit() callback from the iio_dma_buffer_ops structure. This
  64  * callback is supposed to initiate the DMA transfer copying data from the
  65  * converter to the memory region of the block. Once the DMA transfer has been
  66  * completed the driver must call iio_dma_buffer_block_done() for the completed
  67  * block.
  68  *
  69  * Prior to this it must set the bytes_used field of the block to contain
  70  * the actual number of bytes in the buffer. Typically this will be equal to the
  71  * size of the block, but if the DMA hardware has certain alignment requirements
  72  * for the transfer length it might choose to use less than the full size. In
  73  * either case it is expected that bytes_used is a multiple of the bytes per
  74  * datum, i.e. the block must not contain partial samples.
  75  *
  76  * The driver must call iio_dma_buffer_block_done() for each block it has
  77  * received through its submit() callback, even if it does not actually
  78  * perform a DMA transfer for the block, e.g. because the buffer was disabled
  79  * before the block transfer was started. In this case it should set bytes_used
  80  * to 0.
  81  *
  82  * In addition it is recommended that a driver implements the abort() callback.
  83  * It will be called when the buffer is disabled and can be used to cancel
  84  * pending transfers and stop active ones.
  85  *
  86  * The specific driver implementation should use the default callback
  87  * implementations provided by this module for the iio_buffer_access_funcs
  88  * struct. It may overload some callbacks with custom variants if the hardware
  89  * has special requirements that are not handled by the generic functions. If a
  90  * driver chooses to overload a callback it has to ensure that the generic
  91  * callback is called from within the custom callback.
  92  */
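
/*
 * Editor's sketch (not part of the original file): a minimal outline of the
 * driver-side glue described above, guarded by "#if 0" so it is never built.
 * All foo_* names are hypothetical; only struct iio_dma_buffer_ops and the
 * iio_dma_buffer_* symbols come from this module and its header.
 */
#if 0
static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	/*
	 * Program a transfer covering the whole block. The (assumed)
	 * foo_dma_issue() helper arranges for foo_dma_complete(), sketched
	 * after iio_dma_buffer_block_done() below, to run on completion.
	 */
	return foo_dma_issue(queue, block->phys_addr, block->size, block);
}

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_dma_submit,
	.abort = foo_dma_abort,	/* sketched after iio_dma_buffer_block_list_abort() */
};
#endif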
  93 
  94 static void iio_buffer_block_release(struct kref *kref)
  95 {
  96         struct iio_dma_buffer_block *block = container_of(kref,
  97                 struct iio_dma_buffer_block, kref);
  98 
  99         WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
 100 
 101         dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
 102                                         block->vaddr, block->phys_addr);
 103 
 104         iio_buffer_put(&block->queue->buffer);
 105         kfree(block);
 106 }
 107 
 108 static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
 109 {
 110         kref_get(&block->kref);
 111 }
 112 
 113 static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
 114 {
 115         kref_put(&block->kref, iio_buffer_block_release);
 116 }
 117 
 118 /*
 119  * dma_free_coherent can sleep, hence we need to take some special care to be
 120  * able to drop a reference from an atomic context.
 121  */
 122 static LIST_HEAD(iio_dma_buffer_dead_blocks);
 123 static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
 124 
 125 static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
 126 {
 127         struct iio_dma_buffer_block *block, *_block;
 128         LIST_HEAD(block_list);
 129 
 130         spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
 131         list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
 132         spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);
 133 
 134         list_for_each_entry_safe(block, _block, &block_list, head)
 135                 iio_buffer_block_release(&block->kref);
 136 }
 137 static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);
 138 
 139 static void iio_buffer_block_release_atomic(struct kref *kref)
 140 {
 141         struct iio_dma_buffer_block *block;
 142         unsigned long flags;
 143 
 144         block = container_of(kref, struct iio_dma_buffer_block, kref);
 145 
 146         spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
 147         list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
 148         spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);
 149 
 150         schedule_work(&iio_dma_buffer_cleanup_work);
 151 }
 152 
 153 /*
 154  * Version of iio_buffer_block_put() that can be called from atomic context
 155  */
 156 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
 157 {
 158         kref_put(&block->kref, iio_buffer_block_release_atomic);
 159 }
 160 
 161 static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
 162 {
 163         return container_of(buf, struct iio_dma_buffer_queue, buffer);
 164 }
 165 
 166 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 167         struct iio_dma_buffer_queue *queue, size_t size)
 168 {
 169         struct iio_dma_buffer_block *block;
 170 
 171         block = kzalloc(sizeof(*block), GFP_KERNEL);
 172         if (!block)
 173                 return NULL;
 174 
 175         block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
 176                 &block->phys_addr, GFP_KERNEL);
 177         if (!block->vaddr) {
 178                 kfree(block);
 179                 return NULL;
 180         }
 181 
 182         block->size = size;
 183         block->state = IIO_BLOCK_STATE_DEQUEUED;
 184         block->queue = queue;
 185         INIT_LIST_HEAD(&block->head);
 186         kref_init(&block->kref);
 187 
 188         iio_buffer_get(&queue->buffer);
 189 
 190         return block;
 191 }
 192 
 193 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 194 {
 195         struct iio_dma_buffer_queue *queue = block->queue;
 196 
 197         /*
 198          * If the block is dead the application has already abandoned it;
 199          * skip re-queueing and let the caller just drop the reference.
 200          */
 201         if (block->state != IIO_BLOCK_STATE_DEAD) {
 202                 block->state = IIO_BLOCK_STATE_DONE;
 203                 list_add_tail(&block->head, &queue->outgoing);
 204         }
 205 }
 206 
 207 /**
 208  * iio_dma_buffer_block_done() - Indicate that a block has been completed
 209  * @block: The completed block
 210  *
 211  * Should be called when the DMA controller has finished handling the block to
 212  * pass back ownership of the block to the queue.
 213  */
 214 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 215 {
 216         struct iio_dma_buffer_queue *queue = block->queue;
 217         unsigned long flags;
 218 
 219         spin_lock_irqsave(&queue->list_lock, flags);
 220         _iio_dma_buffer_block_done(block);
 221         spin_unlock_irqrestore(&queue->list_lock, flags);
 222 
 223         iio_buffer_block_put_atomic(block);
 224         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 225 }
 226 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
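
/*
 * Editor's sketch (not part of the original file): how a hypothetical
 * driver's completion handler would hand a finished block back. The foo_*
 * name and the transferred-length parameter are assumptions; the rule that
 * bytes_used must be set before iio_dma_buffer_block_done() comes from the
 * header comment above.
 */
#if 0
static void foo_dma_complete(void *data, unsigned int bytes_transferred)
{
	struct iio_dma_buffer_block *block = data;

	/* Must be a multiple of bytes_per_datum, or 0 if nothing was moved. */
	block->bytes_used = bytes_transferred;
	iio_dma_buffer_block_done(block);
}
#endif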
 227 
 228 /**
 229  * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 230  *   aborted
 231  * @queue: Queue for which to complete blocks.
 232  * @list: List of aborted blocks. All blocks in this list must be from @queue.
 233  *
 234  * Typically called from the abort() callback after the DMA controller has been
 235  * stopped. This will set bytes_used to 0 for each block in the list and then
 236  * hand the blocks back to the queue.
 237  */
 238 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 239         struct list_head *list)
 240 {
 241         struct iio_dma_buffer_block *block, *_block;
 242         unsigned long flags;
 243 
 244         spin_lock_irqsave(&queue->list_lock, flags);
 245         list_for_each_entry_safe(block, _block, list, head) {
 246                 list_del(&block->head);
 247                 block->bytes_used = 0;
 248                 _iio_dma_buffer_block_done(block);
 249                 iio_buffer_block_put_atomic(block);
 250         }
 251         spin_unlock_irqrestore(&queue->list_lock, flags);
 252 
 253         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 254 }
 255 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
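
/*
 * Editor's sketch (not part of the original file): an abort() callback for a
 * hypothetical driver that tracks its in-flight blocks on a driver-private
 * list. foo_dma_state, foo_queue_to_state() and foo_dma_stop() are
 * assumptions; only iio_dma_buffer_block_list_abort() is real.
 */
#if 0
static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
{
	struct foo_dma_state *st = foo_queue_to_state(queue);

	/* Stop the controller, then return every in-flight block as aborted. */
	foo_dma_stop(st);
	iio_dma_buffer_block_list_abort(queue, &st->active_blocks);
}
#endif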
 256 
 257 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 258 {
 259         /*
 260          * If the core owns the block it can be re-used. This should be the
 261          * default case when enabling the buffer, unless the DMA controller does
 262          * not support abort and has not given back the block yet.
 263          */
 264         switch (block->state) {
 265         case IIO_BLOCK_STATE_DEQUEUED:
 266         case IIO_BLOCK_STATE_QUEUED:
 267         case IIO_BLOCK_STATE_DONE:
 268                 return true;
 269         default:
 270                 return false;
 271         }
 272 }
 273 
 274 /**
 275  * iio_dma_buffer_request_update() - DMA buffer request_update callback
 276  * @buffer: The buffer for which to request an update
 277  *
 278  * Should be used as the request_update callback for the
 279  * iio_buffer_access_funcs struct for DMA buffers.
 280  */
 281 int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 282 {
 283         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 284         struct iio_dma_buffer_block *block;
 285         bool try_reuse = false;
 286         size_t size;
 287         int ret = 0;
 288         int i;
 289 
 290         /*
 291          * Split the buffer into two even parts. This is used as a double
 292          * buffering scheme with usually one block at a time being used by the
 293          * DMA and the other one by the application.
 294          */
 295         size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
 296                 queue->buffer.length, 2);
 297 
 298         mutex_lock(&queue->lock);
 299 
 300         /* Allocations are page aligned */
 301         if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
 302                 try_reuse = true;
 303 
 304         queue->fileio.block_size = size;
 305         queue->fileio.active_block = NULL;
 306 
 307         spin_lock_irq(&queue->list_lock);
 308         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 309                 block = queue->fileio.blocks[i];
 310 
 311                 /* If we can't re-use it free it */
 312                 if (block && (!iio_dma_block_reusable(block) || !try_reuse))
 313                         block->state = IIO_BLOCK_STATE_DEAD;
 314         }
 315 
 316         /*
 317          * At this point all blocks are either owned by the core or marked as
 318          * dead. This means we can reset the lists without having to fear
 319  * corruption.
 320          */
 321         INIT_LIST_HEAD(&queue->outgoing);
 322         spin_unlock_irq(&queue->list_lock);
 323 
 324         INIT_LIST_HEAD(&queue->incoming);
 325 
 326         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 327                 if (queue->fileio.blocks[i]) {
 328                         block = queue->fileio.blocks[i];
 329                         if (block->state == IIO_BLOCK_STATE_DEAD) {
 330                                 /* Could not reuse it */
 331                                 iio_buffer_block_put(block);
 332                                 block = NULL;
 333                         } else {
 334                                 block->size = size;
 335                         }
 336                 } else {
 337                         block = NULL;
 338                 }
 339 
 340                 if (!block) {
 341                         block = iio_dma_buffer_alloc_block(queue, size);
 342                         if (!block) {
 343                                 ret = -ENOMEM;
 344                                 goto out_unlock;
 345                         }
 346                         queue->fileio.blocks[i] = block;
 347                 }
 348 
 349                 block->state = IIO_BLOCK_STATE_QUEUED;
 350                 list_add_tail(&block->head, &queue->incoming);
 351         }
 352 
 353 out_unlock:
 354         mutex_unlock(&queue->lock);
 355 
 356         return ret;
 357 }
 358 EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
 359 
 360 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 361         struct iio_dma_buffer_block *block)
 362 {
 363         int ret;
 364 
 365         /*
 366          * If the hardware has already been removed we put the block into
 367          * limbo. It will neither be on the incoming nor outgoing list, nor will
 368          * it ever complete. It will just wait to be freed eventually.
 369          */
 370         if (!queue->ops)
 371                 return;
 372 
 373         block->state = IIO_BLOCK_STATE_ACTIVE;
 374         iio_buffer_block_get(block);
 375         ret = queue->ops->submit(queue, block);
 376         if (ret) {
 377                 /*
 378                  * This is a bit of a problem and there is not much we can do
 379                  * other than wait for the buffer to be disabled and re-enabled
 380                  * and try again. But it should not really happen unless we run
 381                  * out of memory or something similar.
 382                  *
 383                  * TODO: Implement support in the IIO core to allow buffers to
 384                  * notify consumers that something went wrong and the buffer
 385                  * should be disabled.
 386                  */
 387                 iio_buffer_block_put(block);
 388         }
 389 }
 390 
 391 /**
 392  * iio_dma_buffer_enable() - Enable DMA buffer
 393  * @buffer: IIO buffer to enable
 394  * @indio_dev: IIO device the buffer is attached to
 395  *
 396  * Needs to be called when the device that the buffer is attached to starts
 397  * sampling. Typically used as the iio_buffer_access_funcs enable callback.
 398  *
 399  * This will start the DMA transfers for all blocks on the incoming queue.
 400  */
 401 int iio_dma_buffer_enable(struct iio_buffer *buffer,
 402         struct iio_dev *indio_dev)
 403 {
 404         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 405         struct iio_dma_buffer_block *block, *_block;
 406 
 407         mutex_lock(&queue->lock);
 408         queue->active = true;
 409         list_for_each_entry_safe(block, _block, &queue->incoming, head) {
 410                 list_del(&block->head);
 411                 iio_dma_buffer_submit_block(queue, block);
 412         }
 413         mutex_unlock(&queue->lock);
 414 
 415         return 0;
 416 }
 417 EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
 418 
 419 /**
 420  * iio_dma_buffer_disable() - Disable DMA buffer
 421  * @buffer: IIO DMA buffer to disable
 422  * @indio_dev: IIO device the buffer is attached to
 423  *
 424  * Needs to be called when the device that the buffer is attached to stops
 425  * sampling. Typically used as the iio_buffer_access_funcs disable callback.
 426  */
 427 int iio_dma_buffer_disable(struct iio_buffer *buffer,
 428         struct iio_dev *indio_dev)
 429 {
 430         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 431 
 432         mutex_lock(&queue->lock);
 433         queue->active = false;
 434 
 435         if (queue->ops && queue->ops->abort)
 436                 queue->ops->abort(queue);
 437         mutex_unlock(&queue->lock);
 438 
 439         return 0;
 440 }
 441 EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
 442 
 443 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
 444         struct iio_dma_buffer_block *block)
 445 {
 446         if (block->state == IIO_BLOCK_STATE_DEAD) {
 447                 iio_buffer_block_put(block);
 448         } else if (queue->active) {
 449                 iio_dma_buffer_submit_block(queue, block);
 450         } else {
 451                 block->state = IIO_BLOCK_STATE_QUEUED;
 452                 list_add_tail(&block->head, &queue->incoming);
 453         }
 454 }
 455 
 456 static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
 457         struct iio_dma_buffer_queue *queue)
 458 {
 459         struct iio_dma_buffer_block *block;
 460 
 461         spin_lock_irq(&queue->list_lock);
 462         block = list_first_entry_or_null(&queue->outgoing, struct
 463                 iio_dma_buffer_block, head);
 464         if (block != NULL) {
 465                 list_del(&block->head);
 466                 block->state = IIO_BLOCK_STATE_DEQUEUED;
 467         }
 468         spin_unlock_irq(&queue->list_lock);
 469 
 470         return block;
 471 }
 472 
 473 /**
 474  * iio_dma_buffer_read() - DMA buffer read callback
 475  * @buffer: Buffer to read from
 476  * @n: Number of bytes to read
 477  * @user_buffer: Userspace buffer to copy the data to
 478  *
 479  * Should be used as the read_first_n callback for the iio_buffer_access_funcs
 480  * struct for DMA buffers.
 481  */
 482 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 483         char __user *user_buffer)
 484 {
 485         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 486         struct iio_dma_buffer_block *block;
 487         int ret;
 488 
 489         if (n < buffer->bytes_per_datum)
 490                 return -EINVAL;
 491 
 492         mutex_lock(&queue->lock);
 493 
 494         if (!queue->fileio.active_block) {
 495                 block = iio_dma_buffer_dequeue(queue);
 496                 if (block == NULL) {
 497                         ret = 0;
 498                         goto out_unlock;
 499                 }
 500                 queue->fileio.pos = 0;
 501                 queue->fileio.active_block = block;
 502         } else {
 503                 block = queue->fileio.active_block;
 504         }
 505 
 506         n = rounddown(n, buffer->bytes_per_datum);
 507         if (n > block->bytes_used - queue->fileio.pos)
 508                 n = block->bytes_used - queue->fileio.pos;
 509 
 510         if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
 511                 ret = -EFAULT;
 512                 goto out_unlock;
 513         }
 514 
 515         queue->fileio.pos += n;
 516 
 517         if (queue->fileio.pos == block->bytes_used) {
 518                 queue->fileio.active_block = NULL;
 519                 iio_dma_buffer_enqueue(queue, block);
 520         }
 521 
 522         ret = n;
 523 
 524 out_unlock:
 525         mutex_unlock(&queue->lock);
 526 
 527         return ret;
 528 }
 529 EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
 530 
 531 /**
 532  * iio_dma_buffer_data_available() - DMA buffer data_available callback
 533  * @buf: Buffer to check for data availability
 534  *
 535  * Should be used as the data_available callback for the
 536  * iio_buffer_access_funcs struct for DMA buffers.
 537  */
 538 size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 539 {
 540         struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
 541         struct iio_dma_buffer_block *block;
 542         size_t data_available = 0;
 543 
 544         /*
 545          * For counting the available bytes we'll use the size of the block not
 546          * the number of actual bytes available in the block. Otherwise it is
 547          * possible that we end up with a value that is lower than the watermark
 548          * but won't increase since all blocks are in use.
 549          */
 550 
 551         mutex_lock(&queue->lock);
 552         if (queue->fileio.active_block)
 553                 data_available += queue->fileio.active_block->size;
 554 
 555         spin_lock_irq(&queue->list_lock);
 556         list_for_each_entry(block, &queue->outgoing, head)
 557                 data_available += block->size;
 558         spin_unlock_irq(&queue->list_lock);
 559         mutex_unlock(&queue->lock);
 560 
 561         return data_available;
 562 }
 563 EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
 564 
 565 /**
 566  * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 567  * @buffer: Buffer to set the bytes-per-datum for
 568  * @bpd: The new bytes-per-datum value
 569  *
 570  * Should be used as the set_bytes_per_datum callback for the
 571  * iio_buffer_access_funcs struct for DMA buffers.
 572  */
 573 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
 574 {
 575         buffer->bytes_per_datum = bpd;
 576 
 577         return 0;
 578 }
 579 EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
 580 
 581 /**
 582  * iio_dma_buffer_set_length() - DMA buffer set_length callback
 583  * @buffer: Buffer to set the length for
 584  * @length: The new buffer length
 585  *
 586  * Should be used as the set_length callback for the iio_buffer_access_funcs
 587  * struct for DMA buffers.
 588  */
 589 int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
 590 {
 591         /* Avoid an invalid state */
 592         if (length < 2)
 593                 length = 2;
 594         buffer->length = length;
 595         buffer->watermark = length / 2;
 596 
 597         return 0;
 598 }
 599 EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
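
/*
 * Editor's sketch (not part of the original file): the header comment
 * recommends wiring the callbacks above into the driver's
 * iio_buffer_access_funcs as the defaults. Field names follow the
 * buffer_impl.h of this kernel generation (e.g. .read_first_n); the foo_*
 * release wrapper, which would end in iio_dma_buffer_release(), is assumed.
 */
#if 0
static const struct iio_buffer_access_funcs foo_dma_buffer_access_funcs = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = foo_dma_buffer_release,
	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};
#endif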
 600 
 601 /**
 602  * iio_dma_buffer_init() - Initialize DMA buffer queue
 603  * @queue: Buffer to initialize
 604  * @dev: DMA device
 605  * @ops: DMA buffer queue callback operations
 606  *
 607  * The DMA device will be used by the queue to do DMA memory allocations. So it
 608  * should refer to the device that will perform the DMA to ensure that
 609  * allocations are done from a memory region that can be accessed by the device.
 610  */
 611 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 612         struct device *dev, const struct iio_dma_buffer_ops *ops)
 613 {
 614         iio_buffer_init(&queue->buffer);
 615         queue->buffer.length = PAGE_SIZE;
 616         queue->buffer.watermark = queue->buffer.length / 2;
 617         queue->dev = dev;
 618         queue->ops = ops;
 619 
 620         INIT_LIST_HEAD(&queue->incoming);
 621         INIT_LIST_HEAD(&queue->outgoing);
 622 
 623         mutex_init(&queue->lock);
 624         spin_lock_init(&queue->list_lock);
 625 
 626         return 0;
 627 }
 628 EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
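
/*
 * Editor's sketch (not part of the original file): rough setup-time wiring
 * for a hypothetical driver. foo_dma_state, foo_dma_buffer_ops and
 * foo_dma_buffer_access_funcs are assumptions; iio_dma_buffer_init() and
 * iio_device_attach_buffer() are the real entry points.
 */
#if 0
static int foo_dma_buffer_probe(struct iio_dev *indio_dev,
	struct device *dma_dev, struct foo_dma_state *st)
{
	int ret;

	/* Block allocations will be done against dma_dev. */
	ret = iio_dma_buffer_init(&st->queue, dma_dev, &foo_dma_buffer_ops);
	if (ret)
		return ret;

	st->queue.buffer.access = &foo_dma_buffer_access_funcs;
	iio_device_attach_buffer(indio_dev, &st->queue.buffer);

	return 0;
}
#endif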
 629 
 630 /**
 631  * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 632  * @queue: Buffer to cleanup
 633  *
 634  * After this function has completed it is safe to free any resources that are
 635  * associated with the buffer and are accessed inside the callback operations.
 636  */
 637 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
 638 {
 639         unsigned int i;
 640 
 641         mutex_lock(&queue->lock);
 642 
 643         spin_lock_irq(&queue->list_lock);
 644         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 645                 if (!queue->fileio.blocks[i])
 646                         continue;
 647                 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
 648         }
 649         INIT_LIST_HEAD(&queue->outgoing);
 650         spin_unlock_irq(&queue->list_lock);
 651 
 652         INIT_LIST_HEAD(&queue->incoming);
 653 
 654         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 655                 if (!queue->fileio.blocks[i])
 656                         continue;
 657                 iio_buffer_block_put(queue->fileio.blocks[i]);
 658                 queue->fileio.blocks[i] = NULL;
 659         }
 660         queue->fileio.active_block = NULL;
 661         queue->ops = NULL;
 662 
 663         mutex_unlock(&queue->lock);
 664 }
 665 EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
 666 
 667 /**
 668  * iio_dma_buffer_release() - Release final buffer resources
 669  * @queue: Buffer to release
 670  *
 671  * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 672  * called in the buffer's release callback implementation right before freeing
 673  * the memory associated with the buffer.
 674  */
 675 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
 676 {
 677         mutex_destroy(&queue->lock);
 678 }
 679 EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
 680 
 681 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 682 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
 683 MODULE_LICENSE("GPL v2");
