root/sound/core/seq/seq_fifo.c

DEFINITIONS

This source file includes the following definitions.
  1. snd_seq_fifo_new
  2. snd_seq_fifo_delete
  3. snd_seq_fifo_clear
  4. snd_seq_fifo_event_in
  5. fifo_cell_out
  6. snd_seq_fifo_cell_out
  7. snd_seq_fifo_cell_putback
  8. snd_seq_fifo_poll_wait
  9. snd_seq_fifo_resize
  10. snd_seq_fifo_unused_cells

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
        struct snd_seq_fifo *f;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                return NULL;

        f->pool = snd_seq_pool_new(poolsize);
        if (f->pool == NULL) {
                kfree(f);
                return NULL;
        }
        if (snd_seq_pool_init(f->pool) < 0) {
                snd_seq_pool_delete(&f->pool);
                kfree(f);
                return NULL;
        }

        spin_lock_init(&f->lock);
        snd_use_lock_init(&f->use_lock);
        init_waitqueue_head(&f->input_sleep);
        atomic_set(&f->overflow, 0);

        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;

        return f;
}

/* delete the fifo and release all of its resources */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
        struct snd_seq_fifo *f;

        if (snd_BUG_ON(!fifo))
                return;
        f = *fifo;
        if (snd_BUG_ON(!f))
                return;
        *fifo = NULL;

        if (f->pool)
                snd_seq_pool_mark_closing(f->pool);

        snd_seq_fifo_clear(f);

        /* wake up clients if any */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        /* release resources...*/
        /*....................*/

        if (f->pool) {
                snd_seq_pool_done(f->pool);
                snd_seq_pool_delete(&f->pool);
        }

        kfree(f);
}
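
/*
 * Hypothetical usage sketch (editor's addition, not part of the original
 * file): the basic lifecycle of a FIFO.  snd_seq_fifo_new() allocates the
 * structure together with its cell pool, and snd_seq_fifo_delete() clears
 * the caller's pointer, wakes any sleepers, drains the queue and frees
 * everything.  The function name and pool size below are illustrative only.
 */
static int __maybe_unused example_fifo_lifecycle(void)
{
        struct snd_seq_fifo *f;

        f = snd_seq_fifo_new(32);       /* 32 cells is an arbitrary example size */
        if (!f)
                return -ENOMEM;

        /* ... events would be enqueued and dequeued here ... */

        snd_seq_fifo_delete(&f);        /* f is set to NULL before freeing */
        return 0;
}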

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;

        /* clear overflow flag */
        atomic_set(&f->overflow, 0);

        snd_use_lock_sync(&f->use_lock);
        spin_lock_irq(&f->lock);
        /* drain the fifo */
        while ((cell = fifo_cell_out(f)) != NULL) {
                snd_seq_cell_free(cell);
        }
        spin_unlock_irq(&f->lock);
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
                          struct snd_seq_event *event)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err;

        if (snd_BUG_ON(!f))
                return -EINVAL;

        snd_use_lock_use(&f->use_lock);
        err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
        if (err < 0) {
                if ((err == -ENOMEM) || (err == -EAGAIN))
                        atomic_inc(&f->overflow);
                snd_use_lock_free(&f->use_lock);
                return err;
        }

        /* append the new cell to the fifo */
        spin_lock_irqsave(&f->lock, flags);
        if (f->tail != NULL)
                f->tail->next = cell;
        f->tail = cell;
        if (f->head == NULL)
                f->head = cell;
        cell->next = NULL;
        f->cells++;
        spin_unlock_irqrestore(&f->lock, flags);

        /* wakeup client */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        snd_use_lock_free(&f->use_lock);

        return 0; /* success */
}
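
/*
 * Hypothetical producer sketch (editor's addition, not part of the original
 * file): feed one event into the FIFO.  snd_seq_fifo_event_in() duplicates
 * the event into a pool cell, so the caller's event may live on the stack;
 * -ENOMEM or -EAGAIN indicate the pool was full and the overflow flag was
 * incremented.  The function name and event type are illustrative only.
 */
static int __maybe_unused example_fifo_feed(struct snd_seq_fifo *f)
{
        struct snd_seq_event ev;

        memset(&ev, 0, sizeof(ev));
        ev.type = SNDRV_SEQ_EVENT_NOTEON;       /* arbitrary example event type */
        return snd_seq_fifo_event_in(f, &ev);   /* copies ev into the fifo; 0 on success */
}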

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;

        if ((cell = f->head) != NULL) {
                f->head = cell->next;

                /* reset tail if this was the last element */
                if (f->tail == cell)
                        f->tail = NULL;

                cell->next = NULL;
                f->cells--;
        }

        return cell;
}

/* dequeue a cell from the fifo; unless nonblock is set, sleep until an event arrives */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
                          struct snd_seq_event_cell **cellp, int nonblock)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        wait_queue_entry_t wait;

        if (snd_BUG_ON(!f))
                return -EINVAL;

        *cellp = NULL;
        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&f->lock, flags);
        while ((cell = fifo_cell_out(f)) == NULL) {
                if (nonblock) {
                        /* non-blocking - return immediately */
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -EAGAIN;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&f->input_sleep, &wait);
                spin_unlock_irqrestore(&f->lock, flags);
                schedule();
                spin_lock_irqsave(&f->lock, flags);
                remove_wait_queue(&f->input_sleep, &wait);
                if (signal_pending(current)) {
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -ERESTARTSYS;
                }
        }
        spin_unlock_irqrestore(&f->lock, flags);
        *cellp = cell;

        return 0;
}
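
/*
 * Hypothetical consumer sketch (editor's addition, not part of the original
 * file): block for the next event, copy the fixed-size event record out of
 * the cell and release the cell back to the pool.  A reader that fails to
 * deliver the event could instead call snd_seq_fifo_cell_putback() so the
 * event is not lost.  The function name is illustrative only.
 */
static int __maybe_unused example_fifo_read_one(struct snd_seq_fifo *f,
                                                struct snd_seq_event *out)
{
        struct snd_seq_event_cell *cell;
        int err;

        err = snd_seq_fifo_cell_out(f, &cell, 0);       /* 0 = blocking mode */
        if (err < 0)
                return err;                             /* -EAGAIN or -ERESTARTSYS */

        *out = cell->event;             /* copy the fixed-size part of the event */
        snd_seq_cell_free(cell);        /* return the cell to the pool */
        return 0;
}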


/* put a dequeued cell back to the head of the fifo (e.g. when it could not be delivered) */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
                               struct snd_seq_event_cell *cell)
{
        unsigned long flags;

        if (cell) {
                spin_lock_irqsave(&f->lock, flags);
                cell->next = f->head;
                f->head = cell;
                if (!f->tail)
                        f->tail = cell;
                f->cells++;
                spin_unlock_irqrestore(&f->lock, flags);
        }
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &f->input_sleep, wait);
        return (f->cells > 0);
}
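
/*
 * Hypothetical poll-callback sketch (editor's addition, not part of the
 * original file): how a file's poll handler might report readability via
 * snd_seq_fifo_poll_wait().  It assumes <linux/poll.h> is reachable through
 * the existing includes; the function name is illustrative only.
 */
static __poll_t __maybe_unused example_fifo_poll(struct file *file,
                                                 struct snd_seq_fifo *f,
                                                 poll_table *wait)
{
        if (snd_seq_fifo_poll_wait(f, file, wait))
                return EPOLLIN | EPOLLRDNORM;   /* at least one event is queued */
        return 0;
}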

/* change the size of the pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
        struct snd_seq_pool *newpool, *oldpool;
        struct snd_seq_event_cell *cell, *next, *oldhead;

        if (snd_BUG_ON(!f || !f->pool))
                return -EINVAL;

        /* allocate new pool */
        newpool = snd_seq_pool_new(poolsize);
        if (newpool == NULL)
                return -ENOMEM;
        if (snd_seq_pool_init(newpool) < 0) {
                snd_seq_pool_delete(&newpool);
                return -ENOMEM;
        }

        spin_lock_irq(&f->lock);
        /* remember old pool */
        oldpool = f->pool;
        oldhead = f->head;
        /* exchange pools */
        f->pool = newpool;
        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;
        /* NOTE: overflow flag is not cleared */
        spin_unlock_irq(&f->lock);

        /* close the old pool and wait until all users are gone */
        snd_seq_pool_mark_closing(oldpool);
        snd_use_lock_sync(&f->use_lock);

        /* release cells in old pool */
        for (cell = oldhead; cell; cell = next) {
                next = cell->next;
                snd_seq_cell_free(cell);
        }
        snd_seq_pool_delete(&oldpool);

        return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
        unsigned long flags;
        int cells;

        if (!f)
                return 0;

        snd_use_lock_use(&f->use_lock);
        spin_lock_irqsave(&f->lock, flags);
        cells = snd_seq_unused_cells(f->pool);
        spin_unlock_irqrestore(&f->lock, flags);
        snd_use_lock_free(&f->use_lock);
        return cells;
}
