root/sound/core/seq/seq_memory.c


DEFINITIONS

This source file includes the following definitions:
  1. snd_seq_pool_available
  2. snd_seq_output_ok
  3. get_var_len
  4. snd_seq_dump_var_event
  5. seq_copy_in_kernel
  6. seq_copy_in_user
  7. snd_seq_expand_var_event
  8. free_cell
  9. snd_seq_cell_free
  10. snd_seq_cell_alloc
  11. snd_seq_event_dup
  12. snd_seq_pool_poll_wait
  13. snd_seq_pool_init
  14. snd_seq_pool_mark_closing
  15. snd_seq_pool_done
  16. snd_seq_pool_new
  17. snd_seq_pool_delete
  18. snd_seq_info_pool

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
        return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
        return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
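
/*
 * Illustration (editor's note, not part of the original source):
 * telling the three layouts apart is a matter of testing the flag bits
 * kept in ext.data.len, as get_var_len() and the dump/expand helpers
 * below do.  "ev" is a hypothetical struct snd_seq_event pointer:
 *
 *      len = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
 *      if (ev->data.ext.len & SNDRV_SEQ_EXT_USRPTR)
 *              ;       // user-space buffer: copy_from_user() from ext.ptr
 *      else if (ev->data.ext.len & SNDRV_SEQ_EXT_CHAINED)
 *              ;       // chained cells: walk ext.ptr -> next -> ...
 *      else
 *              ;       // plain kernel buffer at ext.ptr
 */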

/*
 * exported:
 * call the dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
        if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                return -EINVAL;

        return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
                           snd_seq_dump_func_t func, void *private_data)
{
        int len, err;
        struct snd_seq_event_cell *cell;

        if ((len = get_var_len(event)) <= 0)
                return len;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                char buf[32];
                char __user *curptr = (char __force __user *)event->data.ext.ptr;
                while (len > 0) {
                        int size = sizeof(buf);
                        if (len < size)
                                size = len;
                        if (copy_from_user(buf, curptr, size))
                                return -EFAULT;
                        err = func(private_data, buf, size);
                        if (err < 0)
                                return err;
                        curptr += size;
                        len -= size;
                }
                return 0;
        }
        if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
                return func(private_data, event->data.ext.ptr, len);

        cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
        for (; len > 0 && cell; cell = cell->next) {
                int size = sizeof(struct snd_seq_event);
                if (len < size)
                        size = len;
                err = func(private_data, &cell->event, size);
                if (err < 0)
                        return err;
                len -= size;
        }
        return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
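
/*
 * Usage sketch (editor's addition): callers pass a snd_seq_dump_func_t
 * that receives the payload chunk by chunk, whatever the layout.  The
 * callback and variables below are hypothetical:
 *
 *      static int count_bytes(void *ptr, void *buf, int count)
 *      {
 *              *(int *)ptr += count;   // just total up the chunk sizes
 *              return 0;               // a negative return aborts the dump
 *      }
 *
 *      int total = 0;
 *      int err = snd_seq_dump_var_event(ev, count_bytes, &total);
 */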

/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
        memcpy(*bufptr, src, size);
        *bufptr += size;
        return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
        if (copy_to_user(*bufptr, src, size))
                return -EFAULT;
        *bufptr += size;
        return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
                             int in_kernel, int size_aligned)
{
        int len, newlen;
        int err;

        if ((len = get_var_len(event)) < 0)
                return len;
        newlen = len;
        if (size_aligned > 0)
                newlen = roundup(len, size_aligned);
        if (count < newlen)
                return -EAGAIN;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                if (! in_kernel)
                        return -EINVAL;
                if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
                        return -EFAULT;
                return newlen;
        }
        err = snd_seq_dump_var_event(event,
                                     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
                                     (snd_seq_dump_func_t)seq_copy_in_user,
                                     &buf);
        return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
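
/*
 * Usage sketch (editor's addition): expanding an event's payload into a
 * flat kernel buffer.  The buffer and its size are assumptions for the
 * example; -EAGAIN signals that count is smaller than the (aligned)
 * payload length:
 *
 *      char xbuf[256];
 *      int n = snd_seq_expand_var_event(ev, sizeof(xbuf), xbuf, 1, 0);
 *      if (n >= 0)
 *              ;       // n bytes (len, possibly rounded up) now in xbuf
 */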

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
                             struct snd_seq_event_cell *cell)
{
        cell->next = pool->free;
        pool->free = cell;
        atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
        unsigned long flags;
        struct snd_seq_pool *pool;

        if (snd_BUG_ON(!cell))
                return;
        pool = cell->pool;
        if (snd_BUG_ON(!pool))
                return;

        spin_lock_irqsave(&pool->lock, flags);
        free_cell(pool, cell);
        if (snd_seq_ev_is_variable(&cell->event)) {
                if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
                        struct snd_seq_event_cell *curp, *nextptr;
                        curp = cell->event.data.ext.ptr;
                        for (; curp; curp = nextptr) {
                                nextptr = curp->next;
                                curp->next = pool->free;
                                free_cell(pool, curp);
                        }
                }
        }
        if (waitqueue_active(&pool->output_sleep)) {
                /* has enough space now? */
                if (snd_seq_output_ok(pool))
                        wake_up(&pool->output_sleep);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
}
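
/*
 * Note (editor's addition): when the freed event carried chained cells,
 * the whole chain goes back to the free list in one pass while
 * pool->lock is held.  Sleeping writers are only woken once
 * snd_seq_output_ok() is true again, i.e. at least pool->room cells
 * are free, so a single freed cell does not cause spurious wakeups.
 */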

/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                              struct snd_seq_event_cell **cellp,
                              int nonblock, struct file *file,
                              struct mutex *mutexp)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err = -EAGAIN;
        wait_queue_entry_t wait;

        if (pool == NULL)
                return -EINVAL;

        *cellp = NULL;

        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&pool->lock, flags);
        if (pool->ptr == NULL) {        /* not initialized */
                pr_debug("ALSA: seq: pool is not initialized\n");
                err = -EINVAL;
                goto __error;
        }
        while (pool->free == NULL && ! nonblock && ! pool->closing) {
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&pool->output_sleep, &wait);
                spin_unlock_irqrestore(&pool->lock, flags);
                if (mutexp)
                        mutex_unlock(mutexp);
                schedule();
                if (mutexp)
                        mutex_lock(mutexp);
                spin_lock_irqsave(&pool->lock, flags);
                remove_wait_queue(&pool->output_sleep, &wait);
                /* interrupted? */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto __error;
                }
        }
        if (pool->closing) { /* closing.. */
                err = -ENOMEM;
                goto __error;
        }

        cell = pool->free;
        if (cell) {
                int used;
                pool->free = cell->next;
                atomic_inc(&pool->counter);
                used = atomic_read(&pool->counter);
                if (pool->max_used < used)
                        pool->max_used = used;
                pool->event_alloc_success++;
                /* clear cell pointers */
                cell->next = NULL;
                err = 0;
        } else
                pool->event_alloc_failures++;
        *cellp = cell;

__error:
        spin_unlock_irqrestore(&pool->lock, flags);
        return err;
}
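
/*
 * Note (editor's addition): before schedule() the allocator releases
 * both the pool spinlock and the caller's mutex (when one is passed),
 * re-acquiring them in the reverse order on wakeup.  Sleeping with
 * pool->lock held would deadlock against snd_seq_cell_free(), which
 * must take the same lock to return cells and wake this waiter.
 */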

/*
 * duplicate the event to a cell.
 * If the event has external data, the data is decomposed into
 * additional cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                      struct snd_seq_event_cell **cellp, int nonblock,
                      struct file *file, struct mutex *mutexp)
{
        int ncells, err;
        unsigned int extlen;
        struct snd_seq_event_cell *cell;

        *cellp = NULL;

        ncells = 0;
        extlen = 0;
        if (snd_seq_ev_is_variable(event)) {
                extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
                ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
        }
        if (ncells >= pool->total_elements)
                return -ENOMEM;

        err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
        if (err < 0)
                return err;

        /* copy the event */
        cell->event = *event;

        /* decompose */
        if (snd_seq_ev_is_variable(event)) {
                int len = extlen;
                int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
                int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
                struct snd_seq_event_cell *src, *tmp, *tail;
                char *buf;

                cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
                cell->event.data.ext.ptr = NULL;

                src = (struct snd_seq_event_cell *)event->data.ext.ptr;
                buf = (char *)event->data.ext.ptr;
                tail = NULL;

                while (ncells-- > 0) {
                        int size = sizeof(struct snd_seq_event);
                        if (len < size)
                                size = len;
                        err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
                                                 mutexp);
                        if (err < 0)
                                goto __error;
                        if (cell->event.data.ext.ptr == NULL)
                                cell->event.data.ext.ptr = tmp;
                        if (tail)
                                tail->next = tmp;
                        tail = tmp;
                        /* copy chunk */
                        if (is_chained && src) {
                                tmp->event = src->event;
                                src = src->next;
                        } else if (is_usrptr) {
                                if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
                                        err = -EFAULT;
                                        goto __error;
                                }
                        } else {
                                memcpy(&tmp->event, buf, size);
                        }
                        buf += size;
                        len -= size;
                }
        }

        *cellp = cell;
        return 0;

__error:
        snd_seq_cell_free(cell);
        return err;
}
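
/*
 * Usage sketch (editor's addition): this is the path by which the
 * sequencer core copies an incoming event into pool-backed cells before
 * enqueueing it.  "nonblock" and "mutexp" stand in for the caller's
 * context:
 *
 *      struct snd_seq_event_cell *cell;
 *      int err = snd_seq_event_dup(pool, ev, &cell, nonblock, file, mutexp);
 *      if (err < 0)
 *              return err;     // -EAGAIN, -ENOMEM, -EFAULT, -ERESTARTSYS, ...
 *      // on failure nothing leaks: the partial chain is released
 *      // through snd_seq_cell_free() in the __error path above
 */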

/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &pool->output_sleep, wait);
        return snd_seq_output_ok(pool);
}
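
/*
 * Usage sketch (editor's addition): a client's poll callback would
 * report the pool writable only when the watermark test passes; "mask"
 * and "client" are hypothetical here (see the real caller in
 * seq_clientmgr.c):
 *
 *      if (snd_seq_pool_poll_wait(client->pool, file, wait))
 *              mask |= EPOLLOUT | EPOLLWRNORM;
 */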

/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
        int cell;
        struct snd_seq_event_cell *cellptr;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
                                 GFP_KERNEL);
        if (!cellptr)
                return -ENOMEM;

        /* add new cells to the free cell list */
        spin_lock_irq(&pool->lock);
        if (pool->ptr) {
                spin_unlock_irq(&pool->lock);
                kvfree(cellptr);
                return 0;
        }

        pool->ptr = cellptr;
        pool->free = NULL;

        for (cell = 0; cell < pool->size; cell++) {
                cellptr = pool->ptr + cell;
                cellptr->pool = pool;
                cellptr->next = pool->free;
                pool->free = cellptr;
        }
        pool->room = (pool->size + 1) / 2;

        /* init statistics */
        pool->max_used = 0;
        pool->total_elements = pool->size;
        spin_unlock_irq(&pool->lock);
        return 0;
}
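
/*
 * Note (editor's addition): pool->room is set to half the pool size,
 * rounded up.  snd_seq_output_ok() treats the pool as writable only
 * when at least that many cells are free; this is the condition the
 * free and poll paths check before waking or reporting writers ready.
 */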

/* refuse further insertions to the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
        unsigned long flags;

        if (snd_BUG_ON(!pool))
                return;
        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 1;
        spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
        struct snd_seq_event_cell *ptr;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        /* wake up sleeping threads and wait until all cells are returned */
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);

        while (atomic_read(&pool->counter) > 0)
                schedule_timeout_uninterruptible(1);

        /* release all resources */
        spin_lock_irq(&pool->lock);
        ptr = pool->ptr;
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        spin_unlock_irq(&pool->lock);

        kvfree(ptr);

        spin_lock_irq(&pool->lock);
        pool->closing = 0;
        spin_unlock_irq(&pool->lock);

        return 0;
}

/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
        struct snd_seq_pool *pool;

        /* create pool block */
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;
        spin_lock_init(&pool->lock);
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        atomic_set(&pool->counter, 0);
        pool->closing = 0;
        init_waitqueue_head(&pool->output_sleep);

        pool->size = poolsize;

        /* init statistics */
        pool->max_used = 0;
        return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
        struct snd_seq_pool *pool = *ppool;

        *ppool = NULL;
        if (pool == NULL)
                return 0;
        snd_seq_pool_mark_closing(pool);
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
}
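
/*
 * Lifecycle sketch (editor's addition): a typical owner pairs these
 * calls as below; "events" is the caller-chosen pool size and error
 * handling is elided:
 *
 *      struct snd_seq_pool *pool = snd_seq_pool_new(events);
 *      int err = snd_seq_pool_init(pool);      // carve out the cells
 *      ...     // snd_seq_event_dup() / snd_seq_cell_free() traffic
 *      snd_seq_pool_delete(&pool);     // mark closing, drain, free
 */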

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
                       struct snd_seq_pool *pool, char *space)
{
        if (pool == NULL)
                return;
        snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
        snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
        snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
        snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
        snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
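
/*
 * Example /proc output (editor's illustration; the numbers are made
 * up), as rendered by the snd_iprintf() calls above with an empty
 * "space" prefix:
 *
 *      Pool size          : 500
 *      Cells in use       : 3
 *      Peak cells in use  : 12
 *      Alloc success      : 3456
 *      Alloc failures     : 0
 */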
