/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}
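
/*
 * Example (sketch): snd_seq_pool_init() below sets pool->room to
 * (size + 1) / 2, so a pool of 30 cells reports "output ok" only
 * while at least 15 cells are free.  Writers sleeping in
 * snd_seq_cell_alloc() are woken from snd_seq_cell_free() once this
 * watermark is reached again.
 */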

/*
 * Variable length event:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
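
/*
 * Example (sketch): with the 28-byte struct snd_seq_event, a 100-byte
 * sysex dump stored as chained cells occupies
 * DIV_ROUND_UP(100, sizeof(struct snd_seq_event)) == 4 extra cells;
 * the payload is copied over the event bodies of the chain, and only
 * the head cell carries valid event header fields.
 */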

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
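
/*
 * Usage sketch (hypothetical callback, for illustration only): the
 * dump function receives private_data plus one contiguous chunk at a
 * time and returns 0, or a negative error to abort the dump:
 *
 *	// hypothetical: append one chunk to a flat buffer
 *	static int copy_chunk(void *dstp, void *buf, int count)
 *	{
 *		char **dst = dstp;
 *		memcpy(*dst, buf, count);
 *		*dst += count;
 *		return 0;
 *	}
 *
 *	char *p = buffer;
 *	err = snd_seq_dump_var_event(ev, copy_chunk, &p);
 *
 * seq_copy_in_kernel() below is the in-tree form of such a callback.
 */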


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
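
/*
 * Usage sketch (hypothetical): expanding a variable-length event into
 * a flat kernel-space buffer, byte-aligned:
 *
 *	char xbuf[256];
 *	int n = snd_seq_expand_var_event(ev, sizeof(xbuf), xbuf, 1, 0);
 *	if (n < 0)
 *		return n;	// -EINVAL, -EFAULT, or -EAGAIN (buffer too small)
 *
 * On success the number of copied (and, with size_aligned > 0,
 * rounded-up) bytes is returned.
 */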

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		/* keep the saved irq state across the sleep */
		spin_unlock_irqrestore(&pool->lock, flags);
		schedule();
		spin_lock_irqsave(&pool->lock, flags);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
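/*
 * Example (sketch): with a 28-byte struct snd_seq_event, an event
 * carrying extlen == 100 bytes of external data needs the head cell
 * plus DIV_ROUND_UP(100, 28) == 4 chained cells, i.e. 5 pool elements
 * in total.
 */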
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event));
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate the pool's cell array for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr) {
		/* another thread won the race; keep the existing pool */
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* wait for pool users and free all cells */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for all pool users to finish */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n",
				atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}
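
/*
 * Typical pool life cycle (sketch; see seq_clientmgr.c for the real
 * callers):
 *
 *	struct snd_seq_pool *pool = snd_seq_pool_new(events);
 *	if (!pool)
 *		return -ENOMEM;
 *	err = snd_seq_pool_init(pool);	// allocate the cell array
 *	... snd_seq_event_dup() / snd_seq_cell_free() ...
 *	snd_seq_pool_delete(&pool);	// snd_seq_pool_done() + kfree()
 */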
498
499/* initialize sequencer memory */
500int __init snd_sequencer_memory_init(void)
501{
502	return 0;
503}
504
505/* release sequencer memory */
506void __exit snd_sequencer_memory_done(void)
507{
508}
509
510
511/* exported to seq_clientmgr.c */
512void snd_seq_info_pool(struct snd_info_buffer *buffer,
513		       struct snd_seq_pool *pool, char *space)
514{
515	if (pool == NULL)
516		return;
517	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
518	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
519	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
520	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
521	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
522}
523