1/*
2 * comedi_buf.c
3 *
4 * COMEDI - Linux Control and Measurement Device Interface
5 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
6 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/vmalloc.h>
20#include <linux/slab.h>
21
22#include "comedidev.h"
23#include "comedi_internal.h"
24
25#ifdef PAGE_KERNEL_NOCACHE
26#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
27#else
28#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
29#endif
30
/*
 * Final kref release callback for a comedi_buf_map: frees every buffer
 * page (via the DMA API or the page allocator, matching how it was
 * allocated), releases the page list, drops the hardware device
 * reference taken for DMA buffers, and frees the map itself.
 */
static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		for (i = 0; i < bm->n_pages; i++) {
			buf = &bm->page_list[i];
			/* undo the PG_reserved mark set at allocation time */
			clear_bit(PG_reserved,
				  &(virt_to_page(buf->virt_addr)->flags));
			if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
				dma_free_coherent(bm->dma_hw_dev,
						  PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
#endif
			} else {
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		/* drop the get_device() ref taken in __comedi_buf_alloc() */
		put_device(bm->dma_hw_dev);
	kfree(bm);
}
60
61static void __comedi_buf_free(struct comedi_device *dev,
62			      struct comedi_subdevice *s)
63{
64	struct comedi_async *async = s->async;
65	struct comedi_buf_map *bm;
66	unsigned long flags;
67
68	if (async->prealloc_buf) {
69		vunmap(async->prealloc_buf);
70		async->prealloc_buf = NULL;
71		async->prealloc_bufsz = 0;
72	}
73
74	spin_lock_irqsave(&s->spin_lock, flags);
75	bm = async->buf_map;
76	async->buf_map = NULL;
77	spin_unlock_irqrestore(&s->spin_lock, flags);
78	comedi_buf_map_put(bm);
79}
80
/*
 * Allocate @n_pages buffer pages for the subdevice's async buffer and
 * vmap them into a single contiguous kernel address range.  On success,
 * s->async->prealloc_buf points at the mapped buffer; on failure it is
 * left NULL (partial allocations are cleaned up later by
 * __comedi_buf_free() via comedi_buf_map_put()).
 */
static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned i;

	/* DMA-direction buffers are impossible without the DMA API. */
	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
	if (!bm)
		return;

	/* Publish the (still empty) map so the release path can find it. */
	kref_init(&bm->refcount);
	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	bm->dma_dir = s->async_dma_dir;
	if (bm->dma_dir != DMA_NONE)
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);

	/* Per-page bookkeeping plus a temporary array for vmap(). */
	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (bm->page_list)
		pages = vmalloc(sizeof(struct page *) * n_pages);

	if (!pages)
		return;

	for (i = 0; i < n_pages; i++) {
		buf = &bm->page_list[i];
		if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
			buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
							    PAGE_SIZE,
							    &buf->dma_addr,
							    GFP_KERNEL |
							    __GFP_COMP);
#else
			break;
#endif
		else
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
		if (!buf->virt_addr)
			break;

		/* Mark reserved so the page is skipped by swap-out etc. */
		set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

		pages[i] = virt_to_page(buf->virt_addr);
	}
	/* Record how many pages actually got allocated (may be < n_pages). */
	spin_lock_irqsave(&s->spin_lock, flags);
	bm->n_pages = i;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	/* vmap the prealloc_buf if all the pages were allocated */
	if (i == n_pages)
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

	vfree(pages);
}
150
151void comedi_buf_map_get(struct comedi_buf_map *bm)
152{
153	if (bm)
154		kref_get(&bm->refcount);
155}
156
157int comedi_buf_map_put(struct comedi_buf_map *bm)
158{
159	if (bm)
160		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
161	return 1;
162}
163
164/* returns s->async->buf_map and increments its kref refcount */
165struct comedi_buf_map *
166comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
167{
168	struct comedi_async *async = s->async;
169	struct comedi_buf_map *bm = NULL;
170	unsigned long flags;
171
172	if (!async)
173		return NULL;
174
175	spin_lock_irqsave(&s->spin_lock, flags);
176	bm = async->buf_map;
177	/* only want it if buffer pages allocated */
178	if (bm && bm->n_pages)
179		comedi_buf_map_get(bm);
180	else
181		bm = NULL;
182	spin_unlock_irqrestore(&s->spin_lock, flags);
183
184	return bm;
185}
186
187bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
188{
189	struct comedi_buf_map *bm = s->async->buf_map;
190
191	return bm && (atomic_read(&bm->refcount.refcount) > 1);
192}
193
194int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
195		     unsigned long new_size)
196{
197	struct comedi_async *async = s->async;
198
199	/* Round up new_size to multiple of PAGE_SIZE */
200	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
201
202	/* if no change is required, do nothing */
203	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
204		return 0;
205
206	/* deallocate old buffer */
207	__comedi_buf_free(dev, s);
208
209	/* allocate new buffer */
210	if (new_size) {
211		unsigned n_pages = new_size >> PAGE_SHIFT;
212
213		__comedi_buf_alloc(dev, s, n_pages);
214
215		if (!async->prealloc_buf) {
216			/* allocation failed */
217			__comedi_buf_free(dev, s);
218			return -ENOMEM;
219		}
220	}
221	async->prealloc_bufsz = new_size;
222
223	return 0;
224}
225
226void comedi_buf_reset(struct comedi_subdevice *s)
227{
228	struct comedi_async *async = s->async;
229
230	async->buf_write_alloc_count = 0;
231	async->buf_write_count = 0;
232	async->buf_read_alloc_count = 0;
233	async->buf_read_count = 0;
234
235	async->buf_write_ptr = 0;
236	async->buf_read_ptr = 0;
237
238	async->cur_chan = 0;
239	async->scans_done = 0;
240	async->scan_progress = 0;
241	async->munge_chan = 0;
242	async->munge_count = 0;
243	async->munge_ptr = 0;
244
245	async->events = 0;
246}
247
248static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
249{
250	struct comedi_async *async = s->async;
251	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
252
253	return free_end - async->buf_write_alloc_count;
254}
255
256/* allocates chunk for the writer from free buffer space */
257unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
258				    unsigned int nbytes)
259{
260	struct comedi_async *async = s->async;
261	unsigned int available = comedi_buf_write_n_available(s);
262
263	if (nbytes > available)
264		nbytes = available;
265
266	async->buf_write_alloc_count += nbytes;
267
268	/*
269	 * ensure the async buffer 'counts' are read and updated
270	 * before we write data to the write-alloc'ed buffer space
271	 */
272	smp_mb();
273
274	return nbytes;
275}
276EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
277
278/*
279 * munging is applied to data by core as it passes between user
280 * and kernel space
281 */
282static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
283				     unsigned int num_bytes)
284{
285	struct comedi_async *async = s->async;
286	unsigned int count = 0;
287	const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
288
289	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
290		async->munge_count += num_bytes;
291		count = num_bytes;
292	} else {
293		/* don't munge partial samples */
294		num_bytes -= num_bytes % num_sample_bytes;
295		while (count < num_bytes) {
296			int block_size = num_bytes - count;
297			unsigned int buf_end;
298
299			buf_end = async->prealloc_bufsz - async->munge_ptr;
300			if (block_size > buf_end)
301				block_size = buf_end;
302
303			s->munge(s->device, s,
304				 async->prealloc_buf + async->munge_ptr,
305				 block_size, async->munge_chan);
306
307			/*
308			 * ensure data is munged in buffer before the
309			 * async buffer munge_count is incremented
310			 */
311			smp_wmb();
312
313			async->munge_chan += block_size / num_sample_bytes;
314			async->munge_chan %= async->cmd.chanlist_len;
315			async->munge_count += block_size;
316			async->munge_ptr += block_size;
317			async->munge_ptr %= async->prealloc_bufsz;
318			count += block_size;
319		}
320	}
321
322	return count;
323}
324
325unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
326{
327	struct comedi_async *async = s->async;
328
329	return async->buf_write_alloc_count - async->buf_write_count;
330}
331
332/* transfers a chunk from writer to filled buffer space */
333unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
334				   unsigned int nbytes)
335{
336	struct comedi_async *async = s->async;
337	unsigned int allocated = comedi_buf_write_n_allocated(s);
338
339	if (nbytes > allocated)
340		nbytes = allocated;
341
342	async->buf_write_count += nbytes;
343	async->buf_write_ptr += nbytes;
344	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
345	if (async->buf_write_ptr >= async->prealloc_bufsz)
346		async->buf_write_ptr %= async->prealloc_bufsz;
347
348	return nbytes;
349}
350EXPORT_SYMBOL_GPL(comedi_buf_write_free);
351
352unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
353{
354	struct comedi_async *async = s->async;
355	unsigned num_bytes;
356
357	if (!async)
358		return 0;
359
360	num_bytes = async->munge_count - async->buf_read_count;
361
362	/*
363	 * ensure the async buffer 'counts' are read before we
364	 * attempt to read data from the buffer
365	 */
366	smp_rmb();
367
368	return num_bytes;
369}
370EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
371
372/* allocates a chunk for the reader from filled (and munged) buffer space */
373unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
374				   unsigned int nbytes)
375{
376	struct comedi_async *async = s->async;
377	unsigned int available;
378
379	available = async->munge_count - async->buf_read_alloc_count;
380	if (nbytes > available)
381		nbytes = available;
382
383	async->buf_read_alloc_count += nbytes;
384
385	/*
386	 * ensure the async buffer 'counts' are read before we
387	 * attempt to read data from the read-alloc'ed buffer space
388	 */
389	smp_rmb();
390
391	return nbytes;
392}
393EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
394
395static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
396{
397	return async->buf_read_alloc_count - async->buf_read_count;
398}
399
400/* transfers control of a chunk from reader to free buffer space */
401unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
402				  unsigned int nbytes)
403{
404	struct comedi_async *async = s->async;
405	unsigned int allocated;
406
407	/*
408	 * ensure data has been read out of buffer before
409	 * the async read count is incremented
410	 */
411	smp_mb();
412
413	allocated = comedi_buf_read_n_allocated(async);
414	if (nbytes > allocated)
415		nbytes = allocated;
416
417	async->buf_read_count += nbytes;
418	async->buf_read_ptr += nbytes;
419	async->buf_read_ptr %= async->prealloc_bufsz;
420	return nbytes;
421}
422EXPORT_SYMBOL_GPL(comedi_buf_read_free);
423
424static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
425				 const void *data, unsigned int num_bytes)
426{
427	struct comedi_async *async = s->async;
428	unsigned int write_ptr = async->buf_write_ptr;
429
430	while (num_bytes) {
431		unsigned int block_size;
432
433		if (write_ptr + num_bytes > async->prealloc_bufsz)
434			block_size = async->prealloc_bufsz - write_ptr;
435		else
436			block_size = num_bytes;
437
438		memcpy(async->prealloc_buf + write_ptr, data, block_size);
439
440		data += block_size;
441		num_bytes -= block_size;
442
443		write_ptr = 0;
444	}
445}
446
447static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
448				   void *dest, unsigned int nbytes)
449{
450	void *src;
451	struct comedi_async *async = s->async;
452	unsigned int read_ptr = async->buf_read_ptr;
453
454	while (nbytes) {
455		unsigned int block_size;
456
457		src = async->prealloc_buf + read_ptr;
458
459		if (nbytes >= async->prealloc_bufsz - read_ptr)
460			block_size = async->prealloc_bufsz - read_ptr;
461		else
462			block_size = nbytes;
463
464		memcpy(dest, src, block_size);
465		nbytes -= block_size;
466		dest += block_size;
467		read_ptr = 0;
468	}
469}
470
471/**
472 * comedi_buf_write_samples - write sample data to comedi buffer
473 * @s: comedi_subdevice struct
474 * @data: samples
475 * @nsamples: number of samples
476 *
477 * Writes nsamples to the comedi buffer associated with the subdevice, marks
478 * it as written and updates the acquisition scan progress.
479 *
480 * Returns the amount of data written in bytes.
481 */
482unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
483				      const void *data, unsigned int nsamples)
484{
485	unsigned int max_samples;
486	unsigned int nbytes;
487
488	/*
489	 * Make sure there is enough room in the buffer for all the samples.
490	 * If not, clamp the nsamples to the number that will fit, flag the
491	 * buffer overrun and add the samples that fit.
492	 */
493	max_samples = comedi_bytes_to_samples(s,
494					      comedi_buf_write_n_available(s));
495	if (nsamples > max_samples) {
496		dev_warn(s->device->class_dev, "buffer overrun\n");
497		s->async->events |= COMEDI_CB_OVERFLOW;
498		nsamples = max_samples;
499	}
500
501	if (nsamples == 0)
502		return 0;
503
504	nbytes = comedi_buf_write_alloc(s,
505					comedi_samples_to_bytes(s, nsamples));
506	comedi_buf_memcpy_to(s, data, nbytes);
507	comedi_buf_write_free(s, nbytes);
508	comedi_inc_scan_progress(s, nbytes);
509	s->async->events |= COMEDI_CB_BLOCK;
510
511	return nbytes;
512}
513EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
514
515/**
516 * comedi_buf_read_samples - read sample data from comedi buffer
517 * @s: comedi_subdevice struct
518 * @data: destination
519 * @nsamples: maximum number of samples to read
520 *
521 * Reads up to nsamples from the comedi buffer associated with the subdevice,
522 * marks it as read and updates the acquisition scan progress.
523 *
524 * Returns the amount of data read in bytes.
525 */
526unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
527				     void *data, unsigned int nsamples)
528{
529	unsigned int max_samples;
530	unsigned int nbytes;
531
532	/* clamp nsamples to the number of full samples available */
533	max_samples = comedi_bytes_to_samples(s,
534					      comedi_buf_read_n_available(s));
535	if (nsamples > max_samples)
536		nsamples = max_samples;
537
538	if (nsamples == 0)
539		return 0;
540
541	nbytes = comedi_buf_read_alloc(s,
542				       comedi_samples_to_bytes(s, nsamples));
543	comedi_buf_memcpy_from(s, data, nbytes);
544	comedi_buf_read_free(s, nbytes);
545	comedi_inc_scan_progress(s, nbytes);
546	s->async->events |= COMEDI_CB_BLOCK;
547
548	return nbytes;
549}
550EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
551