This source file includes the following definitions:
- comedi_buf_map_kref_release
- __comedi_buf_free
- comedi_buf_map_alloc
- __comedi_buf_alloc
- comedi_buf_map_get
- comedi_buf_map_put
- comedi_buf_map_access
- comedi_buf_map_from_subdev_get
- comedi_buf_is_mmapped
- comedi_buf_alloc
- comedi_buf_reset
- comedi_buf_write_n_unalloc
- comedi_buf_write_n_available
- comedi_buf_write_alloc
- comedi_buf_munge
- comedi_buf_write_n_allocated
- comedi_buf_write_free
- comedi_buf_read_n_available
- comedi_buf_read_alloc
- comedi_buf_read_n_allocated
- comedi_buf_read_free
- comedi_buf_memcpy_to
- comedi_buf_memcpy_from
- comedi_buf_write_samples
- comedi_buf_read_samples
/*
 * COMEDI (Linux Control and Measurement Device Interface) asynchronous
 * acquisition buffer allocation and ring-buffer management helpers.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		if (bm->dma_dir != DMA_NONE) {
			/*
			 * The DMA buffer was allocated as a single block of
			 * coherent memory, so free it in one call using the
			 * addresses recorded in the first page-list entry.
			 */
			buf = &bm->page_list[0];
			dma_free_coherent(bm->dma_hw_dev,
					  PAGE_SIZE * bm->n_pages,
					  buf->virt_addr, buf->dma_addr);
		} else {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				ClearPageReserved(virt_to_page(buf->virt_addr));
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		if (s->async_dma_dir == DMA_NONE)
			vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
		     unsigned int n_pages)
{
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned int i;

	bm = kzalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm)
		return NULL;

	kref_init(&bm->refcount);
	bm->dma_dir = dma_dir;
	if (bm->dma_dir != DMA_NONE) {
		/* need a reference to the hardware device to free the buffer later */
		bm->dma_hw_dev = get_device(dev->hw_dev);
	}

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (!bm->page_list)
		goto err;

	if (bm->dma_dir != DMA_NONE) {
		void *virt_addr;
		dma_addr_t dma_addr;

		/*
		 * Allocate the whole DMA buffer as a single block of
		 * coherent memory; the page-list entries just point into
		 * the corresponding page-sized pieces of it.
		 */
		virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
					       PAGE_SIZE * n_pages, &dma_addr,
					       GFP_KERNEL);
		if (!virt_addr)
			goto err;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
			buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
		}

		bm->n_pages = i;
	} else {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
			if (!buf->virt_addr)
				break;

			SetPageReserved(virt_to_page(buf->virt_addr));
		}

		bm->n_pages = i;
		if (i < n_pages)
			goto err;
	}

	return bm;

err:
	comedi_buf_map_put(bm);
	return NULL;
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
	if (!bm)
		return;

	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	if (bm->dma_dir != DMA_NONE) {
		/*
		 * The DMA buffer is a single, contiguous block of coherent
		 * memory, so it can be used directly as the preallocated
		 * buffer.
		 */
		buf = &bm->page_list[0];
		async->prealloc_buf = buf->virt_addr;
	} else {
		pages = vmalloc(sizeof(struct page *) * n_pages);
		if (!pages)
			return;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			pages[i] = virt_to_page(buf->virt_addr);
		}

		/* map the individual pages into one contiguous kernel mapping */
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

		vfree(pages);
	}
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/*
 * Copy @len bytes to (if @write is set) or from (if @write is clear) the
 * buffer pages, starting at byte @offset.  Returns the number of bytes
 * actually copied.
 */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}

/* Return the subdevice's buffer map with its reference count incremented. */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages are allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	lockdep_assert_held(&dev->mutex);

	/* round up new_size to a multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

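/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space currently available.
 *
 * Return: The amount of space reserved in bytes.
 */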
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

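/*
 * "Munging" converts raw hardware sample data into the format expected by
 * COMEDI users; it is applied below to newly written data as it is freed by
 * the writer, unless the command uses CMDF_RAWDATA.
 */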
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		count = num_bytes;
	} else {
		/* don't munge partial samples */
		num_bytes -= num_bytes % num_sample_bytes;
		while (count < num_bytes) {
			int block_size = num_bytes - count;
			unsigned int buf_end;

			buf_end = async->prealloc_bufsz - async->munge_ptr;
			if (block_size > buf_end)
				block_size = buf_end;

			s->munge(s->device, s,
				 async->prealloc_buf + async->munge_ptr,
				 block_size, async->munge_chan);

			/*
			 * ensure data is munged in the buffer before the
			 * async buffer munge_count is incremented
			 */
			smp_wmb();

			async->munge_chan += block_size / num_sample_bytes;
			async->munge_chan %= async->cmd.chanlist_len;
			async->munge_count += block_size;
			async->munge_ptr += block_size;
			async->munge_ptr %= async->prealloc_bufsz;
			count += block_size;
		}
	}

	return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

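/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount
 * freed is limited to the amount previously reserved.  The freed space is
 * assumed to have been filled with sample data by the writer and is "munged"
 * here if necessary before it becomes available to the reader.
 *
 * Return: The amount of space freed in bytes.
 */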
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);

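/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition
 * data buffer associated with the subdevice.  Readable space is that which
 * has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space in bytes.
 */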
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

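/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the amount available.
 *
 * Return: The amount of space reserved in bytes.
 */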
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

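/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount freed is limited to the amount previously reserved, and the freed
 * space becomes available for the writer to fill again.
 *
 * Return: The amount of space freed in bytes.
 */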
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of the buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		unsigned int block_size;

		if (write_ptr + num_bytes > async->prealloc_bufsz)
			block_size = async->prealloc_bufsz - write_ptr;
		else
			block_size = num_bytes;

		memcpy(async->prealloc_buf + write_ptr, data, block_size);

		data += block_size;
		num_bytes -= block_size;

		/* any remaining data wraps around to the start of the buffer */
		write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	void *src;
	struct comedi_async *async = s->async;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		unsigned int block_size;

		src = async->prealloc_buf + read_ptr;

		if (nbytes >= async->prealloc_bufsz - read_ptr)
			block_size = async->prealloc_bufsz - read_ptr;
		else
			block_size = nbytes;

		memcpy(dest, src, block_size);
		nbytes -= block_size;
		dest += block_size;
		/* any remaining data wraps around to the start of the buffer */
		read_ptr = 0;
	}
}

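/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark the space as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number written is limited to the number that will
 * fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the acquisition
 * to terminate with an overrun error.  If any samples are written, the
 * %COMEDI_CB_BLOCK event flag is set.
 *
 * Return: The amount of data written in bytes.
 */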
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that do fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);

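/*
 * Usage sketch (hypothetical driver code, not part of this file): a typical
 * analog-input interrupt handler drains the hardware FIFO into a local
 * array, pushes the samples into the async buffer and then lets the core
 * process the resulting events.  read_hw_fifo() is a made-up placeholder
 * for the driver's hardware access:
 *
 *	unsigned short samples[32];
 *	unsigned int n = read_hw_fifo(dev, samples, ARRAY_SIZE(samples));
 *
 *	comedi_buf_write_samples(s, samples, n);
 *	comedi_handle_events(dev, s);
 */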
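/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark the buffer space as read and update
 * the acquisition scan progress.  If any samples are read, the
 * %COMEDI_CB_BLOCK event flag is set.
 *
 * Return: The amount of data read in bytes.
 */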
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* limit nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
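/*
 * Usage sketch (hypothetical driver code, not part of this file): a typical
 * analog-output interrupt handler refills the hardware FIFO from the async
 * buffer.  Note that the return value is in bytes, so it is converted back
 * to a sample count here; write_hw_fifo() is a made-up placeholder for the
 * driver's hardware access:
 *
 *	unsigned short samples[32];
 *	unsigned int nbytes = comedi_buf_read_samples(s, samples,
 *						      ARRAY_SIZE(samples));
 *	unsigned int n = comedi_bytes_to_samples(s, nbytes);
 *
 *	write_hw_fifo(dev, samples, n);
 *	comedi_handle_events(dev, s);
 */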