/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: number of bytes read on success, 0 to signal end of file, or a
 *	   negative error code on failure
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	do {
		ret = wait_event_interruptible(rb->pollq,
		      iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
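
/*
 * Usage note: iio_buffer_init() is meant to be called by a buffer
 * implementation once it has allocated its embedded struct iio_buffer.
 * As an illustrative sketch only (the my_* names below are hypothetical,
 * not part of this file), an allocation helper might look like:
 *
 *	struct my_buffer *kf = kzalloc(sizeof(*kf), GFP_KERNEL);
 *
 *	if (!kf)
 *		return NULL;
 *	kf->buffer.access = &my_buffer_access_funcs;
 *	iio_buffer_init(&kf->buffer);
 *	return &kf->buffer;
 */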

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* NULL is used as the error indicator, since it is never a valid mask. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask,
					  bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}

struct iio_device_config {
	unsigned int mode;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
		list_is_singular(&indio_dev->buffer_list))
			return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
	}

	if (insert_buffer)
		modes &= insert_buffer->access->modes;

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
				    indio_dev->masklength,
				    compound_mask,
				    strict_scanmask);
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
				    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: postenable failed (%d)\n", ret);
			goto err_run_postdisable;
		}
	}

	return 0;

err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
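
/*
 * Usage note: in-kernel consumers attach a buffer by passing it as
 * insert_buffer and detach it by passing it as remove_buffer. A rough
 * sketch only (my_cb_buffer is a hypothetical consumer buffer, not
 * defined in this file):
 *
 *	To attach:  ret = iio_update_buffers(indio_dev, my_cb_buffer, NULL);
 *	To detach:  ret = iio_update_buffers(indio_dev, NULL, my_cb_buffer);
 */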

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Add scan element sysfs attributes for each channel. */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
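
/*
 * Usage note: a driver whose hardware can sample only one channel at a time
 * would typically wire this helper into its buffer setup ops, e.g. (sketch
 * only, the my_* names are hypothetical):
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.preenable = my_buffer_preenable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */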

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request less samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
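
/*
 * Usage note: iio_push_to_buffers() is typically called from a trigger
 * handler once a full scan has been assembled, laid out according to the
 * active scan mask. A rough sketch (st->scan_data is a hypothetical
 * driver-private buffer, not part of this file):
 *
 *	iio_push_to_buffers(indio_dev, st->scan_data);
 *	iio_trigger_notify_done(indio_dev->trig);
 */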

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
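
/*
 * Usage note: any code that stores a long-lived pointer to a buffer should
 * take its own reference and drop it when done, e.g. (sketch only, priv is
 * a hypothetical private structure):
 *
 *	priv->buffer = iio_buffer_get(buffer);
 *	...
 *	iio_buffer_put(priv->buffer);
 *
 * iio_buffer_activate() and iio_buffer_deactivate() above follow the same
 * pattern for buffers on the device's buffer_list.
 */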