This source file includes the following definitions.
- iio_buffer_to_hw_consumer_buffer
- iio_hw_buf_release
- iio_hw_consumer_get_buffer
- iio_hw_consumer_alloc
- iio_hw_consumer_free
- devm_iio_hw_consumer_release
- devm_iio_hw_consumer_match
- devm_iio_hw_consumer_alloc
- devm_iio_hw_consumer_free
- iio_hw_consumer_enable
- iio_hw_consumer_disable
1
2
3
4
5
6
7 #include <linux/err.h>
8 #include <linux/export.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11
12 #include <linux/iio/iio.h>
13 #include <linux/iio/consumer.h>
14 #include <linux/iio/hw-consumer.h>
15 #include <linux/iio/buffer_impl.h>
16
17
18
19
20
21
/**
 * struct iio_hw_consumer - IIO hardware consumer
 * @buffers: list of per-provider-device &struct hw_consumer_buffer entries
 * @channels: NULL-indio_dev-terminated array obtained from
 *            iio_channel_get_all()
 */
struct iio_hw_consumer {
	struct list_head buffers;
	struct iio_channel *channels;
};
26
/**
 * struct hw_consumer_buffer - scan buffer tracked per provider device
 * @head: entry in the owning iio_hw_consumer's @buffers list
 * @indio_dev: provider device this buffer reserves channels on
 * @buffer: embedded IIO buffer handed to iio_update_buffers()
 * @scan_mask: flexible-array storage backing @buffer.scan_mask; sized at
 *             allocation time from the provider's masklength
 */
struct hw_consumer_buffer {
	struct list_head head;
	struct iio_dev *indio_dev;
	struct iio_buffer buffer;
	long scan_mask[];
};
33
/* Map an embedded &struct iio_buffer back to its hw_consumer_buffer. */
static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
	struct iio_buffer *buffer)
{
	return container_of(buffer, struct hw_consumer_buffer, buffer);
}
39
/*
 * iio_buffer .release callback: invoked when the buffer's last reference
 * is dropped; frees the containing hw_consumer_buffer allocation.
 */
static void iio_hw_buf_release(struct iio_buffer *buffer)
{
	kfree(iio_buffer_to_hw_consumer_buffer(buffer));
}
46
/*
 * Buffer ops for the hardware-consumer buffer. Only .release is provided;
 * no data-access callbacks are set, and the buffer is restricted to
 * INDIO_BUFFER_HARDWARE mode.
 */
static const struct iio_buffer_access_funcs iio_hw_buf_access = {
	.release = &iio_hw_buf_release,
	.modes = INDIO_BUFFER_HARDWARE,
};
51
52 static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
53 struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
54 {
55 size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
56 struct hw_consumer_buffer *buf;
57
58 list_for_each_entry(buf, &hwc->buffers, head) {
59 if (buf->indio_dev == indio_dev)
60 return buf;
61 }
62
63 buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
64 if (!buf)
65 return NULL;
66
67 buf->buffer.access = &iio_hw_buf_access;
68 buf->indio_dev = indio_dev;
69 buf->buffer.scan_mask = buf->scan_mask;
70
71 iio_buffer_init(&buf->buffer);
72 list_add_tail(&buf->head, &hwc->buffers);
73
74 return buf;
75 }
76
77
78
79
80
81
82
83 struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
84 {
85 struct hw_consumer_buffer *buf;
86 struct iio_hw_consumer *hwc;
87 struct iio_channel *chan;
88 int ret;
89
90 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
91 if (!hwc)
92 return ERR_PTR(-ENOMEM);
93
94 INIT_LIST_HEAD(&hwc->buffers);
95
96 hwc->channels = iio_channel_get_all(dev);
97 if (IS_ERR(hwc->channels)) {
98 ret = PTR_ERR(hwc->channels);
99 goto err_free_hwc;
100 }
101
102 chan = &hwc->channels[0];
103 while (chan->indio_dev) {
104 buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
105 if (!buf) {
106 ret = -ENOMEM;
107 goto err_put_buffers;
108 }
109 set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
110 chan++;
111 }
112
113 return hwc;
114
115 err_put_buffers:
116 list_for_each_entry(buf, &hwc->buffers, head)
117 iio_buffer_put(&buf->buffer);
118 iio_channel_release_all(hwc->channels);
119 err_free_hwc:
120 kfree(hwc);
121 return ERR_PTR(ret);
122 }
123 EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
124
125
126
127
128
129 void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
130 {
131 struct hw_consumer_buffer *buf, *n;
132
133 iio_channel_release_all(hwc->channels);
134 list_for_each_entry_safe(buf, n, &hwc->buffers, head)
135 iio_buffer_put(&buf->buffer);
136 kfree(hwc);
137 }
138 EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
139
/* devres release callback: @res holds a pointer to the iio_hw_consumer. */
static void devm_iio_hw_consumer_release(struct device *dev, void *res)
{
	struct iio_hw_consumer *hwc = *(struct iio_hw_consumer **)res;

	iio_hw_consumer_free(hwc);
}
144
/* devres match callback: true when @res wraps the consumer in @data. */
static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
{
	struct iio_hw_consumer **this = res;

	/* A NULL resource or payload indicates devres misuse; warn and skip. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
155
156
157
158
159
160
161
162
163
164
165
166
167
168 struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
169 {
170 struct iio_hw_consumer **ptr, *iio_hwc;
171
172 ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
173 GFP_KERNEL);
174 if (!ptr)
175 return NULL;
176
177 iio_hwc = iio_hw_consumer_alloc(dev);
178 if (IS_ERR(iio_hwc)) {
179 devres_free(ptr);
180 } else {
181 *ptr = iio_hwc;
182 devres_add(dev, ptr);
183 }
184
185 return iio_hwc;
186 }
187 EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
188
189
190
191
192
193
194
195
196 void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
197 {
198 int rc;
199
200 rc = devres_release(dev, devm_iio_hw_consumer_release,
201 devm_iio_hw_consumer_match, hwc);
202 WARN_ON(rc);
203 }
204 EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
205
206
207
208
209
210
211
/**
 * iio_hw_consumer_enable() - Enable the hardware consumer
 * @hwc: iio_hw_consumer to enable.
 *
 * Attaches every per-provider buffer via iio_update_buffers(). On failure
 * all buffers attached so far are detached again.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
{
	struct hw_consumer_buffer *buf;
	int ret;

	list_for_each_entry(buf, &hwc->buffers, head) {
		ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
		if (ret)
			goto err_disable_buffers;
	}

	return 0;

err_disable_buffers:
	/*
	 * Walk backwards from the entry that failed, detaching only the
	 * buffers that were successfully attached before it.
	 */
	list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
		iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
231
232
233
234
235
236 void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
237 {
238 struct hw_consumer_buffer *buf;
239
240 list_for_each_entry(buf, &hwc->buffers, head)
241 iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
242 }
243 EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
244
245 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
246 MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework");
247 MODULE_LICENSE("GPL v2");