This source file includes the following definitions:
- ch_has_mbo
- ch_get_mbo
- get_channel
- stop_channel
- destroy_cdev
- destroy_channel
- comp_open
- comp_close
- comp_write
- comp_read
- comp_poll
- comp_disconnect_channel
- comp_rx_completion
- comp_tx_completion
- comp_probe
- mod_init
- mod_exit

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include "most/core.h"

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
        dev_t devno;
        struct ida minor_id;
        unsigned int major;
        struct class *class;
        struct core_component cc;
} comp;

struct comp_channel {
        wait_queue_head_t wq;
        spinlock_t unlink;
        struct cdev cdev;
        struct device *dev;
        struct mutex io_mutex;
        struct most_interface *iface;
        struct most_channel_config *cfg;
        unsigned int channel_id;
        dev_t devno;
        size_t mbo_offs;
        DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
        int access_ref;
        struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

static inline bool ch_has_mbo(struct comp_channel *c)
{
        return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

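/**
 * ch_get_mbo - get a buffer object for writing
 * @c: channel object
 * @mbo: double pointer that receives the buffer object
 *
 * Reuses the MBO at the head of the channel fifo if one is pending;
 * otherwise requests a new MBO from the core and, if one is available,
 * queues it in the fifo.
 */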
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
        if (!kfifo_peek(&c->fifo, mbo)) {
                *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
                if (*mbo)
                        kfifo_in(&c->fifo, mbo, 1);
        }
        return *mbo;
}

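/**
 * get_channel - look up the channel object for an interface/channel pair
 * @iface: interface instance the channel belongs to
 * @id: channel index
 *
 * Walks the global channel list under the list lock and returns the
 * matching channel object, or NULL if none is registered.
 */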
static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
        struct comp_channel *c, *tmp;
        unsigned long flags;
        int found_channel = 0;

        spin_lock_irqsave(&ch_list_lock, flags);
        list_for_each_entry_safe(c, tmp, &channel_list, list) {
                if ((c->iface == iface) && (c->channel_id == id)) {
                        found_channel = 1;
                        break;
                }
        }
        spin_unlock_irqrestore(&ch_list_lock, flags);
        if (!found_channel)
                return NULL;
        return c;
}

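/**
 * stop_channel - return all queued buffers and stop the channel
 * @c: channel object
 *
 * Drains the channel fifo, returns every pending MBO to the core and
 * stops the channel in the core.
 */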
static void stop_channel(struct comp_channel *c)
{
        struct mbo *mbo;

        while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
                most_put_mbo(mbo);
        most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
        unsigned long flags;

        device_destroy(comp.class, c->devno);
        cdev_del(&c->cdev);
        spin_lock_irqsave(&ch_list_lock, flags);
        list_del(&c->list);
        spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
        ida_simple_remove(&comp.minor_id, MINOR(c->devno));
        kfifo_free(&c->fifo);
        kfree(c);
}

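/**
 * comp_open - implement the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * Stores the channel object in the private data of the file, checks the
 * access mode against the channel direction and starts the channel in the
 * core. Only one opener per channel is allowed.
 */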
static int comp_open(struct inode *inode, struct file *filp)
{
        struct comp_channel *c;
        int ret;

        c = to_channel(inode->i_cdev);
        filp->private_data = c;

        if (((c->cfg->direction == MOST_CH_RX) &&
             ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
            ((c->cfg->direction == MOST_CH_TX) &&
             ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
                pr_info("WARN: Access flags mismatch\n");
                return -EACCES;
        }

        mutex_lock(&c->io_mutex);
        if (!c->dev) {
                pr_info("WARN: Device is destroyed\n");
                mutex_unlock(&c->io_mutex);
                return -ENODEV;
        }

        if (c->access_ref) {
                pr_info("WARN: Device is busy\n");
                mutex_unlock(&c->io_mutex);
                return -EBUSY;
        }

        c->mbo_offs = 0;
        ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
        if (!ret)
                c->access_ref = 1;
        mutex_unlock(&c->io_mutex);
        return ret;
}

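/**
 * comp_close - implement the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * Drops the access reference and stops the channel in the core. If the
 * device has already been disconnected, the channel object is freed here
 * instead.
 */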
static int comp_close(struct inode *inode, struct file *filp)
{
        struct comp_channel *c = to_channel(inode->i_cdev);

        mutex_lock(&c->io_mutex);
        spin_lock(&c->unlink);
        c->access_ref = 0;
        spin_unlock(&c->unlink);
        if (c->dev) {
                stop_channel(c);
                mutex_unlock(&c->io_mutex);
        } else {
                mutex_unlock(&c->io_mutex);
                destroy_channel(c);
        }
        return 0;
}

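/**
 * comp_write - implement the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 *
 * Copies data from user space into the current MBO and submits it to the
 * core once the buffer is full or the channel carries control/async data.
 * Blocks until a buffer is available unless O_NONBLOCK is set.
 */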
static ssize_t comp_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *offset)
{
        int ret;
        size_t to_copy, left;
        struct mbo *mbo = NULL;
        struct comp_channel *c = filp->private_data;

        mutex_lock(&c->io_mutex);
        while (c->dev && !ch_get_mbo(c, &mbo)) {
                mutex_unlock(&c->io_mutex);

                if ((filp->f_flags & O_NONBLOCK))
                        return -EAGAIN;
                if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
                        return -ERESTARTSYS;
                mutex_lock(&c->io_mutex);
        }

        if (unlikely(!c->dev)) {
                ret = -ENODEV;
                goto unlock;
        }

        to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
        left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
        if (left == to_copy) {
                ret = -EFAULT;
                goto unlock;
        }

        c->mbo_offs += to_copy - left;
        if (c->mbo_offs >= c->cfg->buffer_size ||
            c->cfg->data_type == MOST_CH_CONTROL ||
            c->cfg->data_type == MOST_CH_ASYNC) {
                kfifo_skip(&c->fifo);
                mbo->buffer_length = c->mbo_offs;
                c->mbo_offs = 0;
                most_submit_mbo(mbo);
        }

        ret = to_copy - left;
unlock:
        mutex_unlock(&c->io_mutex);
        return ret;
}

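/**
 * comp_read - implement the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 *
 * Copies data from the MBO at the head of the channel fifo to user space
 * and returns the MBO to the core once it has been consumed completely.
 * Blocks until data arrives unless O_NONBLOCK is set.
 */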
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
        size_t to_copy, not_copied, copied;
        struct mbo *mbo = NULL;
        struct comp_channel *c = filp->private_data;

        mutex_lock(&c->io_mutex);
        while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
                mutex_unlock(&c->io_mutex);
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                if (wait_event_interruptible(c->wq,
                                             (!kfifo_is_empty(&c->fifo) ||
                                              (!c->dev))))
                        return -ERESTARTSYS;
                mutex_lock(&c->io_mutex);
        }

        if (unlikely(!c->dev)) {
                mutex_unlock(&c->io_mutex);
                return -ENODEV;
        }

        to_copy = min_t(size_t,
                        count,
                        mbo->processed_length - c->mbo_offs);

        not_copied = copy_to_user(buf,
                                  mbo->virt_address + c->mbo_offs,
                                  to_copy);

        copied = to_copy - not_copied;

        c->mbo_offs += copied;
        if (c->mbo_offs >= mbo->processed_length) {
                kfifo_skip(&c->fifo);
                most_put_mbo(mbo);
                c->mbo_offs = 0;
        }
        mutex_unlock(&c->io_mutex);
        return copied;
}

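/**
 * comp_poll - implement the syscall to poll/select the device
 * @filp: file pointer
 * @wait: poll table
 *
 * Reports EPOLLIN when data is available on an RX channel and EPOLLOUT
 * when a buffer can be written on a TX channel. A disconnected device is
 * always reported as ready so that blocked callers can return.
 */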
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
        struct comp_channel *c = filp->private_data;
        __poll_t mask = 0;

        poll_wait(filp, &c->wq, wait);

        mutex_lock(&c->io_mutex);
        if (c->cfg->direction == MOST_CH_RX) {
                if (!c->dev || !kfifo_is_empty(&c->fifo))
                        mask |= EPOLLIN | EPOLLRDNORM;
        } else {
                if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
                        mask |= EPOLLOUT | EPOLLWRNORM;
        }
        mutex_unlock(&c->io_mutex);
        return mask;
}

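/*
 * File operations of the character device node created for each channel.
 */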
static const struct file_operations channel_fops = {
        .owner = THIS_MODULE,
        .read = comp_read,
        .write = comp_write,
        .open = comp_open,
        .release = comp_close,
        .poll = comp_poll,
};

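/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to the interface instance
 * @channel_id: channel index
 *
 * Marks the device as gone, removes the cdev that represents the channel
 * in user space and, if nobody holds it open, frees the channel object.
 */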
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
        struct comp_channel *c;

        if (!iface) {
                pr_info("Bad interface pointer\n");
                return -EINVAL;
        }

        c = get_channel(iface, channel_id);
        if (!c)
                return -ENXIO;

        mutex_lock(&c->io_mutex);
        spin_lock(&c->unlink);
        c->dev = NULL;
        spin_unlock(&c->unlink);
        destroy_cdev(c);
        if (c->access_ref) {
                stop_channel(c);
                wake_up_interruptible(&c->wq);
                mutex_unlock(&c->io_mutex);
        } else {
                mutex_unlock(&c->io_mutex);
                destroy_channel(c);
        }
        return 0;
}

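/**
 * comp_rx_completion - completion handler for RX channels
 * @mbo: buffer object that has completed
 *
 * Looks up the channel linked to this MBO, stores the MBO in the channel
 * fifo and wakes up readers sleeping on the wait queue. MBOs arriving for
 * a channel that is not open are rejected.
 */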
static int comp_rx_completion(struct mbo *mbo)
{
        struct comp_channel *c;

        if (!mbo)
                return -EINVAL;

        c = get_channel(mbo->ifp, mbo->hdm_channel_id);
        if (!c)
                return -ENXIO;

        spin_lock(&c->unlink);
        if (!c->access_ref || !c->dev) {
                spin_unlock(&c->unlink);
                return -ENODEV;
        }
        kfifo_in(&c->fifo, &mbo, 1);
        spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
        if (kfifo_is_full(&c->fifo))
                pr_info("WARN: Fifo is full\n");
#endif
        wake_up_interruptible(&c->wq);
        return 0;
}

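/**
 * comp_tx_completion - completion handler for TX channels
 * @iface: pointer to the interface instance
 * @channel_id: channel index
 *
 * Wakes up processes sleeping on the channel's wait queue so that a
 * pending write can obtain a free buffer.
 */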
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
        struct comp_channel *c;

        if (!iface) {
                pr_info("Bad interface pointer\n");
                return -EINVAL;
        }
        if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
                pr_info("Channel ID out of range\n");
                return -EINVAL;
        }

        c = get_channel(iface, channel_id);
        if (!c)
                return -ENXIO;
        wake_up_interruptible(&c->wq);
        return 0;
}

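/**
 * comp_probe - probe function of the cdev component
 * @iface: pointer to the interface instance
 * @channel_id: channel index
 * @cfg: pointer to the channel configuration
 * @name: name of the device node to be created
 * @args: optional component arguments (unused)
 *
 * Allocates a channel object, sets up the cdev and the fifo of buffer
 * objects and creates the device node in /dev.
 *
 * Returns 0 on success or a negative error code otherwise.
 */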
static int comp_probe(struct most_interface *iface, int channel_id,
                      struct most_channel_config *cfg, char *name, char *args)
{
        struct comp_channel *c;
        unsigned long cl_flags;
        int retval;
        int current_minor;

        if ((!iface) || (!cfg) || (!name)) {
                pr_info("Probing component with bad arguments");
                return -EINVAL;
        }
        c = get_channel(iface, channel_id);
        if (c)
                return -EEXIST;

        current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
        if (current_minor < 0)
                return current_minor;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                retval = -ENOMEM;
                goto err_remove_ida;
        }

        c->devno = MKDEV(comp.major, current_minor);
        cdev_init(&c->cdev, &channel_fops);
        c->cdev.owner = THIS_MODULE;
        retval = cdev_add(&c->cdev, c->devno, 1);
        if (retval < 0)
                goto err_free_c;
        c->iface = iface;
        c->cfg = cfg;
        c->channel_id = channel_id;
        c->access_ref = 0;
        spin_lock_init(&c->unlink);
        INIT_KFIFO(c->fifo);
        retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
        if (retval)
                goto err_del_cdev_and_free_channel;
        init_waitqueue_head(&c->wq);
        mutex_init(&c->io_mutex);
        spin_lock_irqsave(&ch_list_lock, cl_flags);
        list_add_tail(&c->list, &channel_list);
        spin_unlock_irqrestore(&ch_list_lock, cl_flags);
        c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

        if (IS_ERR(c->dev)) {
                retval = PTR_ERR(c->dev);
                pr_info("failed to create new device node %s\n", name);
                goto err_free_kfifo_and_del_list;
        }
        kobject_uevent(&c->dev->kobj, KOBJ_ADD);
        return 0;

err_free_kfifo_and_del_list:
        kfifo_free(&c->fifo);
        list_del(&c->list);
err_del_cdev_and_free_channel:
        cdev_del(&c->cdev);
err_free_c:
        kfree(c);
err_remove_ida:
        ida_simple_remove(&comp.minor_id, current_minor);
        return retval;
}

static struct cdev_component comp = {
        .cc = {
                .name = "cdev",
                .probe_channel = comp_probe,
                .disconnect_channel = comp_disconnect_channel,
                .rx_completion = comp_rx_completion,
                .tx_completion = comp_tx_completion,
        },
};

static int __init mod_init(void)
{
        int err;

        pr_info("init()\n");

        comp.class = class_create(THIS_MODULE, "most_cdev");
        if (IS_ERR(comp.class)) {
                pr_info("No udev support.\n");
                return PTR_ERR(comp.class);
        }

        INIT_LIST_HEAD(&channel_list);
        spin_lock_init(&ch_list_lock);
        ida_init(&comp.minor_id);

        err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
        if (err < 0)
                goto dest_ida;
        comp.major = MAJOR(comp.devno);
        err = most_register_component(&comp.cc);
        if (err)
                goto free_cdev;
        err = most_register_configfs_subsys(&comp.cc);
        if (err)
                goto deregister_comp;
        return 0;

deregister_comp:
        most_deregister_component(&comp.cc);
free_cdev:
        unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
        ida_destroy(&comp.minor_id);
        class_destroy(comp.class);
        return err;
}

static void __exit mod_exit(void)
{
        struct comp_channel *c, *tmp;

        pr_info("exit module\n");

        most_deregister_configfs_subsys(&comp.cc);
        most_deregister_component(&comp.cc);

        list_for_each_entry_safe(c, tmp, &channel_list, list) {
                destroy_cdev(c);
                destroy_channel(c);
        }
        unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
        ida_destroy(&comp.minor_id);
        class_destroy(comp.class);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");