drivers/media/v4l2-core/v4l2-event.c

DEFINITIONS

This source file includes the following definitions:
  1. sev_pos
  2. __v4l2_event_dequeue
  3. v4l2_event_dequeue
  4. v4l2_event_subscribed
  5. __v4l2_event_queue_fh
  6. v4l2_event_queue
  7. v4l2_event_queue_fh
  8. v4l2_event_pending
  9. __v4l2_event_unsubscribe
  10. v4l2_event_subscribe
  11. v4l2_event_unsubscribe_all
  12. v4l2_event_unsubscribe
  13. v4l2_event_subdev_unsubscribe
  14. v4l2_event_src_replace
  15. v4l2_event_src_merge
  16. v4l2_src_change_event_subscribe
  17. v4l2_src_change_event_subdev_subscribe
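
A minimal usage sketch (not part of this file): a driver would typically call
these helpers from its VIDIOC_SUBSCRIBE_EVENT handler and from interrupt or
worker context. The names my_subscribe_event and my_notify_resolution_change
are hypothetical; v4l2_event_subscribe, v4l2_src_change_event_subscribe and
v4l2_event_queue are the exported functions defined below.

	static int my_subscribe_event(struct v4l2_fh *fh,
				      const struct v4l2_event_subscription *sub)
	{
		switch (sub->type) {
		case V4L2_EVENT_SOURCE_CHANGE:
			return v4l2_src_change_event_subscribe(fh, sub);
		default:
			/* queue up to 8 occurrences, no special ops */
			return v4l2_event_subscribe(fh, sub, 8, NULL);
		}
	}

	/* e.g. from an interrupt handler or worker, with vdev being the
	 * driver's struct video_device */
	static void my_notify_resolution_change(struct video_device *vdev)
	{
		struct v4l2_event ev = {
			.type = V4L2_EVENT_SOURCE_CHANGE,
			.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
		};

		v4l2_event_queue(vdev, &ev);
	}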

// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

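/*
 * Map the idx'th oldest queued event of a subscription to its position in
 * the sev->events array, which is used as a ring buffer starting at
 * sev->first.
 */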
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

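/*
 * Pop the oldest available event from the file handle into *event, or
 * return -ENOENT if nothing is queued.
 */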
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	event->timestamp = ns_to_timespec(kev->ts);
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

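/*
 * Dequeue one event, optionally sleeping until one becomes available.
 * The video device's serialization lock, if present, is released while
 * waiting so that other file operations can proceed.
 */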
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

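/*
 * Queue an event on a single file handle. If the subscription's ring
 * buffer is full, the oldest event is dropped; the replace/merge ops,
 * when provided, let the subscriber fold the dropped payload into what
 * remains so no information is silently lost.
 */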
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

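/* Queue an event on every file handle subscribed to it on this device. */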
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

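/* Queue an event on one specific file handle only. */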
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

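/* Return the number of events ready for dequeueing on this file handle. */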
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

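/*
 * Unlink a subscription and discard any of its still-pending events.
 * Both fh->subscribe_lock and fh->vdev->fh_lock must be held, as the
 * assertions below check.
 */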
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

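/*
 * Subscribe a file handle to an event type/id pair, reserving room for
 * elems queued occurrences (at least one). Subscribing again to an event
 * that is already subscribed is a no-op.
 */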
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

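/*
 * Drop every subscription of a file handle, one entry at a time, taking
 * the spinlock only to peek at the head of the subscription list.
 */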
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

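/*
 * Drop a single subscription; V4L2_EVENT_ALL unsubscribes everything.
 */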
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

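/* Helper usable as a subdev unsubscribe op; forwards to v4l2_event_unsubscribe(). */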
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

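/*
 * Replace/merge callbacks for V4L2_EVENT_SOURCE_CHANGE: when an event has
 * to be dropped, the changes bitmasks are ORed together so that no change
 * notification is lost.
 */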
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

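/*
 * Convenience wrapper that subscribes to V4L2_EVENT_SOURCE_CHANGE with
 * the replace/merge ops above.
 */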
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
