This source file includes following definitions.
- uwb_rc_neh_release
- uwb_rc_neh_get
- uwb_rc_neh_put
- __uwb_rc_ctx_get
- __uwb_rc_ctx_put
- uwb_rc_neh_add
- __uwb_rc_neh_rm
- uwb_rc_neh_rm
- uwb_rc_neh_arm
- uwb_rc_neh_cb
- uwb_rc_neh_match
- uwb_rc_neh_lookup
- uwb_rc_notif
- uwb_rc_neh_grok_event
- uwb_rc_neh_grok
- uwb_rc_neh_error
- uwb_rc_neh_timer
- uwb_rc_neh_create
- uwb_rc_neh_destroy
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70 #include <linux/kernel.h>
71 #include <linux/timer.h>
72 #include <linux/slab.h>
73 #include <linux/err.h>
74 #include <linux/export.h>
75
76 #include "uwb-internal.h"
77
78
79
80
81
82
83
84
85
86
87
88
/*
 * Pending event handle: tracks one outstanding command sent to the
 * radio controller until the matching reply event arrives, the command
 * times out, or the stack shuts down.
 */
struct uwb_rc_neh {
	struct kref kref;		/* refcount; freed by uwb_rc_neh_release() */

	struct uwb_rc *rc;		/* radio controller this handle belongs to */
	u8 evt_type;			/* expected bEventType of the reply */
	__le16 evt;			/* expected wEvent (little endian) */
	u8 context;			/* context ID from rc->ctx_bm; 0 = none/released */
	u8 completed;			/* set once the reply was delivered (beats the timer) */
	uwb_rc_cmd_cb_f cb;		/* completion callback */
	void *arg;			/* opaque argument handed to @cb */

	struct timer_list timer;	/* command timeout timer */
	struct list_head list_node;	/* link in rc->neh_list */
};

static void uwb_rc_neh_timer(struct timer_list *t);
105
/* kref release function: frees the handle once the last reference drops. */
static void uwb_rc_neh_release(struct kref *kref)
{
	struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);

	kfree(neh);
}
112
/* Take an additional reference on @neh. */
static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
	kref_get(&neh->kref);
}
117
118
119
120
121
/* Drop a reference on @neh; frees it when the count reaches zero. */
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
	kref_put(&neh->kref, uwb_rc_neh_release);
}
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148 static
149 int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
150 {
151 int result;
152 result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
153 rc->ctx_roll++);
154 if (result < UWB_RC_CTX_MAX)
155 goto found;
156 result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
157 if (result < UWB_RC_CTX_MAX)
158 goto found;
159 return -ENFILE;
160 found:
161 set_bit(result, rc->ctx_bm);
162 neh->context = result;
163 return 0;
164 }
165
166
167
168 static
169 void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
170 {
171 struct device *dev = &rc->uwb_dev.dev;
172 if (neh->context == 0)
173 return;
174 if (test_bit(neh->context, rc->ctx_bm) == 0) {
175 dev_err(dev, "context %u not set in bitmap\n",
176 neh->context);
177 WARN_ON(1);
178 }
179 clear_bit(neh->context, rc->ctx_bm);
180 neh->context = 0;
181 }
182
183
184
185
186
187
188
189
190
191
192
193
194
/**
 * uwb_rc_neh_add - allocate and register a handle for an expected event
 * @rc:             the radio controller
 * @cmd:            command about to be sent; its bCommandContext field
 *                  is filled in with the context allocated here so the
 *                  reply can be matched back to this handle
 * @expected_type:  bEventType the reply must carry to match
 * @expected_event: wEvent the reply must carry to match
 * @cb:             callback run when the reply arrives (or on timeout/error)
 * @arg:            opaque pointer handed to @cb
 *
 * Returns the new handle or an ERR_PTR() on failure (-ENOMEM, or
 * -ENFILE from __uwb_rc_ctx_get() when no context IDs are free).
 *
 * On success two references exist: the one returned to the caller
 * (from kref_init()) and one taken here for the rc->neh_list linkage.
 */
struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
				  u8 expected_type, u16 expected_event,
				  uwb_rc_cmd_cb_f cb, void *arg)
{
	int result;
	unsigned long flags;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;

	neh = kzalloc(sizeof(*neh), GFP_KERNEL);
	if (neh == NULL) {
		result = -ENOMEM;
		goto error_kzalloc;
	}

	kref_init(&neh->kref);
	INIT_LIST_HEAD(&neh->list_node);
	timer_setup(&neh->timer, uwb_rc_neh_timer, 0);

	neh->rc = rc;
	neh->evt_type = expected_type;
	neh->evt = cpu_to_le16(expected_event);
	neh->cb = cb;
	neh->arg = arg;

	spin_lock_irqsave(&rc->neh_lock, flags);
	result = __uwb_rc_ctx_get(rc, neh);
	if (result >= 0) {
		cmd->bCommandContext = neh->context;
		list_add_tail(&neh->list_node, &rc->neh_list);
		uwb_rc_neh_get(neh);	/* reference held by rc->neh_list */
	}
	spin_unlock_irqrestore(&rc->neh_lock, flags);
	if (result < 0)
		goto error_ctx_get;

	return neh;

error_ctx_get:
	kfree(neh);
error_kzalloc:
	dev_err(dev, "cannot open handle to radio controller: %d\n", result);
	return ERR_PTR(result);
}
239
/* Unlink @neh and release its context ID; rc->neh_lock must be held. */
static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	__uwb_rc_ctx_put(rc, neh);
	list_del(&neh->list_node);
}
245
246
247
248
249
250
251
252
253
/**
 * uwb_rc_neh_rm - unregister an event handle that is no longer wanted
 * @rc:  the radio controller
 * @neh: the handle to remove
 *
 * Unlinks the handle, synchronously stops its timeout timer and drops
 * the list's reference.  The callback is not invoked.
 */
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	del_timer_sync(&neh->timer);	/* must be outside neh_lock */
	uwb_rc_neh_put(neh);
}
265
266
267
268
269
270
271
272
273
/**
 * uwb_rc_neh_arm - start the command timeout for a pending handle
 * @rc:  the radio controller
 * @neh: the handle whose timer to (re)start
 *
 * The timer is only armed while the handle still owns a context (a
 * zero context means it was already removed); it fires
 * UWB_RC_CMD_TIMEOUT_MS milliseconds from now.
 */
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->context)
		mod_timer(&neh->timer,
			  jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}
284
285 static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
286 {
287 (*neh->cb)(neh->rc, neh->arg, rceb, size);
288 uwb_rc_neh_put(neh);
289 }
290
291 static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
292 {
293 return neh->evt_type == rceb->bEventType
294 && neh->evt == rceb->wEvent
295 && neh->context == rceb->bEventContext;
296 }
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312 static
313 struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
314 const struct uwb_rceb *rceb)
315 {
316 struct uwb_rc_neh *neh = NULL, *h;
317 unsigned long flags;
318
319 spin_lock_irqsave(&rc->neh_lock, flags);
320
321 list_for_each_entry(h, &rc->neh_list, list_node) {
322 if (uwb_rc_neh_match(h, rceb)) {
323 neh = h;
324 break;
325 }
326 }
327
328 if (neh)
329 __uwb_rc_neh_rm(rc, neh);
330
331 spin_unlock_irqrestore(&rc->neh_lock, flags);
332
333 return neh;
334 }
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362 static
363 void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
364 {
365 struct device *dev = &rc->uwb_dev.dev;
366 struct uwb_event *uwb_evt;
367
368 if (size == -ESHUTDOWN)
369 return;
370 if (size < 0) {
371 dev_err(dev, "ignoring event with error code %zu\n",
372 size);
373 return;
374 }
375
376 uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
377 if (unlikely(uwb_evt == NULL)) {
378 dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
379 rceb->bEventType, le16_to_cpu(rceb->wEvent),
380 rceb->bEventContext);
381 return;
382 }
383 uwb_evt->rc = __uwb_rc_get(rc);
384 uwb_evt->ts_jiffies = jiffies;
385 uwb_evt->type = UWB_EVT_TYPE_NOTIF;
386 uwb_evt->notif.size = size;
387 uwb_evt->notif.rceb = rceb;
388
389 uwbd_event_queue(uwb_evt);
390 }
391
/*
 * Dispatch one sized event to its consumer.
 *
 * Events with bEventContext == 0 are unsolicited notifications: a
 * private copy is kmalloc'ed (the caller's buffer is reused for the
 * next event) and queued via uwb_rc_notif().  A non-zero context is
 * matched against the pending handles; on a hit the handle is removed,
 * its timeout cancelled, and its callback run synchronously with @rceb
 * still pointing into the caller's buffer.
 */
static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;
	struct uwb_rceb *notif;
	unsigned long flags;

	if (rceb->bEventContext == 0) {
		notif = kmalloc(size, GFP_ATOMIC);
		if (notif) {
			memcpy(notif, rceb, size);
			uwb_rc_notif(rc, notif, size);
		} else
			dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
				rceb->bEventType, le16_to_cpu(rceb->wEvent),
				rceb->bEventContext, size);
	} else {
		neh = uwb_rc_neh_lookup(rc, rceb);
		if (neh) {
			spin_lock_irqsave(&rc->neh_lock, flags);
			/* mark completed under the lock so a concurrent
			 * timer expiry sees it and backs off */
			neh->completed = 1;
			del_timer(&neh->timer);
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			uwb_rc_neh_cb(neh, rceb, size);
		} else
			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
				rceb->bEventType, le16_to_cpu(rceb->wEvent),
				rceb->bEventContext, size);
	}
}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
/**
 * uwb_rc_neh_grok - parse a buffer of one or more events from the RC
 * @rc:       the radio controller
 * @buf:      raw event data received from the controller
 * @buf_size: number of valid bytes in @buf
 *
 * Walks the buffer event by event.  Each event is sized either by the
 * HC-specific rc->filter_event hook or, when that hook is absent or
 * declines with -ENOANO, by the event size table (uwb_est_find_size()).
 * Each complete event is handed to uwb_rc_neh_grok_event().  Parsing
 * stops at the first truncated, unsized or unfilterable event.
 */
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	void *itr;
	struct uwb_rceb *rceb;
	size_t size, real_size, event_size;
	int needtofree;

	itr = buf;
	size = buf_size;
	while (size > 0) {
		if (size < sizeof(*rceb)) {
			dev_err(dev, "not enough data in event buffer to "
				"process incoming events (%zu left, minimum is "
				"%zu)\n", size, sizeof(*rceb));
			break;
		}

		rceb = itr;
		if (rc->filter_event) {
			/* The filter may replace *rceb; a return of 1 means
			 * the replacement must be freed after dispatch.
			 * -ENOANO means "not filtered, size it yourself". */
			needtofree = rc->filter_event(rc, &rceb, size,
						      &real_size, &event_size);
			if (needtofree < 0 && needtofree != -ENOANO) {
				dev_err(dev, "BUG: Unable to filter event "
					"(0x%02x/%04x/%02x) from "
					"device. \n", rceb->bEventType,
					le16_to_cpu(rceb->wEvent),
					rceb->bEventContext);
				break;
			}
		} else
			needtofree = -ENOANO;

		/* Not filtered: look the size up in the EST tables. */
		if (needtofree == -ENOANO) {
			ssize_t ret = uwb_est_find_size(rc, rceb, size);
			if (ret < 0)
				break;
			if (ret > size) {
				dev_err(dev, "BUG: hw sent incomplete event "
					"0x%02x/%04x/%02x (%zd bytes), only got "
					"%zu bytes. We don't handle that.\n",
					rceb->bEventType, le16_to_cpu(rceb->wEvent),
					rceb->bEventContext, ret, size);
				break;
			}
			real_size = event_size = ret;
		}
		uwb_rc_neh_grok_event(rc, rceb, event_size);

		if (needtofree == 1)
			kfree(rceb);

		/* advance by the on-the-wire size, which may differ from
		 * the dispatched event_size when the filter rewrote it */
		itr += real_size;
		size -= real_size;
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
522
523
524
525
526
527
528
529
530
531
/**
 * uwb_rc_neh_error - fail every pending event handle with an error
 * @rc:    the radio controller
 * @error: negative errno passed as the size argument to each callback
 *
 * Used when the transport/device fails: each handle is unlinked, its
 * timeout stopped and its callback invoked with a NULL event buffer.
 * The lock is dropped around del_timer_sync()/the callback, so the
 * list is re-checked from scratch on every iteration.
 */
void uwb_rc_neh_error(struct uwb_rc *rc, int error)
{
	struct uwb_rc_neh *neh;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&rc->neh_lock, flags);
		if (list_empty(&rc->neh_list)) {
			spin_unlock_irqrestore(&rc->neh_lock, flags);
			break;
		}
		neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
		__uwb_rc_neh_rm(rc, neh);
		spin_unlock_irqrestore(&rc->neh_lock, flags);

		del_timer_sync(&neh->timer);
		uwb_rc_neh_cb(neh, NULL, error);
	}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
552
553
/* Command timeout: fires when no matching reply arrived in time. */
static void uwb_rc_neh_timer(struct timer_list *t)
{
	struct uwb_rc_neh *neh = from_timer(neh, t, timer);
	struct uwb_rc *rc = neh->rc;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->completed) {
		/* the reply raced in and won; nothing left to do */
		spin_unlock_irqrestore(&rc->neh_lock, flags);
		return;
	}
	if (neh->context)
		__uwb_rc_neh_rm(rc, neh);
	else
		/* context already released: someone else removed the handle */
		neh = NULL;
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	if (neh)
		uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
}
574
575
576
/* Initialize the event-handle machinery of a radio controller. */
void uwb_rc_neh_create(struct uwb_rc *rc)
{
	spin_lock_init(&rc->neh_lock);
	INIT_LIST_HEAD(&rc->neh_list);
	set_bit(0, rc->ctx_bm);		/* context 0 is reserved: means "no context" */
	set_bit(0xff, rc->ctx_bm);	/* NOTE(review): 0xff also reserved — confirm against the WUSB/UWB spec */
	rc->ctx_roll = 1;		/* start allocating from context 1 */
}
585
586
587
588 void uwb_rc_neh_destroy(struct uwb_rc *rc)
589 {
590 unsigned long flags;
591 struct uwb_rc_neh *neh;
592
593 for (;;) {
594 spin_lock_irqsave(&rc->neh_lock, flags);
595 if (list_empty(&rc->neh_list)) {
596 spin_unlock_irqrestore(&rc->neh_lock, flags);
597 break;
598 }
599 neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
600 __uwb_rc_neh_rm(rc, neh);
601 spin_unlock_irqrestore(&rc->neh_lock, flags);
602
603 del_timer_sync(&neh->timer);
604 uwb_rc_neh_put(neh);
605 }
606 }