This source file includes the following definitions:
- uwb_rc_set_drp_cmd_done
- uwb_rc_send_all_drp_ie
- evaluate_conflict_action
- handle_conflict_normal
- handle_conflict_expanding
- uwb_drp_handle_conflict_rsv
- uwb_drp_handle_all_conflict_rsv
- uwb_drp_process_target_accepted
- uwb_drp_process_target
- uwb_drp_process_owner_accepted
- uwb_drp_process_owner
- uwb_cnflt_alien_stroke_timer
- uwb_cnflt_update_work
- uwb_cnflt_timer
- uwb_drp_handle_alien_drp
- uwb_drp_process_not_involved
- uwb_drp_process_involved
- uwb_drp_involves_us
- uwb_drp_process
- uwb_drp_availability_process
- uwb_drp_process_all
- uwbd_evt_handle_rc_drp
1
2
3
4
5
6
7
8
9
10 #include <linux/kthread.h>
11 #include <linux/freezer.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 #include "uwb-internal.h"
15
16
17
/*
 * Possible actions when a remote DRP IE conflicts with one of our
 * reservations; chosen by evaluate_conflict_action().
 *
 * NOTE(review): "MANTAIN" is a long-standing misspelling of "MAINTAIN";
 * the identifier is used throughout this file, so it is kept as-is.
 */
enum uwb_drp_conflict_action {
	/* Leave the reservation untouched; no conflict action required. */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/*
	 * Keep the reservation but take no steps to protect it.  Returned
	 * when the conflicting IE is an alien-BP reservation (see
	 * evaluate_conflict_action()).
	 */
	UWB_DRP_CONFLICT_ACT1,

	/*
	 * Resolve the conflict on a reservation whose status bit is 0
	 * (not yet established): owners move or back off, targets flag
	 * the conflict (see handle_conflict_normal()).
	 */
	UWB_DRP_CONFLICT_ACT2,

	/*
	 * Resolve the conflict on a reservation whose status bit is 1
	 * (established): owners back off and relinquish the conflicting
	 * MAS, targets flag the conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};
45
46
47 static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
48 struct uwb_rceb *reply, ssize_t reply_size)
49 {
50 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
51 unsigned long flags;
52
53 if (r != NULL) {
54 if (r->bResultCode != UWB_RC_RES_SUCCESS)
55 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
56 uwb_rc_strerror(r->bResultCode), r->bResultCode);
57 } else
58 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
59
60 spin_lock_irqsave(&rc->rsvs_lock, flags);
61 if (rc->set_drp_ie_pending > 1) {
62 rc->set_drp_ie_pending = 0;
63 uwb_rsv_queue_update(rc);
64 } else {
65 rc->set_drp_ie_pending = 0;
66 }
67 spin_unlock_irqrestore(&rc->rsvs_lock, flags);
68 }
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90 int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
91 {
92 int result;
93 struct uwb_rc_cmd_set_drp_ie *cmd;
94 struct uwb_rsv *rsv;
95 struct uwb_rsv_move *mv;
96 int num_bytes = 0;
97 u8 *IEDataptr;
98
99 result = -ENOMEM;
100
101 list_for_each_entry(rsv, &rc->reservations, rc_node) {
102 if (rsv->drp_ie != NULL) {
103 num_bytes += rsv->drp_ie->hdr.length + 2;
104 if (uwb_rsv_has_two_drp_ies(rsv) &&
105 (rsv->mv.companion_drp_ie != NULL)) {
106 mv = &rsv->mv;
107 num_bytes +=
108 mv->companion_drp_ie->hdr.length + 2;
109 }
110 }
111 }
112 num_bytes += sizeof(rc->drp_avail.ie);
113 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
114 if (cmd == NULL)
115 goto error;
116 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
117 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
118 cmd->wIELength = num_bytes;
119 IEDataptr = (u8 *)&cmd->IEData[0];
120
121
122
123 memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
124 IEDataptr += sizeof(struct uwb_ie_drp_avail);
125
126
127 list_for_each_entry(rsv, &rc->reservations, rc_node) {
128 if (rsv->drp_ie != NULL) {
129 memcpy(IEDataptr, rsv->drp_ie,
130 rsv->drp_ie->hdr.length + 2);
131 IEDataptr += rsv->drp_ie->hdr.length + 2;
132
133 if (uwb_rsv_has_two_drp_ies(rsv) &&
134 (rsv->mv.companion_drp_ie != NULL)) {
135 mv = &rsv->mv;
136 memcpy(IEDataptr, mv->companion_drp_ie,
137 mv->companion_drp_ie->hdr.length + 2);
138 IEDataptr +=
139 mv->companion_drp_ie->hdr.length + 2;
140 }
141 }
142 }
143
144 result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
145 &cmd->rccb, sizeof(*cmd) + num_bytes,
146 UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
147 uwb_rc_set_drp_cmd_done, NULL);
148
149 rc->set_drp_ie_pending = 1;
150
151 kfree(cmd);
152 error:
153 return result;
154 }
155
156
157
158
159
160
/*
 * Decide how to resolve a conflict between one of our reservations and
 * an external DRP IE heard in beacon slot @ext_beacon_slot.
 *
 * The cascade of checks below appears to implement the ECMA-368 DRP
 * conflict-resolution rules, using the tie-breaker bit and the relative
 * beacon-slot order -- NOTE(review): confirm rule numbering against
 * ECMA-368 17.4.6.
 *
 * @ext_drp_ie:      the conflicting DRP IE from the remote device
 * @ext_beacon_slot: beacon slot number of the remote device
 * @rsv:             our reservation involved in the conflict
 * @our_status:      status bit to evaluate our reservation with; callers
 *                   pass 0 to treat it as not yet established
 *
 * Returns one of enum uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);

	/* Two PCA reservations can coexist: no action needed. */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* Our alien-BP reservation always wins: keep it. */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* The remote IE protects an alien BP: keep our reservation but
	 * do not try to protect it. */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* Theirs is established, ours is not: we must yield. */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* Ours is established, theirs is not: we keep it. */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* Same status on both sides: break the tie with the tie-breaker
	 * bit and beacon-slot ordering.  Equal tie-breakers favour the
	 * earlier beacon slot... */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot <  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* ...different tie-breakers favour the later beacon slot. */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot >  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* We lose the tie: act on the unestablished rsv. */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else  {
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* We lose the tie: act on the established rsv. */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}
238
/*
 * Resolve a conflict on a non-moving ("normal") reservation.
 *
 * The conflict action is computed by evaluate_conflict_action() and
 * applied differently depending on whether we own the reservation or
 * are a target of it.
 *
 * @drp_ie:          the conflicting DRP IE from the remote device
 * @ext_beacon_slot: beacon slot number of the remote device
 * @rsv:             our reservation involved in the conflict
 * @conflicting_mas: the MAS bitmap claimed by the remote IE
 */
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* Try to move the reservation elsewhere; back off
			 * if no extra MAS can be reserved right now. */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* Drop the conflicting MAS with reason "modified":
			 * the companion bitmap holds the MAS to be dropped. */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
			/* fall through */
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			/* As a target we only flag the conflict; the owner
			 * resolves it. */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* fall through */
		default:
			break;
		}

	}

}
281
/*
 * Resolve a conflict on a reservation that is being moved/expanded
 * (it has a companion DRP IE in flight).
 *
 * @companion_only: true when only the companion MAS set conflicts with
 *                  the remote IE; the established part is unaffected.
 */
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* The companion is not established yet, so evaluate the
		 * conflict with status 0. */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				/* Give up the companion: fall back to the
				 * established reservation and release the
				 * companion MAS. */
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* we are a target of the reservation */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);

			}
		}
	} else { /* also the established part of the reservation conflicts */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* Release the companion MAS... */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* ...and shrink the established part to the MAS
			 * that do not conflict; the companion bitmap holds
			 * the MAS being kept for the MODIFIED transition. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
				      conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);

		}
	}
}
333
334 static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
335 struct uwb_rc_evt_drp *drp_evt,
336 struct uwb_ie_drp *drp_ie,
337 struct uwb_mas_bm *conflicting_mas)
338 {
339 struct uwb_rsv_move *mv;
340
341
342 if (uwb_rsv_has_two_drp_ies(rsv)) {
343 mv = &rsv->mv;
344 if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
345 UWB_NUM_MAS)) {
346 handle_conflict_expanding(drp_ie,
347 drp_evt->beacon_slot_number,
348 rsv, false, conflicting_mas);
349 } else {
350 if (bitmap_intersects(mv->companion_mas.bm,
351 conflicting_mas->bm, UWB_NUM_MAS)) {
352 handle_conflict_expanding(
353 drp_ie, drp_evt->beacon_slot_number,
354 rsv, true, conflicting_mas);
355 }
356 }
357 } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
358 UWB_NUM_MAS)) {
359 handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
360 rsv, conflicting_mas);
361 }
362 }
363
364 static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
365 struct uwb_rc_evt_drp *drp_evt,
366 struct uwb_ie_drp *drp_ie,
367 struct uwb_mas_bm *conflicting_mas)
368 {
369 struct uwb_rsv *rsv;
370
371 list_for_each_entry(rsv, &rc->reservations, rc_node) {
372 uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
373 conflicting_mas);
374 }
375 }
376
/*
 * Process an ACCEPTED DRP IE for a reservation we are a target of.
 *
 * @mas: the MAS bitmap decoded from the remote DRP IE.
 */
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		/* Re-assert the conflict state; presumably this restarts
		 * the state timer -- NOTE(review): confirm in
		 * uwb_rsv_set_state(). */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* Still expanding: re-assert while the owner's IE does not
		 * yet match our MAS set. */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* Companion reservation accepted */
			uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* The owner wants to expand onto MAS that
				 * are not available here: treat the new MAS
				 * set as a conflict against all of our
				 * reservations. */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
						drp_ie, mas);
			} else {
				/* The expansion MAS are free: remember them
				 * as the companion and start expanding. */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
						UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			/* MAS sets agree; accept once the owner's status
			 * bit is set. */
			if (status) {
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_T_ACCEPTED);
			}
		}

	}
}
424
425
426
427
428
/*
 * Process a DRP IE for a reservation we are a target of, dispatching on
 * the IE's reason code (ACCEPTED or MODIFIED; everything else is
 * logged and ignored).
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
		   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* If the new MAS set equals what we already have, the
		 * modification is complete: accept. */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* If the new set is a subset of ours, the owner is
		 * reducing the reservation: release the dropped MAS. */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* Companion bitmap temporarily holds the MAS
			 * being released. */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
				UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		/* Adopt the owner's new MAS set. */
		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}
470
/*
 * Advance the owner-side state machine when a target has accepted the
 * reservation; @mas is the MAS set the target echoed back.
 */
static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
					   struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	switch (rsv->state) {
	case UWB_RSV_STATE_O_PENDING:
	case UWB_RSV_STATE_O_INITIATED:
	case UWB_RSV_STATE_O_ESTABLISHED:
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* Established only once the target echoes our full set. */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;

	case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
			/* Companion reservation accepted: start combining
			 * the old and the new MAS sets. */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		} else {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		/* Once the target echoes the combined set, reduce to the
		 * final MAS set. */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	default:
		break;
	}
}
513
514
515
516
/*
 * Process a DRP IE for a reservation we own: a target's response.
 *
 * With the status bit set, only ACCEPTED is meaningful; with it clear,
 * the reason code tells us the target is PENDING, has DENIED the
 * reservation, or reports a CONFLICT against its availability.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_drp_process_owner_accepted(rsv, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* The MAS the target considers unavailable are the
			 * complement of its last advertised availability. */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
559
560 static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
561 {
562 unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
563 mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
564 }
565
566 static void uwb_cnflt_update_work(struct work_struct *work)
567 {
568 struct uwb_cnflt_alien *cnflt = container_of(work,
569 struct uwb_cnflt_alien,
570 cnflt_update_work);
571 struct uwb_cnflt_alien *c;
572 struct uwb_rc *rc = cnflt->rc;
573
574 unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
575
576 mutex_lock(&rc->rsvs_mutex);
577
578 list_del(&cnflt->rc_node);
579
580
581 bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
582
583 list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
584 bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
585 c->mas.bm, UWB_NUM_MAS);
586 }
587
588 queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
589 usecs_to_jiffies(delay_us));
590
591 kfree(cnflt);
592 mutex_unlock(&rc->rsvs_mutex);
593 }
594
595 static void uwb_cnflt_timer(struct timer_list *t)
596 {
597 struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
598
599 queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
600 }
601
602
603
604
605
/*
 * Handle a DRP IE of type ALIEN_BP: record the MAS claimed by the alien
 * beacon period so our reservations avoid them.
 *
 * If the same MAS set is already being tracked, only its expiry timer
 * is restarted; otherwise a new tracking entry is allocated, the global
 * alien-conflict bitmap is updated, and a reservation update is
 * scheduled.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Already tracking this alien conflict: just
			 * restart its expiry timer. */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien conflict: allocate a tracking entry.  GFP_KERNEL is
	 * fine here; this runs from uwbd (process context). */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* Fold the new conflict into the global alien bitmap and let the
	 * reservation logic react. */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* Arm the entry's expiry timer. */
	uwb_cnflt_alien_stroke_timer(cnflt);
}
651
652 static void uwb_drp_process_not_involved(struct uwb_rc *rc,
653 struct uwb_rc_evt_drp *drp_evt,
654 struct uwb_ie_drp *drp_ie)
655 {
656 struct uwb_mas_bm mas;
657
658 uwb_drp_ie_to_bm(&mas, drp_ie);
659 uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
660 }
661
662 static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
663 struct uwb_rc_evt_drp *drp_evt,
664 struct uwb_ie_drp *drp_ie)
665 {
666 struct uwb_rsv *rsv;
667
668 rsv = uwb_rsv_find(rc, src, drp_ie);
669 if (!rsv) {
670
671
672
673
674
675 return;
676 }
677
678
679
680
681
682 if (rsv->state == UWB_RSV_STATE_NONE) {
683 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
684 return;
685 }
686
687 if (uwb_ie_drp_owner(drp_ie))
688 uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
689 else
690 uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
691
692 }
693
694
695 static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
696 {
697 return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
698 }
699
700
701
702
703 static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
704 struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
705 {
706 if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
707 uwb_drp_handle_alien_drp(rc, drp_ie);
708 else if (uwb_drp_involves_us(rc, drp_ie))
709 uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
710 else
711 uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
712 }
713
714
715
716
717 static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
718 struct uwb_ie_drp_avail *drp_availability_ie)
719 {
720 bitmap_copy(src->last_availability_bm,
721 drp_availability_ie->bmp, UWB_NUM_MAS);
722 }
723
724
725
726
727
728 static
729 void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
730 size_t ielen, struct uwb_dev *src_dev)
731 {
732 struct device *dev = &rc->uwb_dev.dev;
733 struct uwb_ie_hdr *ie_hdr;
734 void *ptr;
735
736 ptr = drp_evt->ie_data;
737 for (;;) {
738 ie_hdr = uwb_ie_next(&ptr, &ielen);
739 if (!ie_hdr)
740 break;
741
742 switch (ie_hdr->element_id) {
743 case UWB_IE_DRP_AVAILABILITY:
744 uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
745 break;
746 case UWB_IE_DRP:
747 uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
748 break;
749 default:
750 dev_warn(dev, "unexpected IE in DRP notification\n");
751 break;
752 }
753 }
754
755 if (ielen > 0)
756 dev_warn(dev, "%d octets remaining in DRP notification\n",
757 (int)ielen);
758 }
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792 int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
793 {
794 struct device *dev = &evt->rc->uwb_dev.dev;
795 struct uwb_rc *rc = evt->rc;
796 struct uwb_rc_evt_drp *drp_evt;
797 size_t ielength, bytes_left;
798 struct uwb_dev_addr src_addr;
799 struct uwb_dev *src_dev;
800
801
802
803 if (evt->notif.size < sizeof(*drp_evt)) {
804 dev_err(dev, "DRP event: Not enough data to decode event "
805 "[%zu bytes left, %zu needed]\n",
806 evt->notif.size, sizeof(*drp_evt));
807 return 0;
808 }
809 bytes_left = evt->notif.size - sizeof(*drp_evt);
810 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
811 ielength = le16_to_cpu(drp_evt->ie_length);
812 if (bytes_left != ielength) {
813 dev_err(dev, "DRP event: Not enough data in payload [%zu"
814 "bytes left, %zu declared in the event]\n",
815 bytes_left, ielength);
816 return 0;
817 }
818
819 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
820 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
821 if (!src_dev) {
822
823
824
825
826
827
828
829
830 return 0;
831 }
832
833 mutex_lock(&rc->rsvs_mutex);
834
835
836 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
837
838 mutex_unlock(&rc->rsvs_mutex);
839
840 uwb_dev_put(src_dev);
841 return 0;
842 }