This source file includes the following definitions:
- uwb_est_create
- uwb_est_destroy
- uwb_est_grow
- uwb_est_register
- uwb_est_unregister
- uwb_est_get_size
- uwb_est_find_size
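For reference, a hardware driver that produces vendor-specific notifications would register its own event size table with uwb_est_register() and drop it with uwb_est_unregister() on teardown. The sketch below is illustrative only: the type code 0xfe, the vendor/product IDs and the entry layout are made up; only the uwb_est_register()/uwb_est_unregister() calls and the struct uwb_est_entry fields come from this file.

	/* Hypothetical vendor-specific event size table (sketch only). */
	static struct uwb_est_entry example_vendor_est[] = {
		[0] = {	/* fixed-size event */
			.size = sizeof(struct uwb_rceb),
		},
		[1] = {	/* variable-size event: 16-bit length right after the RCEB */
			.size = sizeof(struct uwb_rceb) + sizeof(__le16),
			.offset = 1 + sizeof(struct uwb_rceb),
			.type = UWB_EST_16,
		},
	};

	static int example_register(void)
	{
		/* 0xfe / 0x1234 / 0x5678 are made-up type/vendor/product codes. */
		return uwb_est_register(0xfe, 0, 0x1234, 0x5678,
					example_vendor_est,
					ARRAY_SIZE(example_vendor_est));
	}

	static void example_unregister(void)
	{
		uwb_est_unregister(0xfe, 0, 0x1234, 0x5678,
				   example_vendor_est,
				   ARRAY_SIZE(example_vendor_est));
	}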

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "uwb-internal.h"

struct uwb_est {
	u16 type_event_high;	/* type in the high byte, event high byte in the low byte */
	u16 vendor, product;
	u8 entries;
	const struct uwb_est_entry *entry;
};

static struct uwb_est *uwb_est;
static u8 uwb_est_size;
static u8 uwb_est_used;
static DEFINE_RWLOCK(uwb_est_lock);

/*
 * Event size tables for the general (uwb_est_00_00xx) and extended
 * type-1 (uwb_est_01_00xx) command/event ranges.  Entries without an
 * .offset describe fixed-size events; entries with an .offset carry a
 * variable length field whose position is stored as 1 + offsetof(...)
 * so that an offset of zero can mean "no such field".
 */
static
struct uwb_est_entry uwb_est_00_00xx[] = {
	[UWB_RC_EVT_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_beacon),
		.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.size = sizeof(struct uwb_rc_evt_beacon_size),
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bpoie_change),
		.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
				       wBPOIELength),
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bp_slot_change),
	},
	[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.size = sizeof(struct uwb_rc_evt_drp_avail),
	},
	[UWB_RC_EVT_DRP] = {
		.size = sizeof(struct uwb_rc_evt_drp),
		.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
	},
	[UWB_RC_EVT_BP_SWITCH_STATUS] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_status),
	},
	[UWB_RC_EVT_CMD_FRAME_RCV] = {
		.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
	},
	[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
	},
	[UWB_RC_CMD_CHANNEL_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_DEV_ADDR_MGMT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt),
	},
	[UWB_RC_CMD_GET_IE] = {
		.size = sizeof(struct uwb_rc_evt_get_ie),
		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
	},
	[UWB_RC_CMD_RESET] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SCAN] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_BEACON_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_DRP_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_drp_ie),
	},
	[UWB_RC_CMD_SET_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_ie),
	},
	[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_TX_POWER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SLEEP] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_START_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_STOP_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_BP_MERGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_ASIE_NOTIF] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
};

static
struct uwb_est_entry uwb_est_01_00xx[] = {
	[UWB_RC_DAA_ENERGY_DETECTED] = {
		.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
	},
	[UWB_RC_SET_DAA_ENERGY_MASK] = {
		.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
	},
	[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
		.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
	},
};

/*
 * uwb_est_create - allocate the global list of event size tables and
 * register the standard tables for the general and extended type-1
 * command/event sets.  Returns 0 on success or a negative error code.
 */
int uwb_est_create(void)
{
	int result;

	uwb_est_size = 2;
	uwb_est_used = 0;
	uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
	if (uwb_est == NULL)
		return -ENOMEM;

	result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
				  uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
	if (result < 0)
		goto out;
	result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
				  uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
out:
	return result;
}

/* Release the global list of event size tables. */
void uwb_est_destroy(void)
{
	kfree(uwb_est);
	uwb_est = NULL;
	uwb_est_size = uwb_est_used = 0;
}

/*
 * Double the capacity of the event size table list.  Called with
 * uwb_est_lock held for writing, hence the GFP_ATOMIC allocation.
 */
static
int uwb_est_grow(void)
{
	size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
	void *new = kmalloc_array(2, actual_size, GFP_ATOMIC);

	if (new == NULL)
		return -ENOMEM;
	memcpy(new, uwb_est, actual_size);
	memset(new + actual_size, 0, actual_size);
	kfree(uwb_est);
	uwb_est = new;
	uwb_est_size *= 2;
	return 0;
}

/*
 * uwb_est_register - register an event size table.
 *
 * @type and @event_high select the events the table covers; @entry is
 * an array of @entries size descriptors indexed by the low byte of the
 * event code.  Returns 0 on success or -ENOMEM if the global list
 * cannot be grown to hold the new table.
 */
int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
		     const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	int result = 0;

	write_lock_irqsave(&uwb_est_lock, flags);
	if (uwb_est_used == uwb_est_size) {
		result = uwb_est_grow();
		if (result < 0)
			goto out;
	}

	/* Find the insertion point that keeps the list ordered. */
	for (itr = 0; itr < uwb_est_used; itr++)
		if (uwb_est[itr].type_event_high < type
		    && uwb_est[itr].vendor < vendor
		    && uwb_est[itr].product < product)
			break;

	/* If not appending at the end, make room for the new entry. */
	if (itr < uwb_est_used)
		memmove(&uwb_est[itr + 1], &uwb_est[itr],
			(uwb_est_used - itr) * sizeof(uwb_est[0]));
	uwb_est[itr].type_event_high = type << 8 | event_high;
	uwb_est[itr].vendor = vendor;
	uwb_est[itr].product = product;
	uwb_est[itr].entry = entry;
	uwb_est[itr].entries = entries;
	uwb_est_used++;
out:
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return result;
}
EXPORT_SYMBOL_GPL(uwb_est_register);

/*
 * uwb_est_unregister - remove a table registered with uwb_est_register().
 *
 * All arguments must match the original registration.  Returns 0 on
 * success or -ENOENT if no matching entry is found.
 */
int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
		       const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	struct uwb_est est_cmp = {
		.type_event_high = type << 8 | event_high,
		.vendor = vendor,
		.product = product,
		.entry = entry,
		.entries = entries,
	};

	write_lock_irqsave(&uwb_est_lock, flags);
	for (itr = 0; itr < uwb_est_used; itr++)
		if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
			goto found;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return -ENOENT;

found:
	/* Close the hole left by the removed entry. */
	if (itr < uwb_est_used - 1)
		memmove(&uwb_est[itr], &uwb_est[itr + 1],
			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
	uwb_est_used--;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(uwb_est_unregister);

/*
 * Compute the size of an event according to a single event size table.
 *
 * Returns the event's size in bytes, -ENOENT if the table has no entry
 * for the event, or -ENOSPC if the variable length field lies beyond
 * the @rceb_size bytes available.
 */
static
ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
			 u8 event_low, const struct uwb_rceb *rceb,
			 size_t rceb_size)
{
	unsigned offset;
	ssize_t size;
	struct device *dev = &uwb_rc->uwb_dev.dev;
	const struct uwb_est_entry *entry;

	size = -ENOENT;
	if (event_low >= est->entries) {
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	entry = &est->entry[event_low];
	if (entry->size == 0 && entry->offset == 0) {
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	offset = entry->offset;
	if (offset == 0)
		size = entry->size;
	else {
		/* Variable size: read the length field and add it to the
		 * fixed part of the event. */
		const void *ptr = rceb;
		size_t type_size = 0;

		offset--;
		size = -ENOSPC;
		switch (entry->type) {
		case UWB_EST_16: type_size = sizeof(__le16); break;
		case UWB_EST_8: type_size = sizeof(u8); break;
		default: BUG();
		}
		if (offset + type_size > rceb_size) {
			dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
				"not enough data to read extra size\n",
				est, est->type_event_high, est->vendor,
				est->product, est->entries);
			goto out;
		}
		size = entry->size;
		ptr += offset;
		switch (entry->type) {
		case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
		case UWB_EST_8: size += *(u8 *)ptr; break;
		default: BUG();
		}
	}
out:
	return size;
}

/*
 * uwb_est_find_size - compute the size of an event from its header.
 *
 * Walks the registered event size tables looking for one that matches
 * the event's type and high byte.  Returns the event's size in bytes,
 * -ENOSPC if @rceb_size is too small to determine it, or -ENOENT if no
 * registered table knows the event.
 */
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t rceb_size)
{
	ssize_t size;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long flags;
	unsigned itr;
	u16 type_event_high, event;

	read_lock_irqsave(&uwb_est_lock, flags);
	size = -ENOSPC;
	if (rceb_size < sizeof(*rceb))
		goto out;
	event = le16_to_cpu(rceb->wEvent);
	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
	for (itr = 0; itr < uwb_est_used; itr++) {
		if (uwb_est[itr].type_event_high != type_event_high)
			continue;
		size = uwb_est_get_size(rc, &uwb_est[itr],
					event & 0x00ff, rceb, rceb_size);
		/* -ENOENT means "this table doesn't know the event";
		 * keep looking in the remaining tables. */
		if (size != -ENOENT)
			goto out;
	}
	dev_dbg(dev,
		"event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
		(unsigned) rceb->bEventType,
		(unsigned) le16_to_cpu(rceb->wEvent),
		(unsigned) rceb->bEventContext,
		rceb);
	size = -ENOENT;
out:
	read_unlock_irqrestore(&uwb_est_lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);
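
/*
 * For reference, a sketch of how a caller might use uwb_est_find_size()
 * to walk a buffer of back-to-back events.  The helper name and the
 * buffer handling are illustrative only; just the uwb_est_find_size()
 * return-value semantics come from this file.
 */
static void example_consume_events(struct uwb_rc *rc, const void *buf,
				   size_t len)
{
	while (len >= sizeof(struct uwb_rceb)) {
		const struct uwb_rceb *rceb = buf;
		ssize_t size = uwb_est_find_size(rc, rceb, len);

		if (size < 0 || (size_t)size > len)
			break;		/* unknown event or truncated buffer */
		/* ... hand the 'size'-byte event to a dispatcher here ... */
		buf += size;
		len -= size;
	}
}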