This source file includes following definitions.
- pde_get_multiple
- pool_register_ref
- pool_deregister_ref
- pool_put_pulse_elem
- pool_put_pseq_elem
- pool_get_pseq_elem
- pool_get_pulse_elem
- pulse_queue_get_tail
- pulse_queue_dequeue
- pulse_queue_check_window
- pulse_queue_enqueue
- pseq_handler_create_sequences
- pseq_handler_add_to_existing_seqs
- pseq_handler_check_detection
- pri_detector_reset
- pri_detector_exit
- pri_detector_add_pulse
- pri_detector_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19
20 #include "ath.h"
21 #include "dfs_pattern_detector.h"
22 #include "dfs_pri_detector.h"
23
/* Global statistics for the shared pulse/sequence element pools. */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* Increment/decrement a counter in the global pool statistics. */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/*
 * Pick the PRI to track: if the detector spec pins the PRI to a single
 * value (min + tolerance == max - tolerance), use that fixed value;
 * otherwise use the PRI measured at runtime.
 */
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	MIN + PRI_TOLERANCE : RUNTIME)

/*
 * struct pulse_elem - a queued pulse observation
 * @head: list linkage within a detector's pulse queue (or the free pool)
 * @ts: pulse timestamp
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
40
41
42
43
44
45 static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
46 {
47 u32 remainder;
48 u32 factor;
49 u32 delta;
50
51 if (fraction == 0)
52 return 0;
53
54 delta = (val < fraction) ? (fraction - val) : (val - fraction);
55
56 if (delta <= tolerance)
57
58 return 1;
59
60 factor = val / fraction;
61 remainder = val % fraction;
62 if (remainder > tolerance) {
63
64 if ((fraction - remainder) <= tolerance)
65
66 factor++;
67 else
68 factor = 0;
69 }
70 return factor;
71 }
72
73
74
75
76
77
78
79
80
81
/*
 * Shared singleton pools of recycled pulse and sequence elements.
 * All detectors share these lists; @pool_lock guards every access,
 * and @singleton_pool_references counts live detector instances.
 */
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);
86
87 static void pool_register_ref(void)
88 {
89 spin_lock_bh(&pool_lock);
90 singleton_pool_references++;
91 DFS_POOL_STAT_INC(pool_reference);
92 spin_unlock_bh(&pool_lock);
93 }
94
95 static void pool_deregister_ref(void)
96 {
97 spin_lock_bh(&pool_lock);
98 singleton_pool_references--;
99 DFS_POOL_STAT_DEC(pool_reference);
100 if (singleton_pool_references == 0) {
101
102 struct pri_sequence *ps, *ps0;
103 struct pulse_elem *p, *p0;
104
105 list_for_each_entry_safe(p, p0, &pulse_pool, head) {
106 list_del(&p->head);
107 DFS_POOL_STAT_DEC(pulse_allocated);
108 kfree(p);
109 }
110 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
111 list_del(&ps->head);
112 DFS_POOL_STAT_DEC(pseq_allocated);
113 kfree(ps);
114 }
115 }
116 spin_unlock_bh(&pool_lock);
117 }
118
119 static void pool_put_pulse_elem(struct pulse_elem *pe)
120 {
121 spin_lock_bh(&pool_lock);
122 list_add(&pe->head, &pulse_pool);
123 DFS_POOL_STAT_DEC(pulse_used);
124 spin_unlock_bh(&pool_lock);
125 }
126
127 static void pool_put_pseq_elem(struct pri_sequence *pse)
128 {
129 spin_lock_bh(&pool_lock);
130 list_add(&pse->head, &pseq_pool);
131 DFS_POOL_STAT_DEC(pseq_used);
132 spin_unlock_bh(&pool_lock);
133 }
134
135 static struct pri_sequence *pool_get_pseq_elem(void)
136 {
137 struct pri_sequence *pse = NULL;
138 spin_lock_bh(&pool_lock);
139 if (!list_empty(&pseq_pool)) {
140 pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
141 list_del(&pse->head);
142 DFS_POOL_STAT_INC(pseq_used);
143 }
144 spin_unlock_bh(&pool_lock);
145 return pse;
146 }
147
148 static struct pulse_elem *pool_get_pulse_elem(void)
149 {
150 struct pulse_elem *pe = NULL;
151 spin_lock_bh(&pool_lock);
152 if (!list_empty(&pulse_pool)) {
153 pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
154 list_del(&pe->head);
155 DFS_POOL_STAT_INC(pulse_used);
156 }
157 spin_unlock_bh(&pool_lock);
158 return pe;
159 }
160
161 static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
162 {
163 struct list_head *l = &pde->pulses;
164 if (list_empty(l))
165 return NULL;
166 return list_entry(l->prev, struct pulse_elem, head);
167 }
168
169 static bool pulse_queue_dequeue(struct pri_detector *pde)
170 {
171 struct pulse_elem *p = pulse_queue_get_tail(pde);
172 if (p != NULL) {
173 list_del_init(&p->head);
174 pde->count--;
175
176 pool_put_pulse_elem(p);
177 }
178 return (pde->count > 0);
179 }
180
181
182 static void pulse_queue_check_window(struct pri_detector *pde)
183 {
184 u64 min_valid_ts;
185 struct pulse_elem *p;
186
187
188 if (pde->count < 2)
189 return;
190
191 if (pde->last_ts <= pde->window_size)
192 return;
193
194 min_valid_ts = pde->last_ts - pde->window_size;
195 while ((p = pulse_queue_get_tail(pde)) != NULL) {
196 if (p->ts >= min_valid_ts)
197 return;
198 pulse_queue_dequeue(pde);
199 }
200 }
201
202 static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
203 {
204 struct pulse_elem *p = pool_get_pulse_elem();
205 if (p == NULL) {
206 p = kmalloc(sizeof(*p), GFP_ATOMIC);
207 if (p == NULL) {
208 DFS_POOL_STAT_INC(pulse_alloc_error);
209 return false;
210 }
211 DFS_POOL_STAT_INC(pulse_allocated);
212 DFS_POOL_STAT_INC(pulse_used);
213 }
214 INIT_LIST_HEAD(&p->head);
215 p->ts = ts;
216 list_add(&p->head, &pde->pulses);
217 pde->count++;
218 pde->last_ts = ts;
219 pulse_queue_check_window(pde);
220 if (pde->count >= pde->max_count)
221 pulse_queue_dequeue(pde);
222 return true;
223 }
224
/*
 * Build new candidate PRI sequences from the current pulse @ts paired
 * with each queued pulse whose spacing falls inside [pri_min, pri_max].
 * Candidates are only kept if they collect more than @min_count pulses.
 * Returns false on allocation failure, true otherwise.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* spacing too small for this radar type */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* pulses are newest-first, so all further spacings
			 * are even larger: stop */
			break;

		/* start a candidate with these two pulses */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		/* total time a full burst of ppb pulses may span,
		 * padded by the tolerance on both ends */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;

		/* check which older pulses fit this candidate's PRI grid */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* pulse too old for the candidate window */
				break;

			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/* pulses skipped since the last match count
				 * as false pulses of this candidate */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this pulse does not fit the PRI grid */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* not better than already tracked sequences */
			continue;

		/* valid candidate: store it in the detector's list */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
302
303
304 static u32
305 pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
306 {
307 u32 max_count = 0;
308 struct pri_sequence *ps, *ps2;
309 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
310 u32 delta_ts;
311 u32 factor;
312
313
314 if (ts > ps->deadline_ts) {
315 list_del_init(&ps->head);
316 pool_put_pseq_elem(ps);
317 continue;
318 }
319
320 delta_ts = ts - ps->last_ts;
321 factor = pde_get_multiple(delta_ts, ps->pri,
322 pde->rs->max_pri_tolerance);
323 if (factor > 0) {
324 ps->last_ts = ts;
325 ps->count++;
326
327 if (max_count < ps->count)
328 max_count = ps->count;
329 } else {
330 ps->count_falses++;
331 }
332 }
333 return max_count;
334 }
335
336 static struct pri_sequence *
337 pseq_handler_check_detection(struct pri_detector *pde)
338 {
339 struct pri_sequence *ps;
340
341 if (list_empty(&pde->sequences))
342 return NULL;
343
344 list_for_each_entry(ps, &pde->sequences, head) {
345
346
347
348
349
350 if ((ps->count >= pde->rs->ppb_thresh) &&
351 (ps->count * pde->rs->num_pri >= ps->count_falses))
352 return ps;
353 }
354 return NULL;
355 }
356
357
358
359 static void pri_detector_reset(struct pri_detector *pde, u64 ts)
360 {
361 struct pri_sequence *ps, *ps0;
362 struct pulse_elem *p, *p0;
363 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
364 list_del_init(&ps->head);
365 pool_put_pseq_elem(ps);
366 }
367 list_for_each_entry_safe(p, p0, &pde->pulses, head) {
368 list_del_init(&p->head);
369 pool_put_pulse_elem(p);
370 }
371 pde->count = 0;
372 pde->last_ts = ts;
373 }
374
/* Destroy detector @de: flush its state, drop the pool reference, free it. */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
381
382 static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
383 struct pulse_event *event)
384 {
385 u32 max_updated_seq;
386 struct pri_sequence *ps;
387 u64 ts = event->ts;
388 const struct radar_detector_specs *rs = de->rs;
389
390
391 if ((rs->width_min > event->width) || (rs->width_max < event->width))
392 return NULL;
393
394 if ((ts - de->last_ts) < rs->max_pri_tolerance)
395
396 return NULL;
397
398 if (rs->chirp && rs->chirp != event->chirp)
399 return NULL;
400
401 de->last_ts = ts;
402
403 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
404
405 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
406 pri_detector_reset(de, ts);
407 return NULL;
408 }
409
410 ps = pseq_handler_check_detection(de);
411
412 if (ps == NULL)
413 pulse_queue_enqueue(de, ts);
414
415 return ps;
416 }
417
418 struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
419 {
420 struct pri_detector *de;
421
422 de = kzalloc(sizeof(*de), GFP_ATOMIC);
423 if (de == NULL)
424 return NULL;
425 de->exit = pri_detector_exit;
426 de->add_pulse = pri_detector_add_pulse;
427 de->reset = pri_detector_reset;
428
429 INIT_LIST_HEAD(&de->sequences);
430 INIT_LIST_HEAD(&de->pulses);
431 de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
432 de->max_count = rs->ppb * 2;
433 de->rs = rs;
434
435 pool_register_ref();
436 return de;
437 }