/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "ath.h"
21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h"
23
/* pool usage statistics shared by all pri_detector instances */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* bump/drop one counter in the shared pool statistics */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/*
 * When MIN + tolerance equals MAX - tolerance the spec pins the PRI to a
 * single value, so use that fixed PRI; otherwise use the measured RUNTIME
 * delta.
 */
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	MIN + PRI_TOLERANCE : RUNTIME)
31
/**
 * struct pulse_elem - elements in pulse queue
 * @head: list linkage into a pri_detector's pulse queue (or the free pool)
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
40
41/**
42 * pde_get_multiple() - get number of multiples considering a given tolerance
43 * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
44 */
45static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
46{
47	u32 remainder;
48	u32 factor;
49	u32 delta;
50
51	if (fraction == 0)
52		return 0;
53
54	delta = (val < fraction) ? (fraction - val) : (val - fraction);
55
56	if (delta <= tolerance)
57		/* val and fraction are within tolerance */
58		return 1;
59
60	factor = val / fraction;
61	remainder = val % fraction;
62	if (remainder > tolerance) {
63		/* no exact match */
64		if ((fraction - remainder) <= tolerance)
65			/* remainder is within tolerance */
66			factor++;
67		else
68			factor = 0;
69	}
70	return factor;
71}
72
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
/* number of live pri_detector instances holding a pool reference */
static u32 singleton_pool_references;
/* free lists of recycled pulse_elem / pri_sequence objects */
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
/* protects the two pools and the reference counter above */
static DEFINE_SPINLOCK(pool_lock);
86
87static void pool_register_ref(void)
88{
89	spin_lock_bh(&pool_lock);
90	singleton_pool_references++;
91	DFS_POOL_STAT_INC(pool_reference);
92	spin_unlock_bh(&pool_lock);
93}
94
95static void pool_deregister_ref(void)
96{
97	spin_lock_bh(&pool_lock);
98	singleton_pool_references--;
99	DFS_POOL_STAT_DEC(pool_reference);
100	if (singleton_pool_references == 0) {
101		/* free singleton pools with no references left */
102		struct pri_sequence *ps, *ps0;
103		struct pulse_elem *p, *p0;
104
105		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
106			list_del(&p->head);
107			DFS_POOL_STAT_DEC(pulse_allocated);
108			kfree(p);
109		}
110		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
111			list_del(&ps->head);
112			DFS_POOL_STAT_DEC(pseq_allocated);
113			kfree(ps);
114		}
115	}
116	spin_unlock_bh(&pool_lock);
117}
118
119static void pool_put_pulse_elem(struct pulse_elem *pe)
120{
121	spin_lock_bh(&pool_lock);
122	list_add(&pe->head, &pulse_pool);
123	DFS_POOL_STAT_DEC(pulse_used);
124	spin_unlock_bh(&pool_lock);
125}
126
127static void pool_put_pseq_elem(struct pri_sequence *pse)
128{
129	spin_lock_bh(&pool_lock);
130	list_add(&pse->head, &pseq_pool);
131	DFS_POOL_STAT_DEC(pseq_used);
132	spin_unlock_bh(&pool_lock);
133}
134
135static struct pri_sequence *pool_get_pseq_elem(void)
136{
137	struct pri_sequence *pse = NULL;
138	spin_lock_bh(&pool_lock);
139	if (!list_empty(&pseq_pool)) {
140		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
141		list_del(&pse->head);
142		DFS_POOL_STAT_INC(pseq_used);
143	}
144	spin_unlock_bh(&pool_lock);
145	return pse;
146}
147
148static struct pulse_elem *pool_get_pulse_elem(void)
149{
150	struct pulse_elem *pe = NULL;
151	spin_lock_bh(&pool_lock);
152	if (!list_empty(&pulse_pool)) {
153		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
154		list_del(&pe->head);
155		DFS_POOL_STAT_INC(pulse_used);
156	}
157	spin_unlock_bh(&pool_lock);
158	return pe;
159}
160
161static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
162{
163	struct list_head *l = &pde->pulses;
164	if (list_empty(l))
165		return NULL;
166	return list_entry(l->prev, struct pulse_elem, head);
167}
168
169static bool pulse_queue_dequeue(struct pri_detector *pde)
170{
171	struct pulse_elem *p = pulse_queue_get_tail(pde);
172	if (p != NULL) {
173		list_del_init(&p->head);
174		pde->count--;
175		/* give it back to pool */
176		pool_put_pulse_elem(p);
177	}
178	return (pde->count > 0);
179}
180
181/* remove pulses older than window */
182static void pulse_queue_check_window(struct pri_detector *pde)
183{
184	u64 min_valid_ts;
185	struct pulse_elem *p;
186
187	/* there is no delta time with less than 2 pulses */
188	if (pde->count < 2)
189		return;
190
191	if (pde->last_ts <= pde->window_size)
192		return;
193
194	min_valid_ts = pde->last_ts - pde->window_size;
195	while ((p = pulse_queue_get_tail(pde)) != NULL) {
196		if (p->ts >= min_valid_ts)
197			return;
198		pulse_queue_dequeue(pde);
199	}
200}
201
202static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
203{
204	struct pulse_elem *p = pool_get_pulse_elem();
205	if (p == NULL) {
206		p = kmalloc(sizeof(*p), GFP_ATOMIC);
207		if (p == NULL) {
208			DFS_POOL_STAT_INC(pulse_alloc_error);
209			return false;
210		}
211		DFS_POOL_STAT_INC(pulse_allocated);
212		DFS_POOL_STAT_INC(pulse_used);
213	}
214	INIT_LIST_HEAD(&p->head);
215	p->ts = ts;
216	list_add(&p->head, &pde->pulses);
217	pde->count++;
218	pde->last_ts = ts;
219	pulse_queue_check_window(pde);
220	if (pde->count >= pde->max_count)
221		pulse_queue_dequeue(pde);
222	return true;
223}
224
/**
 * pseq_handler_create_sequences() - build new candidate PRI sequences
 * @pde: detector instance whose pulse queue is scanned
 * @ts: time stamp of the current pulse in usecs
 * @min_count: sequences must exceed this pulse count to be kept
 *
 * Pairs the current pulse with each queued pulse whose delta lies within
 * [pri_min, pri_max] to form a candidate PRI, then scans the rest of the
 * queue for pulses matching (multiples of) that PRI.  Candidates with more
 * than @min_count matching pulses are appended to pde->sequences.
 *
 * Return: false on allocation failure, true otherwise.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		/* fixed PRI if the spec pins it, else the measured delta */
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		/* window needed to observe ppb pulses at this pri */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			/* pool empty: allocate fresh (atomic context) */
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
302
303/* check new ts and add to all matching existing sequences */
304static u32
305pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
306{
307	u32 max_count = 0;
308	struct pri_sequence *ps, *ps2;
309	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
310		u32 delta_ts;
311		u32 factor;
312
313		/* first ensure that sequence is within window */
314		if (ts > ps->deadline_ts) {
315			list_del_init(&ps->head);
316			pool_put_pseq_elem(ps);
317			continue;
318		}
319
320		delta_ts = ts - ps->last_ts;
321		factor = pde_get_multiple(delta_ts, ps->pri,
322					  pde->rs->max_pri_tolerance);
323		if (factor > 0) {
324			ps->last_ts = ts;
325			ps->count++;
326
327			if (max_count < ps->count)
328				max_count = ps->count;
329		} else {
330			ps->count_falses++;
331		}
332	}
333	return max_count;
334}
335
336static struct pri_sequence *
337pseq_handler_check_detection(struct pri_detector *pde)
338{
339	struct pri_sequence *ps;
340
341	if (list_empty(&pde->sequences))
342		return NULL;
343
344	list_for_each_entry(ps, &pde->sequences, head) {
345		/*
346		 * we assume to have enough matching confidence if we
347		 * 1) have enough pulses
348		 * 2) have more matching than false pulses
349		 */
350		if ((ps->count >= pde->rs->ppb_thresh) &&
351		    (ps->count * pde->rs->num_pri >= ps->count_falses))
352			return ps;
353	}
354	return NULL;
355}
356
357
358/* free pulse queue and sequences list and give objects back to pools */
359static void pri_detector_reset(struct pri_detector *pde, u64 ts)
360{
361	struct pri_sequence *ps, *ps0;
362	struct pulse_elem *p, *p0;
363	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
364		list_del_init(&ps->head);
365		pool_put_pseq_elem(ps);
366	}
367	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
368		list_del_init(&p->head);
369		pool_put_pulse_elem(p);
370	}
371	pde->count = 0;
372	pde->last_ts = ts;
373}
374
/* destroy a detector: recycle its objects, drop the pool ref, free it */
static void pri_detector_exit(struct pri_detector *de)
{
	/* return queued objects to the pools before the ref drop below
	 * may free the pools themselves */
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
381
382static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
383						   struct pulse_event *event)
384{
385	u32 max_updated_seq;
386	struct pri_sequence *ps;
387	u64 ts = event->ts;
388	const struct radar_detector_specs *rs = de->rs;
389
390	/* ignore pulses not within width range */
391	if ((rs->width_min > event->width) || (rs->width_max < event->width))
392		return NULL;
393
394	if ((ts - de->last_ts) < rs->max_pri_tolerance)
395		/* if delta to last pulse is too short, don't use this pulse */
396		return NULL;
397	/* radar detector spec needs chirp, but not detected */
398	if (rs->chirp && rs->chirp != event->chirp)
399		return NULL;
400
401	de->last_ts = ts;
402
403	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
404
405	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
406		pri_detector_reset(de, ts);
407		return NULL;
408	}
409
410	ps = pseq_handler_check_detection(de);
411
412	if (ps == NULL)
413		pulse_queue_enqueue(de, ts);
414
415	return ps;
416}
417
418struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
419{
420	struct pri_detector *de;
421
422	de = kzalloc(sizeof(*de), GFP_ATOMIC);
423	if (de == NULL)
424		return NULL;
425	de->exit = pri_detector_exit;
426	de->add_pulse = pri_detector_add_pulse;
427	de->reset = pri_detector_reset;
428
429	INIT_LIST_HEAD(&de->sequences);
430	INIT_LIST_HEAD(&de->pulses);
431	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
432	de->max_count = rs->ppb * 2;
433	de->rs = rs;
434
435	pool_register_ref();
436	return de;
437}
438