dfs_pri_detector.c

/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"

struct ath_dfs_pool_stats global_dfs_pool_stats = {};

#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
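
/*
 * GET_PRI_TO_USE(): if the spec's PRI range collapses to a single nominal
 * value (MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE), use that fixed PRI;
 * otherwise use the PRI measured at runtime.
 */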
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	MIN + PRI_TOLERANCE : RUNTIME)

/**
 * struct pulse_elem - elements in pulse queue
 * @head: list membership in the detector's pulse queue
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};

/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @val: value to check
 * @fraction: fraction (e.g. the candidate PRI) to divide by
 * @tolerance: maximum allowed deviation in usecs
 *
 * Return: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 remainder;
	u32 factor;
	u32 delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);

	if (delta <= tolerance)
		/* val and fraction are within tolerance */
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		/* no exact match */
		if ((fraction - remainder) <= tolerance)
			/* remainder is within tolerance */
			factor++;
		else
			factor = 0;
	}
	return factor;
}
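
/*
 * Illustrative example: pde_get_multiple(1000, 330, 16) returns 3, since
 * 1000 = 3 * 330 + 10 and the remainder of 10 usecs lies within the
 * tolerance of 16.
 */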

/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);
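
/* each pri_detector instance takes one reference on the shared pools */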
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}

static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
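
/* recycle pulse and sequence objects through the shared singleton pools */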
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}

static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}

static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;

	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}

static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;

	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}
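
/*
 * The pulse queue is kept newest-first (list_add() prepends), so the list
 * tail is always the oldest pulse.
 */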
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
	struct list_head *l = &pde->pulses;

	if (list_empty(l))
		return NULL;
	return list_entry(l->prev, struct pulse_elem, head);
}

static bool pulse_queue_dequeue(struct pri_detector *pde)
{
	struct pulse_elem *p = pulse_queue_get_tail(pde);

	if (p != NULL) {
		list_del_init(&p->head);
		pde->count--;
		/* give it back to pool */
		pool_put_pulse_elem(p);
	}
	return (pde->count > 0);
}

/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
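
/*
 * Add a pulse time stamp to the queue: reuse an element from the pool if
 * possible, otherwise fall back to kmalloc(); afterwards drop pulses that
 * fell out of the detection window and cap the queue at max_count.
 */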
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();

	if (p == NULL) {
		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}
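
/*
 * For each queued pulse, try to open a new candidate sequence whose PRI is
 * the delta between that pulse and the new time stamp, then count how many
 * older pulses match a multiple of this PRI. Sequences that collect more
 * than min_count matching pulses are added to the detector's sequence list.
 */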
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;

	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;

			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse matches (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}

/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;

	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	return max_count;
}
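
/* return the first sequence that satisfies the detection criteria, or NULL */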
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
	struct pri_sequence *ps;

	if (list_empty(&pde->sequences))
		return NULL;

	list_for_each_entry(ps, &pde->sequences, head) {
		/*
		 * we assume enough matching confidence if we
		 * 1) have enough pulses
		 * 2) have more matching than false pulses
		 */
		if ((ps->count >= pde->rs->ppb_thresh) &&
		    (ps->count * pde->rs->num_pri >= ps->count_falses))
			return ps;
	}
	return NULL;
}

/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;

	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	pde->count = 0;
	pde->last_ts = ts;
}
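
/* release all queued objects, drop the pool reference and free the detector */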
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
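
/*
 * Feed one pulse event into the detector. Returns the first pri_sequence
 * that satisfies the detection criteria, or NULL if no radar pattern has
 * been detected yet.
 */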
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
						   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return NULL;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return NULL;
	/* radar detector spec requires a chirp, but the pulse has none */
	if (rs->chirp && rs->chirp != event->chirp)
		return NULL;

	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		pri_detector_reset(de, ts);
		return NULL;
	}

	ps = pseq_handler_check_detection(de);

	if (ps == NULL)
		pulse_queue_enqueue(de, ts);

	return ps;
}
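
/*
 * Allocate and initialize a PRI detector for the given radar spec and take
 * a reference on the shared pools.
 *
 * Hypothetical usage sketch (the caller and surrounding pattern detector
 * are assumptions, not part of this file):
 *
 *	struct pri_detector *de = pri_detector_init(rs);
 *
 *	if (de) {
 *		struct pri_sequence *ps = de->add_pulse(de, &event);
 *
 *		if (ps)
 *			... radar pattern detected for this spec ...
 *		de->exit(de);
 *	}
 */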
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;

	de = kzalloc(sizeof(*de), GFP_ATOMIC);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}