originator.c

/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;
	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
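
/* Editor's illustration (not part of the original file): the vlan helpers
 * above follow the usual batman-adv reference pattern -- every successful
 * *_get()/*_new() must be paired with a *_free_ref(). A minimal sketch,
 * assuming a valid orig_node pointer and the untagged VLAN (BATADV_NO_FLAGS,
 * as used further below in batadv_orig_node_new()):
 *
 *	struct batadv_orig_node_vlan *vlan;
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, BATADV_NO_FLAGS);
 *	if (vlan) {
 *		... use the vlan object (e.g. vlan->vid) ...
 *		batadv_orig_node_vlan_free_ref(vlan);
 *	}
 */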

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_release(neigh_ifinfo);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @neigh_node: the neigh_node to free
 */
static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	batadv_hardif_free_ref(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter and
 *  possibly release it
 * @neigh_node: the neigh_node to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_release(neigh_node);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
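
/* Editor's sketch (not in the original source): callers of
 * batadv_orig_router_get() receive the router with its refcount already
 * increased and must drop that reference themselves. Assuming orig_node is
 * valid and the default interface is wanted:
 *
 *	struct batadv_neigh_node *router;
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (router) {
 *		... forward via router->addr on router->if_incoming ...
 *		batadv_neigh_node_free_ref(router);
 *	}
 */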

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}
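
/* Editor's illustration (not part of the original file):
 * batadv_orig_ifinfo_new() either finds an existing per-interface entry or
 * creates one, and in both cases hands back a reference the caller has to
 * release. A hedged sketch, assuming valid orig_node/if_outgoing pointers:
 *
 *	struct batadv_orig_ifinfo *orig_ifinfo;
 *
 *	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
 *	if (!orig_ifinfo)
 *		return; -- allocation or interface reference failed
 *	... read or update the per-interface routing state ...
 *	batadv_orig_ifinfo_free_ref(orig_ifinfo);
 */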

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}
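
/* Editor's sketch (not in the original source): the neigh_ifinfo helpers
 * mirror the orig_ifinfo ones -- per-outgoing-interface state for a
 * neighbour is looked up (or created) and must be released after use.
 * Assuming a valid neigh_node and outgoing hard interface:
 *
 *	struct batadv_neigh_ifinfo *neigh_ifinfo;
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
 *	if (neigh_ifinfo) {
 *		... inspect the per-interface data kept for neigh_node ...
 *		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 *	}
 */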

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}

	rcu_read_unlock();

	return res;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	return neigh_node;
}
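
/* Editor's illustration (not part of the original file):
 * batadv_neigh_node_new() returns an already known neighbour for this
 * interface/address pair when one exists, otherwise it allocates one; in
 * both cases the caller owns one reference. A sketch, assuming the caller
 * already holds orig_node and hard_iface and has the neighbour's MAC in
 * neigh_addr:
 *
 *	struct batadv_neigh_node *neigh_node;
 *
 *	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
 *	if (!neigh_node)
 *		return; -- allocation or interface reference failed
 *	... update routing state for neigh_node ...
 *	batadv_neigh_node_free_ref(neigh_node);
 */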

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
	struct batadv_neigh_node *router;

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_release(orig_ifinfo);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @orig_node: the orig node to free
 */
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_release(orig_node);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}
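
/* Editor's sketch (an assumption about the call sites, which live outside
 * this file): batadv_originator_init() and batadv_originator_free() are
 * intended to be paired over the lifetime of a batman-adv soft interface.
 * Assuming bat_priv was obtained via netdev_priv() of that soft interface,
 * the setup/teardown pattern is roughly:
 *
 *	if (batadv_originator_init(bat_priv) < 0)
 *		goto err; -- orig hash allocation failed, nothing to undo
 *	...
 *	batadv_originator_free(bat_priv); -- cancels the periodic purge work
 *					     and drops all orig_node references
 */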

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
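
/* Editor's illustration (not part of the original file):
 * batadv_orig_node_new() only allocates and initialises the object -- as the
 * kernel-doc above notes, it is not added to the originator hash here, and
 * the caller receives it with an extra reference. A hedged sketch, assuming
 * addr holds the originator's MAC address and the caller (typically routing
 * algorithm specific code) takes care of hashing the node:
 *
 *	struct batadv_orig_node *orig_node;
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return;
 *	... insert the node into bat_priv->orig_hash, hand out references ...
 *	batadv_orig_node_free_ref(orig_node); -- drop the creator's reference
 *						 when it is no longer needed
 */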

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neigh node whose ifinfo entries are to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for the default (NULL) interface ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator information for a
 *  specific outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}