devinet.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405
  1. /*
  2. * NET3 IP device support routines.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Derived from the IP parts of dev.c 1.0.19
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Mark Evans, <evansmp@uhura.aston.ac.uk>
  13. *
  14. * Additional Authors:
  15. * Alan Cox, <gw4pts@gw4pts.ampr.org>
  16. * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  17. *
  18. * Changes:
  19. * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
  20. * lists.
  21. * Cyrus Durgin: updated for kmod
  22. * Matthias Andree: in devinet_ioctl, compare label and
  23. * address (4.4BSD alias style support),
  24. * fall back to comparing just the label
  25. * if no match found.
  26. */
  27. #include <asm/uaccess.h>
  28. #include <linux/bitops.h>
  29. #include <linux/capability.h>
  30. #include <linux/module.h>
  31. #include <linux/types.h>
  32. #include <linux/kernel.h>
  33. #include <linux/string.h>
  34. #include <linux/mm.h>
  35. #include <linux/socket.h>
  36. #include <linux/sockios.h>
  37. #include <linux/in.h>
  38. #include <linux/errno.h>
  39. #include <linux/interrupt.h>
  40. #include <linux/if_addr.h>
  41. #include <linux/if_ether.h>
  42. #include <linux/inet.h>
  43. #include <linux/netdevice.h>
  44. #include <linux/etherdevice.h>
  45. #include <linux/skbuff.h>
  46. #include <linux/init.h>
  47. #include <linux/notifier.h>
  48. #include <linux/inetdevice.h>
  49. #include <linux/igmp.h>
  50. #include <linux/slab.h>
  51. #include <linux/hash.h>
  52. #ifdef CONFIG_SYSCTL
  53. #include <linux/sysctl.h>
  54. #endif
  55. #include <linux/kmod.h>
  56. #include <linux/netconf.h>
  57. #include <net/arp.h>
  58. #include <net/ip.h>
  59. #include <net/route.h>
  60. #include <net/ip_fib.h>
  61. #include <net/rtnetlink.h>
  62. #include <net/net_namespace.h>
  63. #include <net/addrconf.h>
  64. #include "fib_lookup.h"
/* Template per-device IPv4 configuration copied into each new in_device
 * (see inetdev_init()).  ICMP redirect handling and shared-media are
 * enabled by default; the IGMP unsolicited report intervals are in
 * milliseconds, as noted inline.
 */
static struct ipv4_devconf ipv4_devconf = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};
/* Per-namespace "default" devconf (exposed as conf/default in sysctl).
 * Same defaults as ipv4_devconf, but additionally accepts source-routed
 * packets by default.
 */
static struct ipv4_devconf ipv4_devconf_dflt = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};
/* Access a field of the per-namespace default devconf. */
#define IPV4_DEVCONF_DFLT(net, attr) \
	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)

/* Netlink attribute validation policy for RTM_NEWADDR/RTM_DELADDR
 * (IPv4 ifaddr messages).  Addresses are raw 32-bit values; the label
 * is bounded by the interface name size.
 */
static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
	[IFA_LOCAL]		= { .type = NLA_U32 },
	[IFA_ADDRESS]		= { .type = NLA_U32 },
	[IFA_BROADCAST]		= { .type = NLA_U32 },
	[IFA_LABEL]		= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
	[IFA_FLAGS]		= { .type = NLA_U32 },
};
/* Global hash table of all IPv4 addresses (struct in_ifaddr), keyed by
 * local address mixed with the netns, used by __ip_dev_find().
 * 256 buckets; entries are chained RCU-safely.
 */
#define IN4_ADDR_HSIZE_SHIFT	8
#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)

static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
  99. static u32 inet_addr_hash(const struct net *net, __be32 addr)
  100. {
  101. u32 val = (__force u32) addr ^ net_hash_mix(net);
  102. return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
  103. }
  104. static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
  105. {
  106. u32 hash = inet_addr_hash(net, ifa->ifa_local);
  107. ASSERT_RTNL();
  108. hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
  109. }
/* Unlink @ifa from the global address hash (RCU-safe removal).
 * Caller must hold RTNL.
 */
static void inet_hash_remove(struct in_ifaddr *ifa)
{
	ASSERT_RTNL();
	hlist_del_init_rcu(&ifa->hash);
}
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * Returns the device owning @addr, or NULL if none.  If a caller uses
 * devref=false, it should be protected by RCU, or RTNL.
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	u32 hash = inet_addr_hash(net, addr);
	struct net_device *result = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
		if (ifa->ifa_local == addr) {
			struct net_device *dev = ifa->ifa_dev->dev;

			/* The hash table is shared by all namespaces;
			 * skip entries belonging to a different one.
			 */
			if (!net_eq(dev_net(dev), net))
				continue;
			result = dev;
			break;
		}
	}
	if (!result) {
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		/* Fallback to FIB local table so that communication
		 * over loopback subnets work.
		 */
		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			result = FIB_RES_DEV(res);
	}
	/* Take the reference while still inside the RCU section so the
	 * device cannot disappear between lookup and dev_hold().
	 */
	if (result && devref)
		dev_hold(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(__ip_dev_find);
/* Forward declarations for helpers defined later in this file. */
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);

/* Notifier chain fired on address add/remove (NETDEV_UP/NETDEV_DOWN). */
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);

static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
/* No-op stubs when sysctl support is compiled out. */
static int devinet_sysctl_register(struct in_device *idev)
{
	return 0;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
  173. /* Locks all the inet devices. */
  174. static struct in_ifaddr *inet_alloc_ifa(void)
  175. {
  176. return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
  177. }
/* RCU callback: release the in_device reference held by @ifa (taken in
 * inet_set_ifa()/rtm_to_ifaddr()) and free the entry itself.
 */
static void inet_rcu_free_ifa(struct rcu_head *head)
{
	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);

	if (ifa->ifa_dev)
		in_dev_put(ifa->ifa_dev);
	kfree(ifa);
}
/* Free @ifa after a grace period, so RCU readers traversing the hash
 * table or the per-device list never see freed memory.
 */
static void inet_free_ifa(struct in_ifaddr *ifa)
{
	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}
/* Final teardown of an in_device once its refcount drops to zero.
 * By now no addresses or multicast state may remain; warn otherwise.
 * The struct itself is only freed if it was properly marked dead in
 * inetdev_destroy(), else we leak deliberately and complain.
 */
void in_dev_finish_destroy(struct in_device *idev)
{
	struct net_device *dev = idev->dev;

	WARN_ON(idev->ifa_list);
	WARN_ON(idev->mc_list);
	kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
	/* Drop the device reference taken in inetdev_init(). */
	dev_put(dev);
	if (!idev->dead)
		pr_err("Freeing alive in_device %p\n", idev);
	else
		kfree(idev);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the IPv4 state (in_device) for @dev.
 * Called under RTNL.  Returns the new in_device or an ERR_PTR().
 * The devconf is seeded from the namespace default, ARP parameters and
 * sysctls are registered, and only as the very last step is the struct
 * published via dev->ip_ptr, since receive paths key off that pointer.
 */
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;
	int err = -ENOMEM;

	ASSERT_RTNL();

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
			sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	/* LRO must be off when this device forwards packets. */
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		dev_disable_lro(dev);
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

	err = devinet_sysctl_register(in_dev);
	if (err) {
		/* Mark dead so in_dev_finish_destroy() will kfree it. */
		in_dev->dead = 1;
		in_dev_put(in_dev);
		in_dev = NULL;
		goto out;
	}
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev ?: ERR_PTR(err);
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
/* RCU callback: drop the final in_device reference after readers that
 * may still see dev->ip_ptr have finished.
 */
static void in_dev_rcu_put(struct rcu_head *head)
{
	struct in_device *idev = container_of(head, struct in_device, rcu_head);

	in_dev_put(idev);
}
/* Tear down the IPv4 state of a device being unregistered.
 * Called under RTNL.  Marks the in_device dead first so concurrent
 * paths stop using it, deletes every remaining address, unpublishes
 * dev->ip_ptr, then defers the final put past an RCU grace period.
 */
static void inetdev_destroy(struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	struct net_device *dev;

	ASSERT_RTNL();

	dev = in_dev->dev;

	in_dev->dead = 1;

	ip_mc_destroy_dev(in_dev);

	/* Pop addresses off the head until the list is empty. */
	while ((ifa = in_dev->ifa_list) != NULL) {
		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
		inet_free_ifa(ifa);
	}

	RCU_INIT_POINTER(dev->ip_ptr, NULL);

	devinet_sysctl_unregister(in_dev);
	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
	arp_ifdown(dev);

	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
  268. int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
  269. {
  270. rcu_read_lock();
  271. for_primary_ifa(in_dev) {
  272. if (inet_ifa_match(a, ifa)) {
  273. if (!b || inet_ifa_match(b, ifa)) {
  274. rcu_read_unlock();
  275. return 1;
  276. }
  277. }
  278. } endfor_ifa(in_dev);
  279. rcu_read_unlock();
  280. return 0;
  281. }
/* Remove the address *@ifap from @in_dev's list.
 * Deleting a primary address normally deletes its secondaries too;
 * with promote_secondaries enabled, the first matching secondary is
 * promoted to primary instead and its sibling routes are re-added.
 * @destroy non-zero means also free the entry (via RCU); @nlh/@portid
 * are passed through to the netlink notification.  Caller holds RTNL.
 */
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			   int destroy, struct nlmsghdr *nlh, u32 portid)
{
	struct in_ifaddr *promote = NULL;
	struct in_ifaddr *ifa, *ifa1 = *ifap;
	struct in_ifaddr *last_prim = in_dev->ifa_list;
	struct in_ifaddr *prev_prom = NULL;
	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

	ASSERT_RTNL();

	/* Device is going away: skip promotion, just unlink. */
	if (in_dev->dead)
		goto no_promotions;

	/* 1. Deleting primary ifaddr forces deletion all secondaries
	 * unless alias promotion is set
	 **/

	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
		struct in_ifaddr **ifap1 = &ifa1->ifa_next;

		while ((ifa = *ifap1) != NULL) {
			/* Track the last primary with scope >= ifa1's:
			 * the promoted entry is re-linked after it so
			 * primaries stay grouped before secondaries.
			 */
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
			    ifa1->ifa_scope <= ifa->ifa_scope)
				last_prim = ifa;

			/* Keep entries that are not secondaries of the
			 * deleted primary's subnet.
			 */
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
			    ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
				ifap1 = &ifa->ifa_next;
				prev_prom = ifa;
				continue;
			}

			if (!do_promote) {
				/* No promotion: delete the secondary. */
				inet_hash_remove(ifa);
				*ifap1 = ifa->ifa_next;

				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
				blocking_notifier_call_chain(&inetaddr_chain,
						NETDEV_DOWN, ifa);
				inet_free_ifa(ifa);
			} else {
				promote = ifa;
				break;
			}
		}
	}

	/* On promotion all secondaries from subnet are changing
	 * the primary IP, we must remove all their routes silently
	 * and later to add them back with new prefsrc. Do this
	 * while all addresses are on the device list.
	 */
	for (ifa = promote; ifa; ifa = ifa->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa))
			fib_del_ifaddr(ifa, ifa1);
	}

no_promotions:
	/* 2. Unlink it */

	*ifap = ifa1->ifa_next;
	inet_hash_remove(ifa1);

	/* 3. Announce address deletion */

	/* Send message first, then call notifier.
	   At first sight, FIB update triggered by notifier
	   will refer to already deleted ifaddr, that could confuse
	   netlink listeners. It is not true: look, gated sees
	   that route deleted and if it still thinks that ifaddr
	   is valid, it will try to restore deleted routes... Grr.
	   So that, this order is correct.
	 */
	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);

	if (promote) {
		struct in_ifaddr *next_sec = promote->ifa_next;

		/* Move the promoted entry out of the secondary run and
		 * re-link it right after the last suitable primary.
		 */
		if (prev_prom) {
			prev_prom->ifa_next = promote->ifa_next;
			promote->ifa_next = last_prim->ifa_next;
			last_prim->ifa_next = promote;
		}

		promote->ifa_flags &= ~IFA_F_SECONDARY;
		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
		blocking_notifier_call_chain(&inetaddr_chain,
				NETDEV_UP, promote);
		/* Re-add routes for the remaining secondaries, now with
		 * the promoted address as preferred source.
		 */
		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
			if (ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa))
				continue;
			fib_add_ifaddr(ifa);
		}

	}
	if (destroy)
		inet_free_ifa(ifa1);
}
/* Delete an address without netlink request context (kernel-internal
 * callers); convenience wrapper around __inet_del_ifa().
 */
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy)
{
	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}

/* Deferred work that expires address valid/preferred lifetimes. */
static void check_lifetime(struct work_struct *work);

static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, classifying it as primary
 * or secondary.  An address whose subnet already has a primary becomes
 * IFA_F_SECONDARY and is appended; a new primary is linked after the
 * last primary of equal-or-wider scope.  Consumes @ifa on error.
 * Caller holds RTNL.  Returns 0, -EEXIST or -EINVAL.
 */
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 portid)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap, **last_primary;

	ASSERT_RTNL();

	/* An all-zero local address is silently dropped, not an error. */
	if (!ifa->ifa_local) {
		inet_free_ifa(ifa);
		return 0;
	}

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			/* Exact duplicate local address. */
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			/* Same subnet must keep a single scope. */
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}
	}

	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
		/* Feed some address entropy into the PRNG. */
		prandom_seed((__force u32) ifa->ifa_local);
		ifap = last_primary;
	}

	ifa->ifa_next = *ifap;
	*ifap = ifa;

	inet_hash_insert(dev_net(in_dev->dev), ifa);

	/* Re-arm lifetime expiry immediately for the new address. */
	cancel_delayed_work(&check_lifetime_work);
	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}
/* Insert an address without netlink request context (ioctl and other
 * kernel-internal callers).
 */
static int inet_insert_ifa(struct in_ifaddr *ifa)
{
	return __inet_insert_ifa(ifa, NULL, 0);
}
/* Bind @ifa to @dev's in_device and insert it.
 * Consumes @ifa on failure.  Loopback addresses are forced to host
 * scope.  Caller holds RTNL.  Returns 0 or a negative errno.
 */
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	ASSERT_RTNL();

	if (!in_dev) {
		inet_free_ifa(ifa);
		return -ENOBUFS;
	}
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	if (ifa->ifa_dev != in_dev) {
		/* A freshly allocated ifa must not be bound yet. */
		WARN_ON(ifa->ifa_dev);
		/* Reference dropped later by inet_rcu_free_ifa(). */
		in_dev_hold(in_dev);
		ifa->ifa_dev = in_dev;
	}
	if (ipv4_is_loopback(ifa->ifa_local))
		ifa->ifa_scope = RT_SCOPE_HOST;
	return inet_insert_ifa(ifa);
}
  444. /* Caller must hold RCU or RTNL :
  445. * We dont take a reference on found in_device
  446. */
  447. struct in_device *inetdev_by_index(struct net *net, int ifindex)
  448. {
  449. struct net_device *dev;
  450. struct in_device *in_dev = NULL;
  451. rcu_read_lock();
  452. dev = dev_get_by_index_rcu(net, ifindex);
  453. if (dev)
  454. in_dev = rcu_dereference_rtnl(dev->ip_ptr);
  455. rcu_read_unlock();
  456. return in_dev;
  457. }
  458. EXPORT_SYMBOL(inetdev_by_index);
  459. /* Called only from RTNL semaphored context. No locks. */
  460. struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
  461. __be32 mask)
  462. {
  463. ASSERT_RTNL();
  464. for_primary_ifa(in_dev) {
  465. if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
  466. return ifa;
  467. } endfor_ifa(in_dev);
  468. return NULL;
  469. }
  470. static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
  471. {
  472. struct ip_mreqn mreq = {
  473. .imr_multiaddr.s_addr = ifa->ifa_address,
  474. .imr_ifindex = ifa->ifa_dev->dev->ifindex,
  475. };
  476. int ret;
  477. ASSERT_RTNL();
  478. lock_sock(sk);
  479. if (join)
  480. ret = ip_mc_join_group(sk, &mreq);
  481. else
  482. ret = ip_mc_leave_group(sk, &mreq);
  483. release_sock(sk);
  484. return ret;
  485. }
/* RTM_DELADDR handler: delete the first address on the indicated
 * device that matches every attribute the request supplied
 * (IFA_LOCAL, IFA_LABEL, IFA_ADDRESS + prefix length).
 * Returns 0, or -ENODEV / -EADDRNOTAVAIL / parse errors.
 */
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct in_device *in_dev;
	struct ifaddrmsg *ifm;
	struct in_ifaddr *ifa, **ifap;
	int err = -EINVAL;

	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	in_dev = inetdev_by_index(net, ifm->ifa_index);
	if (!in_dev) {
		err = -ENODEV;
		goto errout;
	}

	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
	     ifap = &ifa->ifa_next) {
		/* Only attributes present in the request constrain the
		 * match; absent ones are wildcards.
		 */
		if (tb[IFA_LOCAL] &&
		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		if (tb[IFA_ADDRESS] &&
		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
			continue;

		/* Auto-joined multicast addresses leave the group too. */
		if (ipv4_is_multicast(ifa->ifa_address))
			ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
		return 0;
	}

	err = -EADDRNOTAVAIL;
errout:
	return err;
}
/* Lifetime value meaning "never expires". */
#define INFINITY_LIFE_TIME	0xFFFFFFFF

/* Periodic worker that ages addresses: deletes those whose valid
 * lifetime has passed and marks IFA_F_DEPRECATED those past their
 * preferred lifetime.  Each hash bucket is first scanned cheaply under
 * RCU; only if something needs changing is the scan repeated under
 * RTNL to actually modify state.  Reschedules itself for the earliest
 * upcoming expiry (clamped between the fuzz min and the check period).
 */
static void check_lifetime(struct work_struct *work)
{
	unsigned long now, next, next_sec, next_sched;
	struct in_ifaddr *ifa;
	struct hlist_node *n;
	int i;

	now = jiffies;
	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);

	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
		bool change_needed = false;

		/* Pass 1 (lockless): detect expirations and track the
		 * next wakeup time.
		 */
		rcu_read_lock();
		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
			unsigned long age;

			if (ifa->ifa_flags & IFA_F_PERMANENT)
				continue;

			/* We try to batch several events at once. */
			age = (now - ifa->ifa_tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
			    age >= ifa->ifa_valid_lft) {
				change_needed = true;
			} else if (ifa->ifa_preferred_lft ==
				   INFINITY_LIFE_TIME) {
				continue;
			} else if (age >= ifa->ifa_preferred_lft) {
				if (time_before(ifa->ifa_tstamp +
						ifa->ifa_valid_lft * HZ, next))
					next = ifa->ifa_tstamp +
					       ifa->ifa_valid_lft * HZ;

				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
					change_needed = true;
			} else if (time_before(ifa->ifa_tstamp +
					       ifa->ifa_preferred_lft * HZ,
					       next)) {
				next = ifa->ifa_tstamp +
				       ifa->ifa_preferred_lft * HZ;
			}
		}
		rcu_read_unlock();
		if (!change_needed)
			continue;

		/* Pass 2 (under RTNL): apply the changes detected above. */
		rtnl_lock();
		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
			unsigned long age;

			if (ifa->ifa_flags & IFA_F_PERMANENT)
				continue;

			/* We try to batch several events at once. */
			age = (now - ifa->ifa_tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
			    age >= ifa->ifa_valid_lft) {
				struct in_ifaddr **ifap;

				/* Find the entry's link in its device
				 * list and delete it for real.
				 */
				for (ifap = &ifa->ifa_dev->ifa_list;
				     *ifap != NULL; ifap = &(*ifap)->ifa_next) {
					if (*ifap == ifa) {
						inet_del_ifa(ifa->ifa_dev,
							     ifap, 1);
						break;
					}
				}
			} else if (ifa->ifa_preferred_lft !=
				   INFINITY_LIFE_TIME &&
				   age >= ifa->ifa_preferred_lft &&
				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
				ifa->ifa_flags |= IFA_F_DEPRECATED;
				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
			}
		}
		rtnl_unlock();
	}

	next_sec = round_jiffies_up(next);
	next_sched = next;

	/* If rounded timeout is accurate enough, accept it. */
	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
		next_sched = next_sec;

	now = jiffies;
	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;

	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
			next_sched - now);
}
/* Apply valid/preferred lifetimes (in seconds, INFINITY_LIFE_TIME for
 * "forever") to @ifa.  An infinite valid lifetime makes the address
 * IFA_F_PERMANENT; a zero preferred lifetime deprecates it at once.
 * Also stamps the modification (and first-creation) times.
 */
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
			     __u32 prefered_lft)
{
	unsigned long timeout;

	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);

	timeout = addrconf_timeout_fixup(valid_lft, HZ);
	if (addrconf_finite_timeout(timeout))
		ifa->ifa_valid_lft = timeout;
	else
		ifa->ifa_flags |= IFA_F_PERMANENT;

	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
	if (addrconf_finite_timeout(timeout)) {
		if (timeout == 0)
			ifa->ifa_flags |= IFA_F_DEPRECATED;
		ifa->ifa_preferred_lft = timeout;
	}
	ifa->ifa_tstamp = jiffies;
	if (!ifa->ifa_cstamp)
		ifa->ifa_cstamp = ifa->ifa_tstamp;
}
/* Build an in_ifaddr from an RTM_NEWADDR netlink request.
 * Validates the message, resolves the target device, and fills in all
 * address fields (IFA_ADDRESS defaults to IFA_LOCAL, the label to the
 * device name).  Lifetimes from IFA_CACHEINFO are returned through
 * @pvalid_lft/@pprefered_lft.  Returns the new entry, holding a
 * reference on the in_device, or an ERR_PTR().
 */
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
				       __u32 *pvalid_lft, __u32 *pprefered_lft)
{
	struct nlattr *tb[IFA_MAX+1];
	struct in_ifaddr *ifa;
	struct ifaddrmsg *ifm;
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	err = -EINVAL;
	/* A local address and a sane prefix length are mandatory. */
	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
		goto errout;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	err = -ENODEV;
	if (!dev)
		goto errout;

	in_dev = __in_dev_get_rtnl(dev);
	err = -ENOBUFS;
	if (!in_dev)
		goto errout;

	ifa = inet_alloc_ifa();
	if (!ifa)
		/*
		 * A potential indev allocation can be left alive, it stays
		 * assigned to its device and is destroy with it.
		 */
		goto errout;

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	/* Reference is transferred to ifa->ifa_dev below and released
	 * by inet_rcu_free_ifa() when the entry dies.
	 */
	in_dev_hold(in_dev);

	if (!tb[IFA_ADDRESS])
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	INIT_HLIST_NODE(&ifa->hash);
	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
	/* IFA_FLAGS (32-bit) supersedes the 8-bit header flags field. */
	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
					 ifm->ifa_flags;
	ifa->ifa_scope = ifm->ifa_scope;
	ifa->ifa_dev = in_dev;

	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);

	if (tb[IFA_BROADCAST])
		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);

	if (tb[IFA_LABEL])
		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ci;

		ci = nla_data(tb[IFA_CACHEINFO]);
		/* Preferred lifetime may never exceed valid lifetime. */
		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
			err = -EINVAL;
			goto errout_free;
		}
		*pvalid_lft = ci->ifa_valid;
		*pprefered_lft = ci->ifa_prefered;
	}

	return ifa;

errout_free:
	inet_free_ifa(ifa);
errout:
	return ERR_PTR(err);
}
  694. static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
  695. {
  696. struct in_device *in_dev = ifa->ifa_dev;
  697. struct in_ifaddr *ifa1, **ifap;
  698. if (!ifa->ifa_local)
  699. return NULL;
  700. for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
  701. ifap = &ifa1->ifa_next) {
  702. if (ifa1->ifa_mask == ifa->ifa_mask &&
  703. inet_ifa_match(ifa1->ifa_address, ifa) &&
  704. ifa1->ifa_local == ifa->ifa_local)
  705. return ifa1;
  706. }
  707. return NULL;
  708. }
/* RTM_NEWADDR handler: add a new IPv4 address or update an existing one.
 *
 * A non-existing address is inserted (NLM_F_CREATE is deliberately not
 * required, see comment below).  For an existing address NLM_F_REPLACE
 * must be set and NLM_F_EXCL clear, and only the lifetimes are
 * refreshed.  Runs under RTNL.
 */
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct in_ifaddr *ifa;
	struct in_ifaddr *ifa_existing;
	__u32 valid_lft = INFINITY_LIFE_TIME;
	__u32 prefered_lft = INFINITY_LIFE_TIME;

	ASSERT_RTNL();

	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
	if (IS_ERR(ifa))
		return PTR_ERR(ifa);

	ifa_existing = find_matching_ifa(ifa);
	if (!ifa_existing) {
		/* It would be best to check for !NLM_F_CREATE here but
		 * userspace already relies on not having to provide this.
		 */
		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
			/* join the multicast group before publishing */
			int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
					       true, ifa);

			if (ret < 0) {
				inet_free_ifa(ifa);
				return ret;
			}
		}
		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
	} else {
		inet_free_ifa(ifa);

		if (nlh->nlmsg_flags & NLM_F_EXCL ||
		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
			return -EEXIST;
		ifa = ifa_existing;
		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		/* re-run lifetime expiry with the updated values */
		cancel_delayed_work(&check_lifetime_work);
		queue_delayed_work(system_power_efficient_wq,
				   &check_lifetime_work, 0);
		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
	}
	return 0;
}
  749. /*
  750. * Determine a default network mask, based on the IP address.
  751. */
  752. static int inet_abc_len(__be32 addr)
  753. {
  754. int rc = -1; /* Something else, probably a multicast. */
  755. if (ipv4_is_zeronet(addr))
  756. rc = 0;
  757. else {
  758. __u32 haddr = ntohl(addr);
  759. if (IN_CLASSA(haddr))
  760. rc = 8;
  761. else if (IN_CLASSB(haddr))
  762. rc = 16;
  763. else if (IN_CLASSC(haddr))
  764. rc = 24;
  765. }
  766. return rc;
  767. }
/* Handle the classic SIOC{G,S}IF* socket ioctls for IPv4 addresses.
 *
 * @cmd selects the operation; @arg points to a user-space struct ifreq.
 * Get requests copy the result back to user space; set requests need
 * CAP_NET_ADMIN in the device's user namespace.  The ifr_name may carry
 * a ":alias" suffix selecting a specific address label.
 *
 * Returns 0 on success or a negative errno.
 */
int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	struct sockaddr_in sin_orig;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	struct in_device *in_dev;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct net_device *dev;
	char *colon;
	int ret = -EFAULT;
	int tryaddrmatch = 0;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		goto out;
	ifr.ifr_name[IFNAMSIZ - 1] = 0;

	/* save original address for comparison */
	memcpy(&sin_orig, sin, sizeof(*sin));

	/* strip any ":alias" suffix for the device lookup */
	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	dev_load(net, ifr.ifr_name);

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
	case SIOCGIFBRDADDR:	/* Get the broadcast address */
	case SIOCGIFDSTADDR:	/* Get the destination address */
	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		/* Note that these ioctls will not sleep,
		   so that we do not impose a lock.
		   One day we will be forced to put shlock here (I mean SMP)
		 */
		tryaddrmatch = (sin_orig.sin_family == AF_INET);
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		break;

	case SIOCSIFFLAGS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto out;
		break;
	case SIOCSIFADDR:	/* Set interface address (and family) */
	case SIOCSIFBRDADDR:	/* Set the broadcast address */
	case SIOCSIFDSTADDR:	/* Set the destination address */
	case SIOCSIFNETMASK:	/* Set the netmask for the interface */
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto out;
		ret = -EINVAL;
		if (sin->sin_family != AF_INET)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	rtnl_lock();

	ret = -ENODEV;
	dev = __dev_get_by_name(net, ifr.ifr_name);
	if (!dev)
		goto done;

	/* restore the alias suffix for the label comparisons below */
	if (colon)
		*colon = ':';

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		if (tryaddrmatch) {
			/* Matthias Andree */
			/* compare label and address (4.4BSD style) */
			/* note: we only do this for a limited set of ioctls
			   and only if the original address family was AF_INET.
			   This is checked above. */
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
				    sin_orig.sin_addr.s_addr ==
							ifa->ifa_local) {
					break; /* found */
				}
			}
		}
		/* we didn't get a match, maybe the application is
		   4.3BSD-style and passed in junk so we fall back to
		   comparing just the label */
		if (!ifa) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
					break;
		}
	}

	/* SIOCSIFADDR may create a new address, and SIOCSIFFLAGS acts on
	 * the device itself, so only those may proceed without a match. */
	ret = -EADDRNOTAVAIL;
	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
		goto done;

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
		sin->sin_addr.s_addr = ifa->ifa_local;
		goto rarok;

	case SIOCGIFBRDADDR:	/* Get the broadcast address */
		sin->sin_addr.s_addr = ifa->ifa_broadcast;
		goto rarok;

	case SIOCGIFDSTADDR:	/* Get the destination address */
		sin->sin_addr.s_addr = ifa->ifa_address;
		goto rarok;

	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		sin->sin_addr.s_addr = ifa->ifa_mask;
		goto rarok;

	case SIOCSIFFLAGS:
		if (colon) {
			/* flags on an alias: only taking it down is supported */
			ret = -EADDRNOTAVAIL;
			if (!ifa)
				break;
			ret = 0;
			if (!(ifr.ifr_flags & IFF_UP))
				inet_del_ifa(in_dev, ifap, 1);
			break;
		}
		ret = dev_change_flags(dev, ifr.ifr_flags);
		break;

	case SIOCSIFADDR:	/* Set interface address (and family) */
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;

		if (!ifa) {
			ret = -ENOBUFS;
			ifa = inet_alloc_ifa();
			if (!ifa)
				break;
			INIT_HLIST_NODE(&ifa->hash);
			if (colon)
				memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
			else
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		} else {
			ret = 0;
			if (ifa->ifa_local == sin->sin_addr.s_addr)
				break;
			/* address changes: unlink, reset derived fields */
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = 0;
			ifa->ifa_scope = 0;
		}

		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;

		if (!(dev->flags & IFF_POINTOPOINT)) {
			/* derive mask/broadcast from the address class */
			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
			if ((dev->flags & IFF_BROADCAST) &&
			    ifa->ifa_prefixlen < 31)
				ifa->ifa_broadcast = ifa->ifa_address |
						     ~ifa->ifa_mask;
		} else {
			ifa->ifa_prefixlen = 32;
			ifa->ifa_mask = inet_make_mask(32);
		}
		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
		ret = inet_set_ifa(dev, ifa);
		break;

	case SIOCSIFBRDADDR:	/* Set the broadcast address */
		ret = 0;
		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = sin->sin_addr.s_addr;
			inet_insert_ifa(ifa);
		}
		break;

	case SIOCSIFDSTADDR:	/* Set the destination address */
		ret = 0;
		if (ifa->ifa_address == sin->sin_addr.s_addr)
			break;
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;
		ret = 0;
		inet_del_ifa(in_dev, ifap, 0);
		ifa->ifa_address = sin->sin_addr.s_addr;
		inet_insert_ifa(ifa);
		break;

	case SIOCSIFNETMASK:	/* Set the netmask for the interface */

		/*
		 *	The mask we set must be legal.
		 */
		ret = -EINVAL;
		if (bad_mask(sin->sin_addr.s_addr, 0))
			break;
		ret = 0;
		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
			__be32 old_mask = ifa->ifa_mask;

			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_mask = sin->sin_addr.s_addr;
			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);

			/* See if current broadcast address matches
			 * with current netmask, then recalculate
			 * the broadcast address. Otherwise it's a
			 * funny address, so don't touch it since
			 * the user seems to know what (s)he's doing...
			 */
			if ((dev->flags & IFF_BROADCAST) &&
			    (ifa->ifa_prefixlen < 31) &&
			    (ifa->ifa_broadcast ==
			     (ifa->ifa_local|~old_mask))) {
				ifa->ifa_broadcast = (ifa->ifa_local |
						      ~sin->sin_addr.s_addr);
			}
			inet_insert_ifa(ifa);
		}
		break;
	}
done:
	rtnl_unlock();
out:
	return ret;
rarok:
	rtnl_unlock();
	ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
	goto out;
}
/* SIOCGIFCONF helper: write one struct ifreq (label + local address)
 * per IPv4 address of @dev into the user buffer @buf, at most @len
 * bytes.  With a NULL @buf only the space required is computed.
 * Returns the number of bytes produced (or needed), or -EFAULT on a
 * failed copy to user space.
 */
static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct in_ifaddr *ifa;
	struct ifreq ifr;
	int done = 0;

	if (!in_dev)
		goto out;

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		if (!buf) {
			/* size-query mode: just count the bytes */
			done += sizeof(ifr);
			continue;
		}
		if (len < (int) sizeof(ifr))
			break;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, ifa->ifa_label);

		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
								ifa->ifa_local;

		if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
			done = -EFAULT;
			break;
		}
		buf += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
		done += sizeof(struct ifreq);
	}
out:
	return done;
}
/* Select a source address on @dev suitable for reaching @dst within
 * @scope.  Only primary addresses are considered; one on dst's subnet
 * is preferred, otherwise the first in-scope address.  If @dev has no
 * usable address, every device in the namespace is scanned (skipping
 * link-scoped addresses).  Returns the chosen address or 0.
 */
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
	__be32 addr = 0;
	struct in_device *in_dev;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto no_in_dev;

	for_primary_ifa(in_dev) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!dst || inet_ifa_match(dst, ifa)) {
			addr = ifa->ifa_local;
			break;
		}
		/* remember the first in-scope address as a fallback */
		if (!addr)
			addr = ifa->ifa_local;
	} endfor_ifa(in_dev);

	if (addr)
		goto out_unlock;
no_in_dev:

	/* Not loopback addresses on loopback should be preferred
	   in this case. It is important that lo is the first interface
	   in dev_base list.
	 */
	for_each_netdev_rcu(net, dev) {
		in_dev = __in_dev_get_rcu(dev);
		if (!in_dev)
			continue;

		for_primary_ifa(in_dev) {
			if (ifa->ifa_scope != RT_SCOPE_LINK &&
			    ifa->ifa_scope <= scope) {
				addr = ifa->ifa_local;
				goto out_unlock;
			}
		} endfor_ifa(in_dev);
	}
out_unlock:
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(inet_select_addr);
/* Check whether @in_dev confirms the (@dst, @local, @scope) triple:
 * @local (or, when local == 0, some address within @scope) must exist
 * on @in_dev, and the subnet constraints against @dst must hold on the
 * same pass.  Returns the confirmed/selected address or 0.
 *
 * "addr" tracks a candidate local address, "same" whether the subnet
 * match has been seen; the loop keeps scanning until both are
 * satisfied simultaneously.
 */
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
				 __be32 local, int scope)
{
	int same = 0;
	__be32 addr = 0;

	for_ifa(in_dev) {
		if (!addr &&
		    (local == ifa->ifa_local || !local) &&
		    ifa->ifa_scope <= scope) {
			addr = ifa->ifa_local;
			if (same)
				break;
		}
		if (!same) {
			same = (!local || inet_ifa_match(local, ifa)) &&
			       (!dst || inet_ifa_match(dst, ifa));
			if (same && addr) {
				if (local || !dst)
					break;
				/* Is the selected addr into dst subnet? */
				if (inet_ifa_match(addr, ifa))
					break;
				/* No, then can we use new local src? */
				if (ifa->ifa_scope <= scope) {
					addr = ifa->ifa_local;
					break;
				}
				/* search for large dst subnet for addr */
				same = 0;
			}
		}
	} endfor_ifa(in_dev);

	return same ? addr : 0;
}
  1091. /*
  1092. * Confirm that local IP address exists using wildcards:
  1093. * - net: netns to check, cannot be NULL
  1094. * - in_dev: only on this interface, NULL=any interface
  1095. * - dst: only in the same subnet as dst, 0=any dst
  1096. * - local: address, 0=autoselect the local address
  1097. * - scope: maximum allowed scope value for the local address
  1098. */
  1099. __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
  1100. __be32 dst, __be32 local, int scope)
  1101. {
  1102. __be32 addr = 0;
  1103. struct net_device *dev;
  1104. if (in_dev)
  1105. return confirm_addr_indev(in_dev, dst, local, scope);
  1106. rcu_read_lock();
  1107. for_each_netdev_rcu(net, dev) {
  1108. in_dev = __in_dev_get_rcu(dev);
  1109. if (in_dev) {
  1110. addr = confirm_addr_indev(in_dev, dst, local, scope);
  1111. if (addr)
  1112. break;
  1113. }
  1114. }
  1115. rcu_read_unlock();
  1116. return addr;
  1117. }
  1118. EXPORT_SYMBOL(inet_confirm_addr);
/*
 *	Device notifier
 */

/* Subscribe @nb to IPv4 address add/remove notifications (may sleep). */
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);
/* Remove @nb from the IPv4 address notifier chain. */
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
/* Rename ifa_labels for a device name change. Make some effort to preserve
 * existing alias numbering and to create unique labels if possible.
 */
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	int named = 0;

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		char old[IFNAMSIZ], *dot;

		memcpy(old, ifa->ifa_label, IFNAMSIZ);
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		/* first address keeps the bare device name */
		if (named++ == 0)
			goto skip;
		/* reuse the old ":alias" suffix, or invent a numbered one */
		dot = strchr(old, ':');
		if (!dot) {
			sprintf(old, ":%d", named);
			dot = old;
		}
		/* append the suffix, truncating the name part if needed */
		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
			strcat(ifa->ifa_label, dot);
		else
			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
skip:
		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
	}
}
  1158. static bool inetdev_valid_mtu(unsigned int mtu)
  1159. {
  1160. return mtu >= IPV4_MIN_MTU;
  1161. }
  1162. static void inetdev_send_gratuitous_arp(struct net_device *dev,
  1163. struct in_device *in_dev)
  1164. {
  1165. struct in_ifaddr *ifa;
  1166. for (ifa = in_dev->ifa_list; ifa;
  1167. ifa = ifa->ifa_next) {
  1168. arp_send(ARPOP_REQUEST, ETH_P_ARP,
  1169. ifa->ifa_local, dev,
  1170. ifa->ifa_local, NULL,
  1171. dev->dev_addr, NULL);
  1172. }
  1173. }
/* Called only under RTNL semaphore */

/* React to netdevice lifecycle events: create/destroy the per-device
 * in_device, auto-configure 127.0.0.1/8 on loopback, emit gratuitous
 * ARPs on address/link changes, and keep sysctl entries and address
 * labels in sync across renames.
 */
static int inetdev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	ASSERT_RTNL();

	if (!in_dev) {
		if (event == NETDEV_REGISTER) {
			in_dev = inetdev_init(dev);
			if (IS_ERR(in_dev))
				return notifier_from_errno(PTR_ERR(in_dev));
			if (dev->flags & IFF_LOOPBACK) {
				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
			}
		} else if (event == NETDEV_CHANGEMTU) {
			/* Re-enabling IP */
			if (inetdev_valid_mtu(dev->mtu))
				in_dev = inetdev_init(dev);
		}
		goto out;
	}

	switch (event) {
	case NETDEV_REGISTER:
		/* an in_device already exists at register time: broken */
		pr_debug("%s: bug\n", __func__);
		RCU_INIT_POINTER(dev->ip_ptr, NULL);
		break;
	case NETDEV_UP:
		if (!inetdev_valid_mtu(dev->mtu))
			break;
		if (dev->flags & IFF_LOOPBACK) {
			/* give loopback its canonical 127.0.0.1/8 */
			struct in_ifaddr *ifa = inet_alloc_ifa();

			if (ifa) {
				INIT_HLIST_NODE(&ifa->hash);
				ifa->ifa_local =
				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
				ifa->ifa_prefixlen = 8;
				ifa->ifa_mask = inet_make_mask(8);
				in_dev_hold(in_dev);
				ifa->ifa_dev = in_dev;
				ifa->ifa_scope = RT_SCOPE_HOST;
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
						 INFINITY_LIFE_TIME);
				ipv4_devconf_setall(in_dev);
				neigh_parms_data_state_setall(in_dev->arp_parms);
				inet_insert_ifa(ifa);
			}
		}
		ip_mc_up(in_dev);
		/* fall through */
	case NETDEV_CHANGEADDR:
		if (!IN_DEV_ARP_NOTIFY(in_dev))
			break;
		/* fall through */
	case NETDEV_NOTIFY_PEERS:
		/* Send gratuitous ARP to notify of link change */
		inetdev_send_gratuitous_arp(dev, in_dev);
		break;
	case NETDEV_DOWN:
		ip_mc_down(in_dev);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		ip_mc_unmap(in_dev);
		break;
	case NETDEV_POST_TYPE_CHANGE:
		ip_mc_remap(in_dev);
		break;
	case NETDEV_CHANGEMTU:
		if (inetdev_valid_mtu(dev->mtu))
			break;
		/* disable IP when MTU is not enough */
		/* fall through */
	case NETDEV_UNREGISTER:
		inetdev_destroy(in_dev);
		break;
	case NETDEV_CHANGENAME:
		/* Do not notify about label change, this event is
		 * not interesting to applications using netlink.
		 */
		inetdev_changename(dev, in_dev);

		devinet_sysctl_unregister(in_dev);
		devinet_sysctl_register(in_dev);
		break;
	}
out:
	return NOTIFY_DONE;
}
/* Registered on the netdevice notifier chain; see inetdev_event(). */
static struct notifier_block ip_netdev_notifier = {
	.notifier_call = inetdev_event,
};
  1265. static size_t inet_nlmsg_size(void)
  1266. {
  1267. return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
  1268. + nla_total_size(4) /* IFA_ADDRESS */
  1269. + nla_total_size(4) /* IFA_LOCAL */
  1270. + nla_total_size(4) /* IFA_BROADCAST */
  1271. + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
  1272. + nla_total_size(4) /* IFA_FLAGS */
  1273. + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
  1274. }
  1275. static inline u32 cstamp_delta(unsigned long cstamp)
  1276. {
  1277. return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
  1278. }
  1279. static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
  1280. unsigned long tstamp, u32 preferred, u32 valid)
  1281. {
  1282. struct ifa_cacheinfo ci;
  1283. ci.cstamp = cstamp_delta(cstamp);
  1284. ci.tstamp = cstamp_delta(tstamp);
  1285. ci.ifa_prefered = preferred;
  1286. ci.ifa_valid = valid;
  1287. return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
  1288. }
/* Fill one RTM_NEWADDR/RTM_DELADDR message describing @ifa into @skb.
 *
 * Lifetimes of non-permanent addresses are reported relative to now
 * (elapsed time subtracted, clamped at 0).  Returns 0, or -EMSGSIZE
 * when @skb lacks room, in which case the partial message is cancelled.
 */
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;
	u32 preferred, valid;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_INET;
	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
	ifm->ifa_flags = ifa->ifa_flags;
	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
		preferred = ifa->ifa_preferred_lft;
		valid = ifa->ifa_valid_lft;
		if (preferred != INFINITY_LIFE_TIME) {
			/* seconds already consumed since the last update */
			long tval = (jiffies - ifa->ifa_tstamp) / HZ;

			if (preferred > tval)
				preferred -= tval;
			else
				preferred = 0;
			if (valid != INFINITY_LIFE_TIME) {
				if (valid > tval)
					valid -= tval;
				else
					valid = 0;
			}
		}
	} else {
		preferred = INFINITY_LIFE_TIME;
		valid = INFINITY_LIFE_TIME;
	}

	/* zero-valued addresses and an empty label are simply omitted */
	if ((ifa->ifa_address &&
	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
	    (ifa->ifa_local &&
	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
	    (ifa->ifa_broadcast &&
	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
	    (ifa->ifa_label[0] &&
	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
			  preferred, valid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Netlink dump callback for RTM_GETADDR: walk every device hash chain
 * and emit one message per IPv4 address.  Resume state across dump
 * instalments lives in cb->args[0..2] (hash bucket, device index,
 * address index).
 */
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx, s_idx;
	int ip_idx, s_ip_idx;
	struct net_device *dev;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct hlist_head *head;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	s_ip_idx = ip_idx = cb->args[2];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		/* lets userspace detect address changes mid-dump */
		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
			  net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			/* a new device: restart its address list from 0 */
			if (h > s_h || idx > s_idx)
				s_ip_idx = 0;
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
				goto cont;

			for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
			     ifa = ifa->ifa_next, ip_idx++) {
				if (ip_idx < s_ip_idx)
					continue;
				if (inet_fill_ifaddr(skb, ifa,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWADDR, NLM_F_MULTI) < 0) {
					rcu_read_unlock();
					goto done;
				}
				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
			}
cont:
			idx++;
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = ip_idx;

	return skb->len;
}
/* Broadcast an address event (@event is RTM_NEWADDR or RTM_DELADDR)
 * to the RTNLGRP_IPV4_IFADDR netlink group.  @nlh/@portid identify the
 * request that triggered it, if any.  On failure the error is recorded
 * on the group instead of a message being sent.
 */
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
		      u32 portid)
{
	struct sk_buff *skb;
	u32 seq = nlh ? nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;
	struct net *net;

	net = dev_net(ifa->ifa_dev->dev);
	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
  1417. static size_t inet_get_link_af_size(const struct net_device *dev,
  1418. u32 ext_filter_mask)
  1419. {
  1420. struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
  1421. if (!in_dev)
  1422. return 0;
  1423. return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
  1424. }
  1425. static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
  1426. u32 ext_filter_mask)
  1427. {
  1428. struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
  1429. struct nlattr *nla;
  1430. int i;
  1431. if (!in_dev)
  1432. return -ENODATA;
  1433. nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
  1434. if (!nla)
  1435. return -EMSGSIZE;
  1436. for (i = 0; i < IPV4_DEVCONF_MAX; i++)
  1437. ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
  1438. return 0;
  1439. }
/* Policy for the AF_INET part of IFLA_AF_SPEC. */
static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
};
/* Validate an IFLA_AF_SPEC/AF_INET blob before inet_set_link_af()
 * applies it: the device (when given) must have IPv4 enabled, and
 * every nested IFLA_INET_CONF entry must carry at least 4 bytes and a
 * known devconf id.  Returns 0 or a negative errno.
 */
static int inet_validate_link_af(const struct net_device *dev,
				 const struct nlattr *nla)
{
	struct nlattr *a, *tb[IFLA_INET_MAX+1];
	int err, rem;

	if (dev && !__in_dev_get_rtnl(dev))
		return -EAFNOSUPPORT;

	err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_INET_CONF]) {
		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
			int cfgid = nla_type(a);

			if (nla_len(a) < 4)
				return -EINVAL;

			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
				return -EINVAL;
		}
	}

	return 0;
}
/* Apply a previously validated IFLA_AF_SPEC/AF_INET blob: store each
 * nested IFLA_INET_CONF value into the device's ipv4 devconf.  Parsing
 * cannot fail here because inet_validate_link_af() already ran, hence
 * the BUG() on that path.
 */
static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct nlattr *a, *tb[IFLA_INET_MAX+1];
	int rem;

	if (!in_dev)
		return -EAFNOSUPPORT;

	if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
		BUG();

	if (tb[IFLA_INET_CONF]) {
		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
	}

	return 0;
}
  1479. static int inet_netconf_msgsize_devconf(int type)
  1480. {
  1481. int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
  1482. + nla_total_size(4); /* NETCONFA_IFINDEX */
  1483. /* type -1 is used for ALL */
  1484. if (type == -1 || type == NETCONFA_FORWARDING)
  1485. size += nla_total_size(4);
  1486. if (type == -1 || type == NETCONFA_RP_FILTER)
  1487. size += nla_total_size(4);
  1488. if (type == -1 || type == NETCONFA_MC_FORWARDING)
  1489. size += nla_total_size(4);
  1490. if (type == -1 || type == NETCONFA_PROXY_NEIGH)
  1491. size += nla_total_size(4);
  1492. if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
  1493. size += nla_total_size(4);
  1494. return size;
  1495. }
/* Fill one RTM_NEWNETCONF message for @devconf into @skb.  @type picks
 * a single NETCONFA_* attribute or -1 for all of them; @ifindex may be
 * a real device index or NETCONFA_IFINDEX_ALL/_DEFAULT.  Returns 0 or
 * -EMSGSIZE (the partial message is cancelled).
 */
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
				     struct ipv4_devconf *devconf, u32 portid,
				     u32 seq, int event, unsigned int flags,
				     int type)
{
	struct nlmsghdr *nlh;
	struct netconfmsg *ncm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_INET;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
		goto nla_put_failure;

	/* type -1 is used for ALL */
	if ((type == -1 || type == NETCONFA_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_FORWARDING,
			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_RP_FILTER) &&
	    nla_put_s32(skb, NETCONFA_RP_FILTER,
			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
  1538. void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
  1539. struct ipv4_devconf *devconf)
  1540. {
  1541. struct sk_buff *skb;
  1542. int err = -ENOBUFS;
  1543. skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
  1544. if (!skb)
  1545. goto errout;
  1546. err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
  1547. RTM_NEWNETCONF, 0, type);
  1548. if (err < 0) {
  1549. /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
  1550. WARN_ON(err == -EMSGSIZE);
  1551. kfree_skb(skb);
  1552. goto errout;
  1553. }
  1554. rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
  1555. return;
  1556. errout:
  1557. if (err < 0)
  1558. rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
  1559. }
/* Policy for RTM_GETNETCONF request attributes. */
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
};
/* RTM_GETNETCONF handler: resolve the requested devconf (per-device,
 * "all" or "default" depending on NETCONFA_IFINDEX) and unicast a full
 * snapshot back to the requester.  Returns 0 or a negative errno.
 */
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX+1];
	struct netconfmsg *ncm;
	struct sk_buff *skb;
	struct ipv4_devconf *devconf;
	struct in_device *in_dev;
	struct net_device *dev;
	int ifindex;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
			  devconf_ipv4_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[NETCONFA_IFINDEX])
		goto errout;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	switch (ifindex) {
	case NETCONFA_IFINDEX_ALL:
		devconf = net->ipv4.devconf_all;
		break;
	case NETCONFA_IFINDEX_DEFAULT:
		devconf = net->ipv4.devconf_dflt;
		break;
	default:
		/* still -EINVAL if the device or its in_device is gone */
		dev = __dev_get_by_index(net, ifindex);
		if (!dev)
			goto errout;
		in_dev = __in_dev_get_rtnl(dev);
		if (!in_dev)
			goto errout;
		devconf = &in_dev->cnf;
		break;
	}

	err = -ENOBUFS;
	skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					-1);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
/* RTM_GETNETCONF dumpit handler: walk the per-net device index hash
 * and emit one RTM_NEWNETCONF message per in_device, followed by two
 * pseudo entries for the "all" and "default" conf blocks.
 * Resume state lives in cb->args[0] (hash bucket; the values
 * NETDEV_HASHENTRIES and NETDEV_HASHENTRIES + 1 stand for the "all"
 * and "default" phases) and cb->args[1] (index within the bucket). */
static int inet_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx, s_idx;
	struct net_device *dev;
	struct in_device *in_dev;
	struct hlist_head *head;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		/* Generation cookie consumed by nl_dump_check_consistent():
		 * flags the dump if addresses/devices changed mid-walk. */
		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
			  net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;	/* dumped in a prior call */
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
				goto cont;

			if (inet_netconf_fill_devconf(skb, dev->ifindex,
						      &in_dev->cnf,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      RTM_NEWNETCONF,
						      NLM_F_MULTI,
						      -1) < 0) {
				/* skb full: stop; resume here next call. */
				rcu_read_unlock();
				goto done;
			}
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
		rcu_read_unlock();
	}
	/* Phase NETDEV_HASHENTRIES: the "all" pseudo entry. */
	if (h == NETDEV_HASHENTRIES) {
		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
					      net->ipv4.devconf_all,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      RTM_NEWNETCONF, NLM_F_MULTI,
					      -1) < 0)
			goto done;
		else
			h++;
	}
	/* Phase NETDEV_HASHENTRIES + 1: the "default" pseudo entry. */
	if (h == NETDEV_HASHENTRIES + 1) {
		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
					      net->ipv4.devconf_dflt,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      RTM_NEWNETCONF, NLM_F_MULTI,
					      -1) < 0)
			goto done;
		else
			h++;
	}
done:
	cb->args[0] = h;
	cb->args[1] = idx;

	return skb->len;
}
  1688. #ifdef CONFIG_SYSCTL
  1689. static void devinet_copy_dflt_conf(struct net *net, int i)
  1690. {
  1691. struct net_device *dev;
  1692. rcu_read_lock();
  1693. for_each_netdev_rcu(net, dev) {
  1694. struct in_device *in_dev;
  1695. in_dev = __in_dev_get_rcu(dev);
  1696. if (in_dev && !test_bit(i, in_dev->cnf.state))
  1697. in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
  1698. }
  1699. rcu_read_unlock();
  1700. }
/* called with RTNL locked */
/* Fan out a change of the "all" forwarding setting across the netns:
 * mirror it into the "default" block and into every in_device, and
 * emit an RTM_NEWNETCONF notification for each updated block.  LRO is
 * disabled on each device when forwarding is switched on. */
static void inet_forward_change(struct net *net)
{
	struct net_device *dev;
	int on = IPV4_DEVCONF_ALL(net, FORWARDING);

	/* A forwarding host must not honour ICMP redirects. */
	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_ALL,
				    net->ipv4.devconf_all);
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_DEFAULT,
				    net->ipv4.devconf_dflt);

	for_each_netdev(net, dev) {
		struct in_device *in_dev;

		if (on)
			dev_disable_lro(dev);

		/* in_dev may be torn down concurrently; pin it with RCU. */
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
						    dev->ifindex, &in_dev->cnf);
		}
		rcu_read_unlock();
	}
}
  1728. static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
  1729. {
  1730. if (cnf == net->ipv4.devconf_dflt)
  1731. return NETCONFA_IFINDEX_DEFAULT;
  1732. else if (cnf == net->ipv4.devconf_all)
  1733. return NETCONFA_IFINDEX_ALL;
  1734. else {
  1735. struct in_device *idev
  1736. = container_of(cnf, struct in_device, cnf);
  1737. return idev->dev->ifindex;
  1738. }
  1739. }
/* proc handler backing most entries under /proc/sys/net/ipv4/conf/.
 * Wraps proc_dointvec() and, on a write:
 *  - marks entry @i as explicitly set in cnf->state so future changes
 *    of the netns default no longer overwrite it;
 *  - if the write hit the "default" block, copies the new value down
 *    to every device that has not overridden it;
 *  - flushes the route cache when accept_local or route_localnet is
 *    switched off;
 *  - emits RTM_NEWNETCONF notifications for the attributes exported
 *    via netconf (rp_filter, proxy_arp, ignore_routes_with_linkdown). */
static int devinet_conf_proc(struct ctl_table *ctl, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int old_value = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	int new_value = *(int *)ctl->data;

	if (write) {
		struct ipv4_devconf *cnf = ctl->extra1;
		struct net *net = ctl->extra2;
		/* Index of this entry within cnf->data[]. */
		int i = (int *)ctl->data - cnf->data;
		int ifindex;

		set_bit(i, cnf->state);

		if (cnf == net->ipv4.devconf_dflt)
			devinet_copy_dflt_conf(net, i);

		/* IPV4_DEVCONF_* constants are 1-based; i is 0-based. */
		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
			if ((new_value == 0) && (old_value != 0))
				rt_cache_flush(net);

		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
						    ifindex, cnf);
		}
		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
						    ifindex, cnf);
		}
		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
						    ifindex, cnf);
		}
	}

	return ret;
}
/* proc handler for the forwarding knobs (per-device, "all", "default",
 * and the legacy ip_forward alias).  A forwarding change has netns-wide
 * side effects that need RTNL, but sysctl writes arrive without it:
 * rtnl_trylock() + restart_syscall() re-issues the syscall rather than
 * sleeping on the lock, after restoring the old value and file offset
 * so the restarted write behaves like a fresh one. */
static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != val) {
		struct net *net = ctl->extra2;

		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
			if (!rtnl_trylock()) {
				/* Restore the original values before restarting */
				*valp = val;
				*ppos = pos;
				return restart_syscall();
			}
			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
				/* "all": fan out to every device. */
				inet_forward_change(net);
			} else {
				/* Per-device write. */
				struct ipv4_devconf *cnf = ctl->extra1;
				struct in_device *idev =
					container_of(cnf, struct in_device, cnf);
				if (*valp)
					dev_disable_lro(idev->dev);
				inet_netconf_notify_devconf(net,
							    NETCONFA_FORWARDING,
							    idev->dev->ifindex,
							    cnf);
			}
			rtnl_unlock();
			rt_cache_flush(net);
		} else
			/* "default" affects only future devices: notify only,
			 * no RTNL and no cache flush needed. */
			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
						    NETCONFA_IFINDEX_DEFAULT,
						    net->ipv4.devconf_dflt);
	}

	return ret;
}
  1819. static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
  1820. void __user *buffer,
  1821. size_t *lenp, loff_t *ppos)
  1822. {
  1823. int *valp = ctl->data;
  1824. int val = *valp;
  1825. int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
  1826. struct net *net = ctl->extra2;
  1827. if (write && *valp != val)
  1828. rt_cache_flush(net);
  1829. return ret;
  1830. }
/* Template for one per-device sysctl entry.  .data points into the
 * master ipv4_devconf; __devinet_sysctl_register() later rebases it
 * (and .extra1/.extra2) onto the per-device or per-netns copy. */
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
	{ \
		.procname	= name, \
		.data		= ipv4_devconf.data + \
				  IPV4_DEVCONF_ ## attr - 1, \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
		.extra1		= &ipv4_devconf, \
	}

/* Read-write entry with the standard devinet handler. */
#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)

/* Read-only entry (value maintained by the kernel, not writable). */
#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)

/* Read-write entry with a custom proc handler. */
#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)

/* Read-write entry whose change flushes the route cache. */
#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/* Master template for the net/ipv4/conf/<if>/ sysctl tree.  It is
 * kmemdup()'d and pointer-rebased per device/netns by
 * __devinet_sysctl_register(); the trailing implicit zero entry of
 * devinet_vars[] is the table terminator. */
static struct devinet_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
} devinet_sysctl = {
	.devinet_vars = {
		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
					     devinet_sysctl_forward),
		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
					"accept_source_route"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
					"force_igmp_version"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
					"igmpv2_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
					"igmpv3_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
					"ignore_routes_with_linkdown"),

		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
					      "promote_secondaries"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
					      "route_localnet"),
	},
};
/* Duplicate the devinet_sysctl template, rebase each entry's .data
 * pointer from the master ipv4_devconf onto @p (and .extra1/.extra2
 * onto @p/@net), then register the table under net/ipv4/conf/<dev_name>.
 * The "- 1" loop bound leaves the terminating null entry untouched.
 * Returns 0 on success, -ENOBUFS on any failure. */
static int __devinet_sysctl_register(struct net *net, char *dev_name,
				     struct ipv4_devconf *p)
{
	int i;
	struct devinet_sysctl_table *t;
	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];

	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto out;

	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
		/* Shift .data by the offset between the template conf
		 * block and the real one. */
		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
		t->devinet_vars[i].extra1 = p;
		t->devinet_vars[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);

	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl = t;
	return 0;

free:
	kfree(t);
out:
	return -ENOBUFS;
}
  1918. static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
  1919. {
  1920. struct devinet_sysctl_table *t = cnf->sysctl;
  1921. if (!t)
  1922. return;
  1923. cnf->sysctl = NULL;
  1924. unregister_net_sysctl_table(t->sysctl_header);
  1925. kfree(t);
  1926. }
  1927. static int devinet_sysctl_register(struct in_device *idev)
  1928. {
  1929. int err;
  1930. if (!sysctl_dev_name_is_allowed(idev->dev->name))
  1931. return -EINVAL;
  1932. err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
  1933. if (err)
  1934. return err;
  1935. err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
  1936. &idev->cnf);
  1937. if (err)
  1938. neigh_sysctl_unregister(idev->arp_parms);
  1939. return err;
  1940. }
/* Mirror of devinet_sysctl_register(): drop the devinet conf table
 * first, then the neighbour (ARP) one. */
static void devinet_sysctl_unregister(struct in_device *idev)
{
	__devinet_sysctl_unregister(&idev->cnf);
	neigh_sysctl_unregister(idev->arp_parms);
}
/* Legacy /proc/sys/net/ipv4/ip_forward knob: an alias for the "all"
 * FORWARDING conf entry.  This template points at the static init_net
 * conf block; devinet_init_net() duplicates it and rewrites .data,
 * .extra1 and .extra2 for other network namespaces. */
static struct ctl_table ctl_forward_entry[] = {
	{
		.procname	= "ip_forward",
		.data		= &ipv4_devconf.data[
					IPV4_DEVCONF_FORWARDING - 1],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= devinet_sysctl_forward,
		.extra1		= &ipv4_devconf,
		.extra2		= &init_net,
	},
	{ },
};
  1959. #endif
/* Per-netns init: set up the "all"/"default" conf blocks and (with
 * sysctl) the conf/ trees plus the ip_forward alias.  init_net uses
 * the static templates directly; every other netns gets kmemdup()'d
 * copies seeded from the current template values.  Errors unwind in
 * strict reverse order via the goto chain. */
static __net_init int devinet_init_net(struct net *net)
{
	int err;
	struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl = ctl_forward_entry;
	struct ctl_table_header *forw_hdr;
#endif

	err = -ENOMEM;
	all = &ipv4_devconf;
	dflt = &ipv4_devconf_dflt;

	if (!net_eq(net, &init_net)) {
		all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
		if (!all)
			goto err_alloc_all;

		dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
		if (!dflt)
			goto err_alloc_dflt;

#ifdef CONFIG_SYSCTL
		tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
		if (!tbl)
			goto err_alloc_ctl;

		/* Point the ip_forward alias at this netns' copies. */
		tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
		tbl[0].extra1 = all;
		tbl[0].extra2 = net;
#endif
	}

#ifdef CONFIG_SYSCTL
	err = __devinet_sysctl_register(net, "all", all);
	if (err < 0)
		goto err_reg_all;

	err = __devinet_sysctl_register(net, "default", dflt);
	if (err < 0)
		goto err_reg_dflt;

	err = -ENOMEM;
	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
	if (!forw_hdr)
		goto err_reg_ctl;
	net->ipv4.forw_hdr = forw_hdr;
#endif

	net->ipv4.devconf_all = all;
	net->ipv4.devconf_dflt = dflt;
	return 0;

#ifdef CONFIG_SYSCTL
err_reg_ctl:
	__devinet_sysctl_unregister(dflt);
err_reg_dflt:
	__devinet_sysctl_unregister(all);
err_reg_all:
	/* Only free what we duplicated, never the static templates. */
	if (tbl != ctl_forward_entry)
		kfree(tbl);
err_alloc_ctl:
#endif
	if (dflt != &ipv4_devconf_dflt)
		kfree(dflt);
err_alloc_dflt:
	if (all != &ipv4_devconf)
		kfree(all);
err_alloc_all:
	return err;
}
/* Per-netns teardown: unregister the ip_forward alias table and the
 * "all"/"default" conf sysctl trees, then free the conf blocks.
 * NOTE(review): for init_net the conf pointers reference the static
 * templates; this relies on init_net never being torn down so these
 * kfree()s are not reached for it — confirm against pernet semantics. */
static __net_exit void devinet_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = net->ipv4.forw_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.forw_hdr);
	__devinet_sysctl_unregister(net->ipv4.devconf_dflt);
	__devinet_sysctl_unregister(net->ipv4.devconf_all);
	kfree(tbl);
#endif
	kfree(net->ipv4.devconf_dflt);
	kfree(net->ipv4.devconf_all);
}
/* Per-netns lifecycle hooks for the devinet conf machinery. */
static __net_initdata struct pernet_operations devinet_ops = {
	.init = devinet_init_net,
	.exit = devinet_exit_net,
};
/* AF_INET hooks for per-address-family data in RTM link messages
 * (IFLA_AF_SPEC handling). */
static struct rtnl_af_ops inet_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = inet_fill_link_af,
	.get_link_af_size = inet_get_link_af_size,
	.validate_link_af = inet_validate_link_af,
	.set_link_af	  = inet_set_link_af,
};
/* Boot-time init: set up the inet address hash, register the pernet
 * ops, gifconf and netdevice-notifier hooks, kick off the periodic
 * address-lifetime worker, and register all IPv4 rtnetlink address
 * and netconf message handlers. */
void __init devinet_init(void)
{
	int i;

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&inet_addr_lst[i]);

	register_pernet_subsys(&devinet_ops);

	register_gifconf(PF_INET, inet_gifconf);
	register_netdevice_notifier(&ip_netdev_notifier);

	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);

	rtnl_af_register(&inet_af_ops);

	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
		      inet_netconf_dump_devconf, NULL);
}