nf_conntrack_netlink.c 82 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426
  1. /* Connection tracking via netlink socket. Allows for user space
  2. * protocol helpers and general trouble making from userspace.
  3. *
  4. * (C) 2001 by Jay Schulist <jschlst@samba.org>
  5. * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  6. * (C) 2003 by Patrick Mchardy <kaber@trash.net>
  7. * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
  8. *
  9. * Initial connection tracking via netlink development funded and
  10. * generally made possible by Network Robots, Inc. (www.networkrobots.com)
  11. *
  12. * Further development of this code funded by Astaro AG (http://www.astaro.com)
  13. *
  14. * This software may be used and distributed according to the terms
  15. * of the GNU General Public License, incorporated herein by reference.
  16. */
  17. #include <linux/init.h>
  18. #include <linux/module.h>
  19. #include <linux/kernel.h>
  20. #include <linux/rculist.h>
  21. #include <linux/rculist_nulls.h>
  22. #include <linux/types.h>
  23. #include <linux/timer.h>
  24. #include <linux/security.h>
  25. #include <linux/skbuff.h>
  26. #include <linux/errno.h>
  27. #include <linux/netlink.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/slab.h>
  31. #include <linux/netfilter.h>
  32. #include <net/netlink.h>
  33. #include <net/sock.h>
  34. #include <net/netfilter/nf_conntrack.h>
  35. #include <net/netfilter/nf_conntrack_core.h>
  36. #include <net/netfilter/nf_conntrack_expect.h>
  37. #include <net/netfilter/nf_conntrack_helper.h>
  38. #include <net/netfilter/nf_conntrack_seqadj.h>
  39. #include <net/netfilter/nf_conntrack_l3proto.h>
  40. #include <net/netfilter/nf_conntrack_l4proto.h>
  41. #include <net/netfilter/nf_conntrack_tuple.h>
  42. #include <net/netfilter/nf_conntrack_acct.h>
  43. #include <net/netfilter/nf_conntrack_zones.h>
  44. #include <net/netfilter/nf_conntrack_timestamp.h>
  45. #include <net/netfilter/nf_conntrack_labels.h>
  46. #include <net/netfilter/nf_conntrack_seqadj.h>
  47. #include <net/netfilter/nf_conntrack_synproxy.h>
  48. #ifdef CONFIG_NF_NAT_NEEDED
  49. #include <net/netfilter/nf_nat_core.h>
  50. #include <net/netfilter/nf_nat_l4proto.h>
  51. #include <net/netfilter/nf_nat_helper.h>
  52. #endif
  53. #include <linux/netfilter/nfnetlink.h>
  54. #include <linux/netfilter/nfnetlink_conntrack.h>
  55. MODULE_LICENSE("GPL");
  56. static char __initdata version[] = "0.93";
  57. static inline int
  58. ctnetlink_dump_tuples_proto(struct sk_buff *skb,
  59. const struct nf_conntrack_tuple *tuple,
  60. struct nf_conntrack_l4proto *l4proto)
  61. {
  62. int ret = 0;
  63. struct nlattr *nest_parms;
  64. nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
  65. if (!nest_parms)
  66. goto nla_put_failure;
  67. if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
  68. goto nla_put_failure;
  69. if (likely(l4proto->tuple_to_nlattr))
  70. ret = l4proto->tuple_to_nlattr(skb, tuple);
  71. nla_nest_end(skb, nest_parms);
  72. return ret;
  73. nla_put_failure:
  74. return -1;
  75. }
  76. static inline int
  77. ctnetlink_dump_tuples_ip(struct sk_buff *skb,
  78. const struct nf_conntrack_tuple *tuple,
  79. struct nf_conntrack_l3proto *l3proto)
  80. {
  81. int ret = 0;
  82. struct nlattr *nest_parms;
  83. nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
  84. if (!nest_parms)
  85. goto nla_put_failure;
  86. if (likely(l3proto->tuple_to_nlattr))
  87. ret = l3proto->tuple_to_nlattr(skb, tuple);
  88. nla_nest_end(skb, nest_parms);
  89. return ret;
  90. nla_put_failure:
  91. return -1;
  92. }
  93. static int
  94. ctnetlink_dump_tuples(struct sk_buff *skb,
  95. const struct nf_conntrack_tuple *tuple)
  96. {
  97. int ret;
  98. struct nf_conntrack_l3proto *l3proto;
  99. struct nf_conntrack_l4proto *l4proto;
  100. rcu_read_lock();
  101. l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
  102. ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
  103. if (ret >= 0) {
  104. l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
  105. tuple->dst.protonum);
  106. ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
  107. }
  108. rcu_read_unlock();
  109. return ret;
  110. }
  111. static inline int
  112. ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
  113. const struct nf_conntrack_zone *zone, int dir)
  114. {
  115. if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
  116. return 0;
  117. if (nla_put_be16(skb, attrtype, htons(zone->id)))
  118. goto nla_put_failure;
  119. return 0;
  120. nla_put_failure:
  121. return -1;
  122. }
  123. static inline int
  124. ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
  125. {
  126. if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
  127. goto nla_put_failure;
  128. return 0;
  129. nla_put_failure:
  130. return -1;
  131. }
  132. static inline int
  133. ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
  134. {
  135. long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
  136. if (timeout < 0)
  137. timeout = 0;
  138. if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
  139. goto nla_put_failure;
  140. return 0;
  141. nla_put_failure:
  142. return -1;
  143. }
  144. static inline int
  145. ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
  146. {
  147. struct nf_conntrack_l4proto *l4proto;
  148. struct nlattr *nest_proto;
  149. int ret;
  150. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  151. if (!l4proto->to_nlattr)
  152. return 0;
  153. nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
  154. if (!nest_proto)
  155. goto nla_put_failure;
  156. ret = l4proto->to_nlattr(skb, nest_proto, ct);
  157. nla_nest_end(skb, nest_proto);
  158. return ret;
  159. nla_put_failure:
  160. return -1;
  161. }
  162. static inline int
  163. ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
  164. {
  165. struct nlattr *nest_helper;
  166. const struct nf_conn_help *help = nfct_help(ct);
  167. struct nf_conntrack_helper *helper;
  168. if (!help)
  169. return 0;
  170. helper = rcu_dereference(help->helper);
  171. if (!helper)
  172. goto out;
  173. nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
  174. if (!nest_helper)
  175. goto nla_put_failure;
  176. if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
  177. goto nla_put_failure;
  178. if (helper->to_nlattr)
  179. helper->to_nlattr(skb, ct);
  180. nla_nest_end(skb, nest_helper);
  181. out:
  182. return 0;
  183. nla_put_failure:
  184. return -1;
  185. }
  186. static int
  187. dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
  188. enum ip_conntrack_dir dir, int type)
  189. {
  190. enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
  191. struct nf_conn_counter *counter = acct->counter;
  192. struct nlattr *nest_count;
  193. u64 pkts, bytes;
  194. if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
  195. pkts = atomic64_xchg(&counter[dir].packets, 0);
  196. bytes = atomic64_xchg(&counter[dir].bytes, 0);
  197. } else {
  198. pkts = atomic64_read(&counter[dir].packets);
  199. bytes = atomic64_read(&counter[dir].bytes);
  200. }
  201. nest_count = nla_nest_start(skb, attr | NLA_F_NESTED);
  202. if (!nest_count)
  203. goto nla_put_failure;
  204. if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
  205. nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
  206. goto nla_put_failure;
  207. nla_nest_end(skb, nest_count);
  208. return 0;
  209. nla_put_failure:
  210. return -1;
  211. }
  212. static int
  213. ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
  214. {
  215. struct nf_conn_acct *acct = nf_conn_acct_find(ct);
  216. if (!acct)
  217. return 0;
  218. if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
  219. return -1;
  220. if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
  221. return -1;
  222. return 0;
  223. }
  224. static int
  225. ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
  226. {
  227. struct nlattr *nest_count;
  228. const struct nf_conn_tstamp *tstamp;
  229. tstamp = nf_conn_tstamp_find(ct);
  230. if (!tstamp)
  231. return 0;
  232. nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
  233. if (!nest_count)
  234. goto nla_put_failure;
  235. if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
  236. (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
  237. cpu_to_be64(tstamp->stop))))
  238. goto nla_put_failure;
  239. nla_nest_end(skb, nest_count);
  240. return 0;
  241. nla_put_failure:
  242. return -1;
  243. }
  244. #ifdef CONFIG_NF_CONNTRACK_MARK
  245. static inline int
  246. ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
  247. {
  248. if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
  249. goto nla_put_failure;
  250. return 0;
  251. nla_put_failure:
  252. return -1;
  253. }
  254. #else
  255. #define ctnetlink_dump_mark(a, b) (0)
  256. #endif
  257. #ifdef CONFIG_NF_CONNTRACK_SECMARK
  258. static inline int
  259. ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
  260. {
  261. struct nlattr *nest_secctx;
  262. int len, ret;
  263. char *secctx;
  264. ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
  265. if (ret)
  266. return 0;
  267. ret = -1;
  268. nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
  269. if (!nest_secctx)
  270. goto nla_put_failure;
  271. if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
  272. goto nla_put_failure;
  273. nla_nest_end(skb, nest_secctx);
  274. ret = 0;
  275. nla_put_failure:
  276. security_release_secctx(secctx, len);
  277. return ret;
  278. }
  279. #else
  280. #define ctnetlink_dump_secctx(a, b) (0)
  281. #endif
  282. #ifdef CONFIG_NF_CONNTRACK_LABELS
  283. static int ctnetlink_label_size(const struct nf_conn *ct)
  284. {
  285. struct nf_conn_labels *labels = nf_ct_labels_find(ct);
  286. if (!labels)
  287. return 0;
  288. return nla_total_size(labels->words * sizeof(long));
  289. }
  290. static int
  291. ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
  292. {
  293. struct nf_conn_labels *labels = nf_ct_labels_find(ct);
  294. unsigned int len, i;
  295. if (!labels)
  296. return 0;
  297. len = labels->words * sizeof(long);
  298. i = 0;
  299. do {
  300. if (labels->bits[i] != 0)
  301. return nla_put(skb, CTA_LABELS, len, labels->bits);
  302. i++;
  303. } while (i < labels->words);
  304. return 0;
  305. }
  306. #else
  307. #define ctnetlink_dump_labels(a, b) (0)
  308. #define ctnetlink_label_size(a) (0)
  309. #endif
  310. #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
  311. static inline int
  312. ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
  313. {
  314. struct nlattr *nest_parms;
  315. if (!(ct->status & IPS_EXPECTED))
  316. return 0;
  317. nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
  318. if (!nest_parms)
  319. goto nla_put_failure;
  320. if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
  321. goto nla_put_failure;
  322. nla_nest_end(skb, nest_parms);
  323. return 0;
  324. nla_put_failure:
  325. return -1;
  326. }
  327. static int
  328. dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
  329. {
  330. struct nlattr *nest_parms;
  331. nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
  332. if (!nest_parms)
  333. goto nla_put_failure;
  334. if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
  335. htonl(seq->correction_pos)) ||
  336. nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
  337. htonl(seq->offset_before)) ||
  338. nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
  339. htonl(seq->offset_after)))
  340. goto nla_put_failure;
  341. nla_nest_end(skb, nest_parms);
  342. return 0;
  343. nla_put_failure:
  344. return -1;
  345. }
  346. static inline int
  347. ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
  348. {
  349. struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
  350. struct nf_ct_seqadj *seq;
  351. if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
  352. return 0;
  353. seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
  354. if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
  355. return -1;
  356. seq = &seqadj->seq[IP_CT_DIR_REPLY];
  357. if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
  358. return -1;
  359. return 0;
  360. }
  361. static inline int
  362. ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
  363. {
  364. if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
  365. goto nla_put_failure;
  366. return 0;
  367. nla_put_failure:
  368. return -1;
  369. }
  370. static inline int
  371. ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
  372. {
  373. if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
  374. goto nla_put_failure;
  375. return 0;
  376. nla_put_failure:
  377. return -1;
  378. }
  379. static int
  380. ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
  381. struct nf_conn *ct)
  382. {
  383. const struct nf_conntrack_zone *zone;
  384. struct nlmsghdr *nlh;
  385. struct nfgenmsg *nfmsg;
  386. struct nlattr *nest_parms;
  387. unsigned int flags = portid ? NLM_F_MULTI : 0, event;
  388. event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
  389. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
  390. if (nlh == NULL)
  391. goto nlmsg_failure;
  392. nfmsg = nlmsg_data(nlh);
  393. nfmsg->nfgen_family = nf_ct_l3num(ct);
  394. nfmsg->version = NFNETLINK_V0;
  395. nfmsg->res_id = 0;
  396. zone = nf_ct_zone(ct);
  397. nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
  398. if (!nest_parms)
  399. goto nla_put_failure;
  400. if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
  401. goto nla_put_failure;
  402. if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
  403. NF_CT_ZONE_DIR_ORIG) < 0)
  404. goto nla_put_failure;
  405. nla_nest_end(skb, nest_parms);
  406. nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
  407. if (!nest_parms)
  408. goto nla_put_failure;
  409. if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
  410. goto nla_put_failure;
  411. if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
  412. NF_CT_ZONE_DIR_REPL) < 0)
  413. goto nla_put_failure;
  414. nla_nest_end(skb, nest_parms);
  415. if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
  416. NF_CT_DEFAULT_ZONE_DIR) < 0)
  417. goto nla_put_failure;
  418. if (ctnetlink_dump_status(skb, ct) < 0 ||
  419. ctnetlink_dump_timeout(skb, ct) < 0 ||
  420. ctnetlink_dump_acct(skb, ct, type) < 0 ||
  421. ctnetlink_dump_timestamp(skb, ct) < 0 ||
  422. ctnetlink_dump_protoinfo(skb, ct) < 0 ||
  423. ctnetlink_dump_helpinfo(skb, ct) < 0 ||
  424. ctnetlink_dump_mark(skb, ct) < 0 ||
  425. ctnetlink_dump_secctx(skb, ct) < 0 ||
  426. ctnetlink_dump_labels(skb, ct) < 0 ||
  427. ctnetlink_dump_id(skb, ct) < 0 ||
  428. ctnetlink_dump_use(skb, ct) < 0 ||
  429. ctnetlink_dump_master(skb, ct) < 0 ||
  430. ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
  431. goto nla_put_failure;
  432. nlmsg_end(skb, nlh);
  433. return skb->len;
  434. nlmsg_failure:
  435. nla_put_failure:
  436. nlmsg_cancel(skb, nlh);
  437. return -1;
  438. }
  439. static inline size_t
  440. ctnetlink_proto_size(const struct nf_conn *ct)
  441. {
  442. struct nf_conntrack_l3proto *l3proto;
  443. struct nf_conntrack_l4proto *l4proto;
  444. size_t len = 0;
  445. rcu_read_lock();
  446. l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
  447. len += l3proto->nla_size;
  448. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  449. len += l4proto->nla_size;
  450. rcu_read_unlock();
  451. return len;
  452. }
  453. static inline size_t
  454. ctnetlink_acct_size(const struct nf_conn *ct)
  455. {
  456. if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
  457. return 0;
  458. return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
  459. + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
  460. + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
  461. ;
  462. }
  463. static inline int
  464. ctnetlink_secctx_size(const struct nf_conn *ct)
  465. {
  466. #ifdef CONFIG_NF_CONNTRACK_SECMARK
  467. int len, ret;
  468. ret = security_secid_to_secctx(ct->secmark, NULL, &len);
  469. if (ret)
  470. return 0;
  471. return nla_total_size(0) /* CTA_SECCTX */
  472. + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
  473. #else
  474. return 0;
  475. #endif
  476. }
  477. static inline size_t
  478. ctnetlink_timestamp_size(const struct nf_conn *ct)
  479. {
  480. #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
  481. if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
  482. return 0;
  483. return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
  484. #else
  485. return 0;
  486. #endif
  487. }
  488. static inline size_t
  489. ctnetlink_nlmsg_size(const struct nf_conn *ct)
  490. {
  491. return NLMSG_ALIGN(sizeof(struct nfgenmsg))
  492. + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
  493. + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
  494. + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
  495. + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
  496. + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
  497. + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
  498. + ctnetlink_acct_size(ct)
  499. + ctnetlink_timestamp_size(ct)
  500. + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
  501. + nla_total_size(0) /* CTA_PROTOINFO */
  502. + nla_total_size(0) /* CTA_HELP */
  503. + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
  504. + ctnetlink_secctx_size(ct)
  505. #ifdef CONFIG_NF_NAT_NEEDED
  506. + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
  507. + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
  508. #endif
  509. #ifdef CONFIG_NF_CONNTRACK_MARK
  510. + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
  511. #endif
  512. #ifdef CONFIG_NF_CONNTRACK_ZONES
  513. + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
  514. #endif
  515. + ctnetlink_proto_size(ct)
  516. + ctnetlink_label_size(ct)
  517. ;
  518. }
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Conntrack event notifier: build and multicast a ctnetlink message for
 * the event bits in @events.  Runs from the conntrack event path, so
 * only GFP_ATOMIC allocation is allowed.  Returns -ENOBUFS to request
 * redelivery when the message could not be queued, 0 otherwise.
 */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	const struct nf_conntrack_zone *zone;
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	/* map the event bits to a message type and a multicast group */
	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	/* don't build a message nobody will receive */
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	zone = nf_ct_zone(ct);

	/* original-direction tuple (plus per-direction zone, if any) */
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	/* reply-direction tuple */
	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		/* final counters and timestamps for the deleted entry */
		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	/* tell the event cache that delivery failed */
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;
	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
  640. static int ctnetlink_done(struct netlink_callback *cb)
  641. {
  642. if (cb->args[1])
  643. nf_ct_put((struct nf_conn *)cb->args[1]);
  644. kfree(cb->data);
  645. return 0;
  646. }
/* Optional dump/flush filter: an entry matches when
 * (ct->mark & mark.mask) == mark.val.  Only meaningful when
 * CONFIG_NF_CONNTRACK_MARK is enabled (see ctnetlink_filter_match).
 */
struct ctnetlink_filter {
	struct {
		u_int32_t val;
		u_int32_t mask;
	} mark;
};
  653. static struct ctnetlink_filter *
  654. ctnetlink_alloc_filter(const struct nlattr * const cda[])
  655. {
  656. #ifdef CONFIG_NF_CONNTRACK_MARK
  657. struct ctnetlink_filter *filter;
  658. filter = kzalloc(sizeof(*filter), GFP_KERNEL);
  659. if (filter == NULL)
  660. return ERR_PTR(-ENOMEM);
  661. filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
  662. filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
  663. return filter;
  664. #else
  665. return ERR_PTR(-EOPNOTSUPP);
  666. #endif
  667. }
  668. static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
  669. {
  670. struct ctnetlink_filter *filter = data;
  671. if (filter == NULL)
  672. return 1;
  673. #ifdef CONFIG_NF_CONNTRACK_MARK
  674. if ((ct->mark & filter->mark.mask) == filter->mark.val)
  675. return 1;
  676. #endif
  677. return 0;
  678. }
/* GET dump callback: walk the conntrack hash table and emit one message
 * per original-direction entry, honouring the L3 family in the request
 * header and the optional mark filter in cb->data.  cb->args[0] is the
 * current bucket, cb->args[1] holds a referenced "last" entry when the
 * previous skb filled up mid-bucket.
 */
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
	int res;
	spinlock_t *lockp;

	last = (struct nf_conn *)cb->args[1];

	local_bh_disable();
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		spin_lock(lockp);
		/* re-check under the lock: the table may have shrunk */
		if (cb->args[0] >= net->ct.htable_size) {
			spin_unlock(lockp);
			goto out;
		}
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
					   hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			if (cb->args[1]) {
				/* fast-forward to where the last skb ended */
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			if (!ctnetlink_filter_match(ct, cb->data))
				continue;
			rcu_read_lock();
			res =
			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct);
			rcu_read_unlock();
			if (res < 0) {
				/* skb full: hold a ref so we can resume here */
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				spin_unlock(lockp);
				goto out;
			}
		}
		spin_unlock(lockp);
		if (cb->args[1]) {
			/* "last" entry vanished; rescan this bucket */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	local_bh_enable();
	if (last) {
		/* nf ct hash resize happened, now clear the leftover. */
		if ((struct nf_conn *)cb->args[1] == last)
			cb->args[1] = 0;
		nf_ct_put(last);
	}
	return skb->len;
}
  747. static inline int
  748. ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
  749. {
  750. struct nlattr *tb[CTA_IP_MAX+1];
  751. struct nf_conntrack_l3proto *l3proto;
  752. int ret = 0;
  753. ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
  754. if (ret < 0)
  755. return ret;
  756. rcu_read_lock();
  757. l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
  758. if (likely(l3proto->nlattr_to_tuple)) {
  759. ret = nla_validate_nested(attr, CTA_IP_MAX,
  760. l3proto->nla_policy);
  761. if (ret == 0)
  762. ret = l3proto->nlattr_to_tuple(tb, tuple);
  763. }
  764. rcu_read_unlock();
  765. return ret;
  766. }
  767. static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
  768. [CTA_PROTO_NUM] = { .type = NLA_U8 },
  769. };
  770. static inline int
  771. ctnetlink_parse_tuple_proto(struct nlattr *attr,
  772. struct nf_conntrack_tuple *tuple)
  773. {
  774. struct nlattr *tb[CTA_PROTO_MAX+1];
  775. struct nf_conntrack_l4proto *l4proto;
  776. int ret = 0;
  777. ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
  778. if (ret < 0)
  779. return ret;
  780. if (!tb[CTA_PROTO_NUM])
  781. return -EINVAL;
  782. tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
  783. rcu_read_lock();
  784. l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
  785. if (likely(l4proto->nlattr_to_tuple)) {
  786. ret = nla_validate_nested(attr, CTA_PROTO_MAX,
  787. l4proto->nla_policy);
  788. if (ret == 0)
  789. ret = l4proto->nlattr_to_tuple(tb, tuple);
  790. }
  791. rcu_read_unlock();
  792. return ret;
  793. }
  794. static int
  795. ctnetlink_parse_zone(const struct nlattr *attr,
  796. struct nf_conntrack_zone *zone)
  797. {
  798. nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
  799. NF_CT_DEFAULT_ZONE_DIR, 0);
  800. #ifdef CONFIG_NF_CONNTRACK_ZONES
  801. if (attr)
  802. zone->id = ntohs(nla_get_be16(attr));
  803. #else
  804. if (attr)
  805. return -EOPNOTSUPP;
  806. #endif
  807. return 0;
  808. }
  809. static int
  810. ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
  811. struct nf_conntrack_zone *zone)
  812. {
  813. int ret;
  814. if (zone->id != NF_CT_DEFAULT_ZONE_ID)
  815. return -EINVAL;
  816. ret = ctnetlink_parse_zone(attr, zone);
  817. if (ret < 0)
  818. return ret;
  819. if (type == CTA_TUPLE_REPLY)
  820. zone->dir = NF_CT_ZONE_DIR_REPL;
  821. else
  822. zone->dir = NF_CT_ZONE_DIR_ORIG;
  823. return 0;
  824. }
/* Policy for the CTA_TUPLE_ORIG/CTA_TUPLE_REPLY/CTA_TUPLE_MASTER nest. */
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
};

/* Build @tuple from the nested attribute cda[type].  CTA_TUPLE_IP and
 * CTA_TUPLE_PROTO are mandatory; a CTA_TUPLE_ZONE is only accepted when
 * the caller supplied @zone to receive it.
 */
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple, u32 type,
		      u_int8_t l3num, struct nf_conntrack_zone *zone)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
	if (err < 0)
		return err;

	if (!tb[CTA_TUPLE_IP])
		return -EINVAL;

	tuple->src.l3num = l3num;

	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
	if (err < 0)
		return err;

	if (!tb[CTA_TUPLE_PROTO])
		return -EINVAL;

	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
	if (err < 0)
		return err;

	if (tb[CTA_TUPLE_ZONE]) {
		if (!zone)
			return -EINVAL;

		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
						 type, zone);
		if (err < 0)
			return err;
	}

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}
  867. static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
  868. [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
  869. .len = NF_CT_HELPER_NAME_LEN - 1 },
  870. };
  871. static inline int
  872. ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
  873. struct nlattr **helpinfo)
  874. {
  875. int err;
  876. struct nlattr *tb[CTA_HELP_MAX+1];
  877. err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
  878. if (err < 0)
  879. return err;
  880. if (!tb[CTA_HELP_NAME])
  881. return -EINVAL;
  882. *helper_name = nla_data(tb[CTA_HELP_NAME]);
  883. if (tb[CTA_HELP_INFO])
  884. *helpinfo = tb[CTA_HELP_INFO];
  885. return 0;
  886. }
/* Top-level attribute policy for conntrack messages; nested and binary
 * payloads are validated further by their dedicated parsers above.
 */
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
	[CTA_MARK_MASK]		= { .type = NLA_U32 },
	[CTA_LABELS]		= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
};
  908. static int ctnetlink_flush_conntrack(struct net *net,
  909. const struct nlattr * const cda[],
  910. u32 portid, int report)
  911. {
  912. struct ctnetlink_filter *filter = NULL;
  913. if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
  914. filter = ctnetlink_alloc_filter(cda);
  915. if (IS_ERR(filter))
  916. return PTR_ERR(filter);
  917. }
  918. nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
  919. portid, report);
  920. kfree(filter);
  921. return 0;
  922. }
/* IPCTNL_MSG_CT_DELETE handler: delete the entry matching the supplied
 * tuple (orig or reply), or flush the whole table when neither tuple
 * attribute is present.
 */
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else {
		/* no tuple given: flush (optionally mark-filtered) */
		return ctnetlink_flush_conntrack(net, cda,
						 NETLINK_CB(skb).portid,
						 nlmsg_report(nlh));
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		/* the conntrack id is the entry's address truncated to
		 * 32 bit; reject if it names a different entry */
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	/* only delete if we win the race against the expiry timer */
	if (del_timer(&ct->timeout))
		nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));

	nf_ct_put(ct);

	return 0;
}
/* IPCTNL_MSG_CT_GET handler: either start a full table dump (when
 * NLM_F_DUMP is set) or look up a single entry by tuple and unicast it
 * back to the requester.
 */
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
		};

		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
			struct ctnetlink_filter *filter;

			filter = ctnetlink_alloc_filter(cda);
			if (IS_ERR(filter))
				return PTR_ERR(filter);

			/* freed by ctnetlink_done(); NOTE(review): if
			 * netlink_dump_start() fails, it is unclear from
			 * here that .done runs -- verify no filter leak */
			c.data = filter;
		}
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	rcu_read_lock();
	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
	rcu_read_unlock();
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
  1036. static int ctnetlink_done_list(struct netlink_callback *cb)
  1037. {
  1038. if (cb->args[1])
  1039. nf_ct_put((struct nf_conn *)cb->args[1]);
  1040. return 0;
  1041. }
/* Shared dump callback for the per-cpu dying and unconfirmed lists.
 * cb->args[0] = next cpu to scan, cb->args[1] = referenced resume
 * entry, cb->args[2] = all-done flag.
 */
static int
ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
{
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
	int res;
	int cpu;
	struct hlist_nulls_head *list;
	struct net *net = sock_net(skb->sk);

	if (cb->args[2])
		return 0;

	last = (struct nf_conn *)cb->args[1];

	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
		struct ct_pcpu *pcpu;

		if (!cpu_possible(cpu))
			continue;

		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
		spin_lock_bh(&pcpu->lock);
		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
restart:
		hlist_nulls_for_each_entry(h, n, list, hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			if (cb->args[1]) {
				/* fast-forward to the resume entry */
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			rcu_read_lock();
			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
						  ct);
			rcu_read_unlock();
			if (res < 0) {
				/* entries on these lists may already have a
				 * zero refcount; only resume on ones we can
				 * legitimately hold */
				if (!atomic_inc_not_zero(&ct->ct_general.use))
					continue;
				cb->args[0] = cpu;
				cb->args[1] = (unsigned long)ct;
				spin_unlock_bh(&pcpu->lock);
				goto out;
			}
		}
		if (cb->args[1]) {
			/* resume entry vanished; rescan this cpu's list */
			cb->args[1] = 0;
			goto restart;
		}
		spin_unlock_bh(&pcpu->lock);
	}
	cb->args[2] = 1;
out:
	if (last)
		nf_ct_put(last);

	return skb->len;
}
/* Dump callback: walk the per-cpu dying lists. */
static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	return ctnetlink_dump_list(skb, cb, true);
}
  1106. static int
  1107. ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
  1108. const struct nlmsghdr *nlh,
  1109. const struct nlattr * const cda[])
  1110. {
  1111. if (nlh->nlmsg_flags & NLM_F_DUMP) {
  1112. struct netlink_dump_control c = {
  1113. .dump = ctnetlink_dump_dying,
  1114. .done = ctnetlink_done_list,
  1115. };
  1116. return netlink_dump_start(ctnl, skb, nlh, &c);
  1117. }
  1118. return -EOPNOTSUPP;
  1119. }
/* Dump callback: walk the per-cpu unconfirmed lists. */
static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	return ctnetlink_dump_list(skb, cb, false);
}
  1125. static int
  1126. ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
  1127. const struct nlmsghdr *nlh,
  1128. const struct nlattr * const cda[])
  1129. {
  1130. if (nlh->nlmsg_flags & NLM_F_DUMP) {
  1131. struct netlink_dump_control c = {
  1132. .dump = ctnetlink_dump_unconfirmed,
  1133. .done = ctnetlink_done_list,
  1134. };
  1135. return netlink_dump_start(ctnl, skb, nlh, &c);
  1136. }
  1137. return -EOPNOTSUPP;
  1138. }
#ifdef CONFIG_NF_NAT_NEEDED
/* Hand a CTA_NAT_SRC/CTA_NAT_DST attribute to the NAT module's parser.
 * The unlock/relock pairs below imply this runs with the ctnetlink nfnl
 * mutex and rcu_read_lock held; both must be dropped around
 * request_module(), which may sleep.  Returns -EAGAIN after a
 * successful module load so the caller can replay the request.
 */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
	int err;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		/* hook registered while we were unlocked: retry */
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
		/* the parser wants the family-specific NAT module */
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
#endif
  1183. static int
  1184. ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
  1185. {
  1186. unsigned long d;
  1187. unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
  1188. d = ct->status ^ status;
  1189. if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
  1190. /* unchangeable */
  1191. return -EBUSY;
  1192. if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
  1193. /* SEEN_REPLY bit can only be set */
  1194. return -EBUSY;
  1195. if (d & IPS_ASSURED && !(status & IPS_ASSURED))
  1196. /* ASSURED bit can only be set */
  1197. return -EBUSY;
  1198. /* Be careful here, modifying NAT bits can screw up things,
  1199. * so don't let users modify them directly if they don't pass
  1200. * nf_nat_range. */
  1201. ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
  1202. return 0;
  1203. }
  1204. static int
  1205. ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
  1206. {
  1207. #ifdef CONFIG_NF_NAT_NEEDED
  1208. int ret;
  1209. if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
  1210. return 0;
  1211. ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
  1212. cda[CTA_NAT_DST]);
  1213. if (ret < 0)
  1214. return ret;
  1215. ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
  1216. cda[CTA_NAT_SRC]);
  1217. return ret;
  1218. #else
  1219. if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
  1220. return 0;
  1221. return -EOPNOTSUPP;
  1222. #endif
  1223. }
/* Change (or clear, via an empty name) the helper of an existing entry.
 * NOTE(review): the spin_unlock_bh/spin_lock_bh pair below implies this
 * runs with nf_conntrack_expect_lock held -- confirm at the call site.
 * Returns -EAGAIN after loading a helper module so the request is
 * replayed.
 */
static inline int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}
		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		/* drop the expect lock across the sleeping module load */
		spin_unlock_bh(&nf_conntrack_expect_lock);
		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_expect_lock);
			return -EOPNOTSUPP;
		}
		spin_lock_bh(&nf_conntrack_expect_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper) {
			/* update private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			return 0;
		} else
			return -EBUSY;
	}

	/* we cannot set a helper for an existing conntrack */
	return -EOPNOTSUPP;
}
  1275. static inline int
  1276. ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
  1277. {
  1278. u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
  1279. if (!del_timer(&ct->timeout))
  1280. return -ETIME;
  1281. ct->timeout.expires = jiffies + timeout * HZ;
  1282. add_timer(&ct->timeout);
  1283. return 0;
  1284. }
  1285. static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
  1286. [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
  1287. [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
  1288. [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
  1289. };
  1290. static inline int
  1291. ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
  1292. {
  1293. const struct nlattr *attr = cda[CTA_PROTOINFO];
  1294. struct nlattr *tb[CTA_PROTOINFO_MAX+1];
  1295. struct nf_conntrack_l4proto *l4proto;
  1296. int err = 0;
  1297. err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
  1298. if (err < 0)
  1299. return err;
  1300. rcu_read_lock();
  1301. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  1302. if (l4proto->from_nlattr)
  1303. err = l4proto->from_nlattr(tb, ct);
  1304. rcu_read_unlock();
  1305. return err;
  1306. }
  1307. static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
  1308. [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
  1309. [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
  1310. [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
  1311. };
  1312. static inline int
  1313. change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
  1314. {
  1315. int err;
  1316. struct nlattr *cda[CTA_SEQADJ_MAX+1];
  1317. err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy);
  1318. if (err < 0)
  1319. return err;
  1320. if (!cda[CTA_SEQADJ_CORRECTION_POS])
  1321. return -EINVAL;
  1322. seq->correction_pos =
  1323. ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
  1324. if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
  1325. return -EINVAL;
  1326. seq->offset_before =
  1327. ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
  1328. if (!cda[CTA_SEQADJ_OFFSET_AFTER])
  1329. return -EINVAL;
  1330. seq->offset_after =
  1331. ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
  1332. return 0;
  1333. }
  1334. static int
  1335. ctnetlink_change_seq_adj(struct nf_conn *ct,
  1336. const struct nlattr * const cda[])
  1337. {
  1338. struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
  1339. int ret = 0;
  1340. if (!seqadj)
  1341. return 0;
  1342. if (cda[CTA_SEQ_ADJ_ORIG]) {
  1343. ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
  1344. cda[CTA_SEQ_ADJ_ORIG]);
  1345. if (ret < 0)
  1346. return ret;
  1347. ct->status |= IPS_SEQ_ADJUST;
  1348. }
  1349. if (cda[CTA_SEQ_ADJ_REPLY]) {
  1350. ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
  1351. cda[CTA_SEQ_ADJ_REPLY]);
  1352. if (ret < 0)
  1353. return ret;
  1354. ct->status |= IPS_SEQ_ADJUST;
  1355. }
  1356. return 0;
  1357. }
  1358. static int
  1359. ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
  1360. {
  1361. #ifdef CONFIG_NF_CONNTRACK_LABELS
  1362. size_t len = nla_len(cda[CTA_LABELS]);
  1363. const void *mask = cda[CTA_LABELS_MASK];
  1364. if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
  1365. return -EINVAL;
  1366. if (mask) {
  1367. if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
  1368. nla_len(cda[CTA_LABELS_MASK]) != len)
  1369. return -EINVAL;
  1370. mask = nla_data(cda[CTA_LABELS_MASK]);
  1371. }
  1372. len /= sizeof(u32);
  1373. return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
  1374. #else
  1375. return -EOPNOTSUPP;
  1376. #endif
  1377. }
/* Apply the attribute set in @cda to an existing conntrack entry.
 * NAT setup and master assignment are rejected: they are only valid
 * when creating a new entry.
 */
static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_LABELS]) {
		err = ctnetlink_attach_labels(ct, cda);
		if (err < 0)
			return err;
	}

	return 0;
}
/* Allocate and fully set up a new conntrack entry from a userspace
 * request, then insert it into the hash table.  CTA_TIMEOUT is
 * mandatory.  Returns the new entry, or an ERR_PTR; on error the
 * half-built entry is freed.  Returns -EAGAIN after loading a helper
 * module so the request is replayed.
 */
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net,
			   const struct nf_conntrack_zone *zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	if (!cda[CTA_TIMEOUT])
		goto err1;

	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;

	rcu_read_lock();
	if (cda[CTA_HELP]) {
		char *helpname = NULL;
		struct nlattr *helpinfo = NULL;

		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			/* drop RCU across the sleeping module load */
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			if (helper) {
				/* module loaded: caller must retry */
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}
			/* set private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);

			/* not in hash table yet so not strictly necessary */
			RCU_INIT_POINTER(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	err = ctnetlink_setup_nat(ct, cda);
	if (err < 0)
		goto err2;

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);
	nfct_seqadj_ext_add(ct);
	nfct_synproxy_ext_add(ct);

	/* we must add conntrack extensions before confirmation. */
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
					    u3, NULL);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_get_real_ns();

	err = nf_conntrack_hash_check_insert(ct);
	if (err < 0)
		goto err2;

	rcu_read_unlock();

	return ct;

err2:
	rcu_read_unlock();
err1:
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}
/* IPCTNL_MSG_CT_NEW handler: create a new conntrack entry or update an
 * existing one.  Behaviour depends on NLM_F_CREATE (allow creation) and
 * NLM_F_EXCL (forbid updating an existing entry).
 */
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_conn *ct;
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
					    u3, &zone);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
					    u3, &zone);
		if (err < 0)
			return err;
	}

	/* Look up by whichever tuple was supplied; original is preferred.
	 * A successful find takes a reference on the conntrack.
	 */
	if (cda[CTA_TUPLE_ORIG])
		h = nf_conntrack_find_get(net, &zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = nf_conntrack_find_get(net, &zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			enum ip_conntrack_events events;

			/* creation needs both directions' tuples */
			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
				return -EINVAL;

			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct))
				return PTR_ERR(ct);

			err = 0;
			/* expectation-spawned entries are reported as RELATED */
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			if (cda[CTA_LABELS] &&
			    ctnetlink_attach_labels(ct, cda) == 0)
				events |= (1 << IPCT_LABEL);

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_SEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).portid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);	/* drop the creation reference */
		}

		return err;
	}
	/* implicit 'else' */

	err = -EEXIST;
	ct = nf_ct_tuplehash_to_ctrack(h);
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		/* Update in place; the expect lock serializes against
		 * helper reassignment and expectation changes.
		 */
		spin_lock_bh(&nf_conntrack_expect_lock);
		err = ctnetlink_change_conntrack(ct, cda);
		spin_unlock_bh(&nf_conntrack_expect_lock);
		if (err == 0) {
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_LABEL) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_SEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).portid,
						      nlmsg_report(nlh));
		}
	}

	nf_ct_put(ct);	/* pairs with nf_conntrack_find_get() above */
	return err;
}
  1635. static int
  1636. ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
  1637. __u16 cpu, const struct ip_conntrack_stat *st)
  1638. {
  1639. struct nlmsghdr *nlh;
  1640. struct nfgenmsg *nfmsg;
  1641. unsigned int flags = portid ? NLM_F_MULTI : 0, event;
  1642. event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
  1643. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
  1644. if (nlh == NULL)
  1645. goto nlmsg_failure;
  1646. nfmsg = nlmsg_data(nlh);
  1647. nfmsg->nfgen_family = AF_UNSPEC;
  1648. nfmsg->version = NFNETLINK_V0;
  1649. nfmsg->res_id = htons(cpu);
  1650. if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
  1651. nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
  1652. nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
  1653. nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
  1654. nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
  1655. nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
  1656. nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
  1657. nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
  1658. nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
  1659. htonl(st->insert_failed)) ||
  1660. nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
  1661. nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
  1662. nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
  1663. nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
  1664. htonl(st->search_restart)))
  1665. goto nla_put_failure;
  1666. nlmsg_end(skb, nlh);
  1667. return skb->len;
  1668. nla_put_failure:
  1669. nlmsg_failure:
  1670. nlmsg_cancel(skb, nlh);
  1671. return -1;
  1672. }
  1673. static int
  1674. ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
  1675. {
  1676. int cpu;
  1677. struct net *net = sock_net(skb->sk);
  1678. if (cb->args[0] == nr_cpu_ids)
  1679. return 0;
  1680. for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
  1681. const struct ip_conntrack_stat *st;
  1682. if (!cpu_possible(cpu))
  1683. continue;
  1684. st = per_cpu_ptr(net->ct.stat, cpu);
  1685. if (ctnetlink_ct_stat_cpu_fill_info(skb,
  1686. NETLINK_CB(cb->skb).portid,
  1687. cb->nlh->nlmsg_seq,
  1688. cpu, st) < 0)
  1689. break;
  1690. }
  1691. cb->args[0] = cpu;
  1692. return skb->len;
  1693. }
  1694. static int
  1695. ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
  1696. const struct nlmsghdr *nlh,
  1697. const struct nlattr * const cda[])
  1698. {
  1699. if (nlh->nlmsg_flags & NLM_F_DUMP) {
  1700. struct netlink_dump_control c = {
  1701. .dump = ctnetlink_ct_stat_cpu_dump,
  1702. };
  1703. return netlink_dump_start(ctnl, skb, nlh, &c);
  1704. }
  1705. return 0;
  1706. }
  1707. static int
  1708. ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
  1709. struct net *net)
  1710. {
  1711. struct nlmsghdr *nlh;
  1712. struct nfgenmsg *nfmsg;
  1713. unsigned int flags = portid ? NLM_F_MULTI : 0, event;
  1714. unsigned int nr_conntracks = atomic_read(&net->ct.count);
  1715. event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
  1716. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
  1717. if (nlh == NULL)
  1718. goto nlmsg_failure;
  1719. nfmsg = nlmsg_data(nlh);
  1720. nfmsg->nfgen_family = AF_UNSPEC;
  1721. nfmsg->version = NFNETLINK_V0;
  1722. nfmsg->res_id = 0;
  1723. if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
  1724. goto nla_put_failure;
  1725. nlmsg_end(skb, nlh);
  1726. return skb->len;
  1727. nla_put_failure:
  1728. nlmsg_failure:
  1729. nlmsg_cancel(skb, nlh);
  1730. return -1;
  1731. }
/* IPCTNL_MSG_CT_GET_STATS handler: allocate a reply skb, fill in the
 * global statistics and unicast it back to the requester.
 */
static int
ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const cda[])
{
	struct sk_buff *skb2;
	int err;

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL)
		return -ENOMEM;

	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
					  nlh->nlmsg_seq,
					  NFNL_MSG_TYPE(nlh->nlmsg_type),
					  sock_net(skb->sk));
	/* fill returns skb2->len (> 0) on success, <= 0 on failure */
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
	if (err < 0)
		goto out;	/* netlink_unicast consumed skb2 */

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
/* Netlink attribute validation policy for CTA_EXPECT_* attributes. */
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING,
				    .len = NF_CT_HELPER_NAME_LEN - 1 },
	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
};
/* Forward declaration; the definition lives with the expectation code
 * below but is also needed by the glue_ct helpers above it.
 */
static struct nf_conntrack_expect *
ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
		       struct nf_conntrack_helper *helper,
		       struct nf_conntrack_tuple *tuple,
		       struct nf_conntrack_tuple *mask);
  1777. #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
/* Upper bound, in bytes, on the attribute payload that
 * __ctnetlink_glue_build() may emit for @ct; used by the nfnetlink
 * queue/log glue to size the skb before building.
 */
static size_t
ctnetlink_glue_build_size(const struct nf_conn *ct)
{
	return 3 * nla_total_size(0)	       /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0)	       /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0)	       /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}
  1805. static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
  1806. enum ip_conntrack_info *ctinfo)
  1807. {
  1808. struct nf_conn *ct;
  1809. ct = nf_ct_get(skb, ctinfo);
  1810. if (ct && nf_ct_is_untracked(ct))
  1811. ct = NULL;
  1812. return ct;
  1813. }
/* Serialize @ct into netlink attributes: both direction tuples (with
 * per-direction zone ids), the default zone, id, status, timeout,
 * protoinfo, helper info and the optional secctx/master/seqadj/mark/
 * label attributes.  Returns 0 on success, -ENOSPC when the skb has no
 * room left.
 */
static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{
	const struct nf_conntrack_zone *zone;
	struct nlattr *nest_parms;

	/* helper/zone pointers are RCU-protected */
	rcu_read_lock();
	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_timeout(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
		goto nla_put_failure;
#endif
	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
		goto nla_put_failure;

	if ((ct->status & IPS_SEQ_ADJUST) &&
	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	if (ctnetlink_dump_labels(skb, ct) < 0)
		goto nla_put_failure;
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -ENOSPC;
}
  1872. static int
  1873. ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
  1874. enum ip_conntrack_info ctinfo,
  1875. u_int16_t ct_attr, u_int16_t ct_info_attr)
  1876. {
  1877. struct nlattr *nest_parms;
  1878. nest_parms = nla_nest_start(skb, ct_attr | NLA_F_NESTED);
  1879. if (!nest_parms)
  1880. goto nla_put_failure;
  1881. if (__ctnetlink_glue_build(skb, ct) < 0)
  1882. goto nla_put_failure;
  1883. nla_nest_end(skb, nest_parms);
  1884. if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
  1885. goto nla_put_failure;
  1886. return 0;
  1887. nla_put_failure:
  1888. return -ENOSPC;
  1889. }
/* Apply the attributes parsed from an NFQA_CT-style nest to @ct:
 * timeout, status, helper, labels and (optionally masked) mark.
 * Caller holds nf_conntrack_expect_lock (see ctnetlink_glue_parse).
 */
static int
ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
{
	int err;

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_LABELS]) {
		err = ctnetlink_attach_labels(ct, cda);
		if (err < 0)
			return err;
	}
#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK]) {
		u32 mask = 0, mark, newmark;

		/* mask holds the bits of the old mark to preserve */
		if (cda[CTA_MARK_MASK])
			mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));

		mark = ntohl(nla_get_be32(cda[CTA_MARK]));
		newmark = (ct->mark & mask) ^ mark;
		/* avoid a write (and cacheline dirtying) when unchanged */
		if (newmark != ct->mark)
			ct->mark = newmark;
	}
#endif
	return 0;
}
  1927. static int
  1928. ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
  1929. {
  1930. struct nlattr *cda[CTA_MAX+1];
  1931. int ret;
  1932. ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
  1933. if (ret < 0)
  1934. return ret;
  1935. spin_lock_bh(&nf_conntrack_expect_lock);
  1936. ret = ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
  1937. spin_unlock_bh(&nf_conntrack_expect_lock);
  1938. return ret;
  1939. }
  1940. static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
  1941. const struct nf_conn *ct,
  1942. struct nf_conntrack_tuple *tuple,
  1943. struct nf_conntrack_tuple *mask)
  1944. {
  1945. int err;
  1946. err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
  1947. nf_ct_l3num(ct), NULL);
  1948. if (err < 0)
  1949. return err;
  1950. return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
  1951. nf_ct_l3num(ct), NULL);
  1952. }
/* Attach a new expectation (parsed from a nested NFQA_EXP-style
 * attribute) to master conntrack @ct and report it to @portid.
 */
static int
ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report)
{
	struct nlattr *cda[CTA_EXPECT_MAX+1];
	struct nf_conntrack_tuple tuple, mask;
	struct nf_conntrack_helper *helper = NULL;
	struct nf_conntrack_expect *exp;
	int err;

	err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
	if (err < 0)
		return err;

	err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
				       ct, &tuple, &mask);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_HELP_NAME]) {
		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);

		/* no module autoload here, unlike the ctnetlink path */
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL)
			return -EOPNOTSUPP;
	}

	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
				     helper, &tuple, &mask);
	if (IS_ERR(exp))
		return PTR_ERR(exp);

	err = nf_ct_expect_related_report(exp, portid, report);
	if (err < 0) {
		nf_ct_expect_put(exp);	/* insertion failed; drop our ref */
		return err;
	}

	return 0;
}
  1987. static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
  1988. enum ip_conntrack_info ctinfo, int diff)
  1989. {
  1990. if (!(ct->status & IPS_NAT_MASK))
  1991. return;
  1992. nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
  1993. }
/* Callback table handed to the nfnetlink queue/log glue so those
 * subsystems can build and parse conntrack attributes.
 */
static struct nfnl_ct_hook ctnetlink_glue_hook = {
	.get_ct		= ctnetlink_glue_get_ct,
	.build_size	= ctnetlink_glue_build_size,
	.build		= ctnetlink_glue_build,
	.parse		= ctnetlink_glue_parse,
	.attach_expect	= ctnetlink_glue_attach_expect,
	.seq_adjust	= ctnetlink_glue_seqadj,
};
  2002. #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
  2003. /***********************************************************************
  2004. * EXPECT
  2005. ***********************************************************************/
  2006. static inline int
  2007. ctnetlink_exp_dump_tuple(struct sk_buff *skb,
  2008. const struct nf_conntrack_tuple *tuple,
  2009. u32 type)
  2010. {
  2011. struct nlattr *nest_parms;
  2012. nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
  2013. if (!nest_parms)
  2014. goto nla_put_failure;
  2015. if (ctnetlink_dump_tuples(skb, tuple) < 0)
  2016. goto nla_put_failure;
  2017. nla_nest_end(skb, nest_parms);
  2018. return 0;
  2019. nla_put_failure:
  2020. return -1;
  2021. }
/* Dump an expectation mask as a nested CTA_EXPECT_MASK attribute.
 * nf_conntrack_tuple_mask only covers the source part, so a full
 * tuple is built with every other field saturated (all-ones) before
 * reusing the regular tuple dumpers.
 */
static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	/* all-ones template; src addr/port and protonum are filled in */
	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	/* protocol handler lookup is RCU-protected */
	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
	if (ret >= 0) {
		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
					       tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	}
	rcu_read_unlock();

	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);
	return 0;

nla_put_failure:
	return -1;
}
/* All-zeroes address, used to test whether exp->saved_addr is set. */
static const union nf_inet_addr any_addr;
/* Dump one expectation: its tuple, mask, master tuple, optional NAT
 * information, timeout, id, flags, class, helper name and expectfn
 * name.  Returns 0 on success, -1 when the skb overflows.
 */
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
	struct nf_conn_help *help;
#ifdef CONFIG_NF_NAT_NEEDED
	struct nlattr *nest_parms;
	struct nf_conntrack_tuple nat_tuple = {};
#endif
	struct nf_ct_helper_expectfn *expfn;

	/* clamp already-expired timers to zero */
	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				     &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				     CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_NAT_NEEDED
	/* only emit CTA_EXPECT_NAT when a saved address or port exists */
	if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
	    exp->saved_proto.all) {
		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
		if (!nest_parms)
			goto nla_put_failure;

		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
			goto nla_put_failure;

		/* rebuild a tuple from the saved NAT data for dumping */
		nat_tuple.src.l3num = nf_ct_l3num(master);
		nat_tuple.src.u3 = exp->saved_addr;
		nat_tuple.dst.protonum = nf_ct_protonum(master);
		nat_tuple.src.u = exp->saved_proto;

		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
					     CTA_EXPECT_NAT_TUPLE) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest_parms);
	}
#endif
	/* CTA_EXPECT_ID is the kernel pointer truncated to 32 bits */
	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
		goto nla_put_failure;
	help = nfct_help(master);
	if (help) {
		struct nf_conntrack_helper *helper;

		helper = rcu_dereference(help->helper);
		if (helper &&
		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
			goto nla_put_failure;
	}
	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
	if (expfn != NULL &&
	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
  2117. static int
  2118. ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
  2119. int event, const struct nf_conntrack_expect *exp)
  2120. {
  2121. struct nlmsghdr *nlh;
  2122. struct nfgenmsg *nfmsg;
  2123. unsigned int flags = portid ? NLM_F_MULTI : 0;
  2124. event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
  2125. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
  2126. if (nlh == NULL)
  2127. goto nlmsg_failure;
  2128. nfmsg = nlmsg_data(nlh);
  2129. nfmsg->nfgen_family = exp->tuple.src.l3num;
  2130. nfmsg->version = NFNETLINK_V0;
  2131. nfmsg->res_id = 0;
  2132. if (ctnetlink_exp_dump_expect(skb, exp) < 0)
  2133. goto nla_put_failure;
  2134. nlmsg_end(skb, nlh);
  2135. return skb->len;
  2136. nlmsg_failure:
  2137. nla_put_failure:
  2138. nlmsg_cancel(skb, nlh);
  2139. return -1;
  2140. }
  2141. #ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Expectation event notifier: broadcast a NEW or DESTROY expectation
 * message to the matching nfnetlink multicast group.
 */
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *skb;
	unsigned int type, group;
	int flags = 0;

	if (events & (1 << IPEXP_DESTROY)) {
		type = IPCTNL_MSG_EXP_DELETE;
		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
	} else if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_EXP_NEW;
	} else
		return 0;

	/* skip the work if nobody asked for this event */
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	/* tell listeners they lost an event */
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
  2190. #endif
  2191. static int ctnetlink_exp_done(struct netlink_callback *cb)
  2192. {
  2193. if (cb->args[1])
  2194. nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
  2195. return 0;
  2196. }
/* Dump all expectations, optionally filtered by L3 protocol family.
 * Resume state: cb->args[0] is the hash bucket, cb->args[1] holds a
 * reference to the last entry that did not fit in the previous skb.
 */
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
				     hnode) {
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			/* resuming: skip until we pass the saved entry */
			if (cb->args[1]) {
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    IPCTNL_MSG_EXP_NEW,
						    exp) < 0) {
				/* skb full: pin this entry for resumption,
				 * unless it is already being freed */
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		/* saved entry vanished from this bucket; rescan it */
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
/* Dump the expectations attached to one master conntrack (cb->data),
 * optionally filtered by L3 protocol family.  Same resume protocol as
 * ctnetlink_exp_dump_table(); cb->args[0] flags dump completion.
 */
static int
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct nf_conn *ct = cb->data;
	struct nf_conn_help *help = nfct_help(ct);
	u_int8_t l3proto = nfmsg->nfgen_family;

	/* single list; non-zero args[0] means we already finished */
	if (cb->args[0])
		return 0;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
restart:
	hlist_for_each_entry(exp, &help->expectations, lnode) {
		if (l3proto && exp->tuple.src.l3num != l3proto)
			continue;
		/* resuming: skip until we pass the saved entry */
		if (cb->args[1]) {
			if (exp != last)
				continue;
			cb->args[1] = 0;
		}
		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    IPCTNL_MSG_EXP_NEW,
					    exp) < 0) {
			/* skb full: pin this entry for the next pass */
			if (!atomic_inc_not_zero(&exp->use))
				continue;
			cb->args[1] = (unsigned long)exp;
			goto out;
		}
	}
	/* saved entry vanished; rescan the list from the start */
	if (cb->args[1]) {
		cb->args[1] = 0;
		goto restart;
	}
	cb->args[0] = 1;
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
/* Start a dump of the expectations belonging to one master conntrack,
 * identified by the CTA_EXPECT_MASTER tuple (and optional zone).
 */
static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
				 const struct nlmsghdr *nlh,
				 const struct nlattr * const cda[])
{
	int err;
	struct net *net = sock_net(ctnl);
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conntrack_zone zone;
	struct netlink_dump_control c = {
		.dump = ctnetlink_exp_ct_dump_table,
		.done = ctnetlink_exp_done,
	};

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
				    u3, NULL);
	if (err < 0)
		return err;

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);
	/* the dump callback reads the master conntrack via cb->data */
	c.data = ct;

	err = netlink_dump_start(ctnl, skb, nlh, &c);
	nf_ct_put(ct);	/* pairs with nf_conntrack_find_get() */

	return err;
}
/* IPCTNL_MSG_EXP_GET handler: either start a dump (all expectations,
 * or those of one master conntrack), or look up a single expectation
 * by tuple and unicast it back.
 */
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (cda[CTA_EXPECT_MASTER])
			return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
		else {
			struct netlink_dump_control c = {
				.dump = ctnetlink_exp_dump_table,
				.done = ctnetlink_exp_done,
			};
			return netlink_dump_start(ctnl, skb, nlh, &c);
		}
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_TUPLE])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
					    u3, NULL);
	else if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
					    u3, NULL);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	/* takes a reference on success */
	exp = nf_ct_expect_find_get(net, &zone, &tuple);
	if (!exp)
		return -ENOENT;

	if (cda[CTA_EXPECT_ID]) {
		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
		/* id is the kernel pointer truncated to 32 bits */
		if (ntohl(id) != (u32)(unsigned long)exp) {
			nf_ct_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_expect_put(exp);
		goto out;
	}

	rcu_read_lock();
	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
	rcu_read_unlock();
	nf_ct_expect_put(exp);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
	if (err < 0)
		goto out;	/* netlink_unicast consumed skb2 */

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
/* Handle IPCTNL_MSG_EXP_DELETE: remove a single expectation (by tuple,
 * optionally checked against CTA_EXPECT_ID), all expectations whose master's
 * helper matches CTA_EXPECT_HELP_NAME, or flush every expectation, depending
 * on which attributes userspace supplied.
 */
static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct hlist_node *next;
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	unsigned int i;
	int err;

	if (cda[CTA_EXPECT_TUPLE]) {
		/* delete a single expect by tuple */
		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
		if (err < 0)
			return err;
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
					    u3, NULL);
		if (err < 0)
			return err;

		/* bump usage count to 2 */
		exp = nf_ct_expect_find_get(net, &zone, &tuple);
		if (!exp)
			return -ENOENT;

		if (cda[CTA_EXPECT_ID]) {
			/* the id userspace sends back is the expectation's
			 * kernel address truncated to 32 bit; a mismatch means
			 * the entry was recycled since the GET. */
			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
			if (ntohl(id) != (u32)(unsigned long)exp) {
				nf_ct_expect_put(exp);
				return -ENOENT;
			}
		}

		/* after list removal, usage count == 1 */
		spin_lock_bh(&nf_conntrack_expect_lock);
		if (del_timer(&exp->timeout)) {
			/* only unlink if we beat the timer to it; otherwise
			 * the expiry path performs (or performed) the
			 * teardown and drops the list reference itself. */
			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
						   nlmsg_report(nlh));
			nf_ct_expect_put(exp);
		}
		spin_unlock_bh(&nf_conntrack_expect_lock);
		/* have to put what we 'get' above.
		 * after this line usage count == 0 */
		nf_ct_expect_put(exp);
	} else if (cda[CTA_EXPECT_HELP_NAME]) {
		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
		struct nf_conn_help *m_help;

		/* delete all expectations for this helper */
		spin_lock_bh(&nf_conntrack_expect_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				/* NOTE(review): assumes every master conntrack
				 * in the expect hash carries a helper
				 * extension — TODO confirm m_help cannot be
				 * NULL here. */
				m_help = nfct_help(exp->master);
				if (!strcmp(m_help->helper->name, name) &&
				    del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).portid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_expect_lock);
	} else {
		/* This basically means we have to flush everything*/
		spin_lock_bh(&nf_conntrack_expect_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				if (del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).portid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_expect_lock);
	}

	return 0;
}
  2467. static int
  2468. ctnetlink_change_expect(struct nf_conntrack_expect *x,
  2469. const struct nlattr * const cda[])
  2470. {
  2471. if (cda[CTA_EXPECT_TIMEOUT]) {
  2472. if (!del_timer(&x->timeout))
  2473. return -ETIME;
  2474. x->timeout.expires = jiffies +
  2475. ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
  2476. add_timer(&x->timeout);
  2477. }
  2478. return 0;
  2479. }
/* Attribute policy for the nested CTA_EXPECT_NAT attribute. */
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
};
/* Parse the nested CTA_EXPECT_NAT attribute into @exp's saved NAT address,
 * saved proto part and direction.  Returns 0 on success, a negative errno
 * on malformed input, or -EOPNOTSUPP when NAT support is compiled out.
 */
static int
ctnetlink_parse_expect_nat(const struct nlattr *attr,
			   struct nf_conntrack_expect *exp,
			   u_int8_t u3)
{
#ifdef CONFIG_NF_NAT_NEEDED
	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
	struct nf_conntrack_tuple nat_tuple = {};
	int err;

	err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
	if (err < 0)
		return err;

	/* both the direction and the tuple are mandatory */
	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
		return -EINVAL;

	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
				    u3, NULL);
	if (err < 0)
		return err;

	exp->saved_addr = nat_tuple.src.u3;
	exp->saved_proto = nat_tuple.src.u;
	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
/* Allocate and populate an expectation from netlink attributes.
 * @ct is the master conntrack; @helper may be NULL, in which case the
 * expectation is userspace-managed and must carry an explicit timeout.
 * Returns the new (refcounted) expectation or an ERR_PTR on failure.
 */
static struct nf_conntrack_expect *
ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
		       struct nf_conntrack_helper *helper,
		       struct nf_conntrack_tuple *tuple,
		       struct nf_conntrack_tuple *mask)
{
	u_int32_t class = 0;
	struct nf_conntrack_expect *exp;
	struct nf_conn_help *help;
	int err;

	if (cda[CTA_EXPECT_CLASS] && helper) {
		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
		/* the class index must be one the helper declares */
		if (class > helper->expect_class_max)
			return ERR_PTR(-EINVAL);
	}
	exp = nf_ct_expect_alloc(ct);
	if (!exp)
		return ERR_PTR(-ENOMEM);

	help = nfct_help(ct);
	if (!help) {
		/* master has no helper extension: this is a userspace
		 * expectation, for which a timeout is mandatory. */
		if (!cda[CTA_EXPECT_TIMEOUT]) {
			err = -EINVAL;
			goto err_out;
		}
		exp->timeout.expires =
		  jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;

		exp->flags = NF_CT_EXPECT_USERSPACE;
		if (cda[CTA_EXPECT_FLAGS]) {
			exp->flags |=
				ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
		}
	} else {
		if (cda[CTA_EXPECT_FLAGS]) {
			exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
			/* helper-backed expectations must never be flagged
			 * as userspace ones */
			exp->flags &= ~NF_CT_EXPECT_USERSPACE;
		} else
			exp->flags = 0;
	}
	if (cda[CTA_EXPECT_FN]) {
		const char *name = nla_data(cda[CTA_EXPECT_FN]);
		struct nf_ct_helper_expectfn *expfn;

		expfn = nf_ct_helper_expectfn_find_by_name(name);
		if (expfn == NULL) {
			err = -EINVAL;
			goto err_out;
		}
		exp->expectfn = expfn->expectfn;
	} else
		exp->expectfn = NULL;

	exp->class = class;
	exp->master = ct;
	exp->helper = helper;
	exp->tuple = *tuple;
	/* only the source part of the mask participates in matching */
	exp->mask.src.u3 = mask->src.u3;
	exp->mask.src.u.all = mask->src.u.all;

	if (cda[CTA_EXPECT_NAT]) {
		err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
						 exp, nf_ct_l3num(ct));
		if (err < 0)
			goto err_out;
	}
	return exp;
err_out:
	nf_ct_expect_put(exp);
	return ERR_PTR(err);
}
/* Create a new expectation from netlink attributes and attach it to the
 * master conntrack identified by CTA_EXPECT_MASTER.  The caller guarantees
 * that CTA_EXPECT_TUPLE, CTA_EXPECT_MASK and CTA_EXPECT_MASTER are present.
 */
static int
ctnetlink_create_expect(struct net *net,
			const struct nf_conntrack_zone *zone,
			const struct nlattr * const cda[],
			u_int8_t u3, u32 portid, int report)
{
	struct nf_conntrack_tuple tuple, mask, master_tuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nf_conntrack_helper *helper = NULL;
	struct nf_conntrack_expect *exp;
	struct nf_conn *ct;
	int err;

	/* caller guarantees that those three CTA_EXPECT_* exist */
	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
				    u3, NULL);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
				    u3, NULL);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
				    u3, NULL);
	if (err < 0)
		return err;

	/* Look for master conntrack of this expectation */
	h = nf_conntrack_find_get(net, zone, &master_tuple);
	if (!h)
		return -ENOENT;
	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_EXPECT_HELP_NAME]) {
		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);

		helper = __nf_conntrack_helper_find(helpname, u3,
						    nf_ct_protonum(ct));
		if (helper == NULL) {
#ifdef CONFIG_MODULES
			/* try to autoload the helper module, then retry the
			 * lookup */
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err_ct;
			}
			helper = __nf_conntrack_helper_find(helpname, u3,
							    nf_ct_protonum(ct));
			if (helper) {
				/* module was just loaded and the helper is
				 * now available: return -EAGAIN so nfnetlink
				 * replays the request via the normal
				 * (non-autoload) path. */
				err = -EAGAIN;
				goto err_ct;
			}
#endif
			err = -EOPNOTSUPP;
			goto err_ct;
		}
	}

	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
	if (IS_ERR(exp)) {
		err = PTR_ERR(exp);
		goto err_ct;
	}

	err = nf_ct_expect_related_report(exp, portid, report);
	/* drop the allocation reference; the expect table holds its own */
	nf_ct_expect_put(exp);
err_ct:
	/* release the reference taken by nf_conntrack_find_get() */
	nf_ct_put(ct);
	return err;
}
/* Handle IPCTNL_MSG_EXP_NEW: create an expectation (when NLM_F_CREATE is
 * set and none exists) or update the existing one, honouring NLM_F_EXCL.
 */
static int
ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	struct nf_conntrack_zone zone;
	int err;

	if (!cda[CTA_EXPECT_TUPLE]
	    || !cda[CTA_EXPECT_MASK]
	    || !cda[CTA_EXPECT_MASTER])
		return -EINVAL;

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
				    u3, NULL);
	if (err < 0)
		return err;

	spin_lock_bh(&nf_conntrack_expect_lock);
	exp = __nf_ct_expect_find(net, &zone, &tuple);
	if (!exp) {
		/* not found: creation is done after dropping the expect
		 * lock (note ctnetlink_create_expect() may trigger module
		 * autoload). */
		spin_unlock_bh(&nf_conntrack_expect_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			err = ctnetlink_create_expect(net, &zone, cda, u3,
						      NETLINK_CB(skb).portid,
						      nlmsg_report(nlh));
		}
		return err;
	}

	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
		err = ctnetlink_change_expect(exp, cda);
	spin_unlock_bh(&nf_conntrack_expect_lock);

	return err;
}
  2680. static int
  2681. ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
  2682. const struct ip_conntrack_stat *st)
  2683. {
  2684. struct nlmsghdr *nlh;
  2685. struct nfgenmsg *nfmsg;
  2686. unsigned int flags = portid ? NLM_F_MULTI : 0, event;
  2687. event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
  2688. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
  2689. if (nlh == NULL)
  2690. goto nlmsg_failure;
  2691. nfmsg = nlmsg_data(nlh);
  2692. nfmsg->nfgen_family = AF_UNSPEC;
  2693. nfmsg->version = NFNETLINK_V0;
  2694. nfmsg->res_id = htons(cpu);
  2695. if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
  2696. nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
  2697. nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
  2698. goto nla_put_failure;
  2699. nlmsg_end(skb, nlh);
  2700. return skb->len;
  2701. nla_put_failure:
  2702. nlmsg_failure:
  2703. nlmsg_cancel(skb, nlh);
  2704. return -1;
  2705. }
  2706. static int
  2707. ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
  2708. {
  2709. int cpu;
  2710. struct net *net = sock_net(skb->sk);
  2711. if (cb->args[0] == nr_cpu_ids)
  2712. return 0;
  2713. for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
  2714. const struct ip_conntrack_stat *st;
  2715. if (!cpu_possible(cpu))
  2716. continue;
  2717. st = per_cpu_ptr(net->ct.stat, cpu);
  2718. if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
  2719. cb->nlh->nlmsg_seq,
  2720. cpu, st) < 0)
  2721. break;
  2722. }
  2723. cb->args[0] = cpu;
  2724. return skb->len;
  2725. }
  2726. static int
  2727. ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
  2728. const struct nlmsghdr *nlh,
  2729. const struct nlattr * const cda[])
  2730. {
  2731. if (nlh->nlmsg_flags & NLM_F_DUMP) {
  2732. struct netlink_dump_control c = {
  2733. .dump = ctnetlink_exp_stat_cpu_dump,
  2734. };
  2735. return netlink_dump_start(ctnl, skb, nlh, &c);
  2736. }
  2737. return 0;
  2738. }
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Event notifiers that forward conntrack and expectation events to
 * ctnetlink listeners; registered per netns in ctnetlink_net_init(). */
static struct nf_ct_event_notifier ctnl_notifier = {
	.fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
	.fcn = ctnetlink_expect_event,
};
#endif
/* Dispatch table for NFNL_SUBSYS_CTNETLINK message types. */
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET]		= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_DELETE]		= { .call = ctnetlink_del_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET_CTRZERO]	= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
};
/* Dispatch table for NFNL_SUBSYS_CTNETLINK_EXP message types. */
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
};
/* nfnetlink subsystem descriptor for conntrack messages. */
static const struct nfnetlink_subsystem ctnl_subsys = {
	.name				= "conntrack",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
	.cb_count			= IPCTNL_MSG_MAX,
	.cb				= ctnl_cb,
};
/* nfnetlink subsystem descriptor for expectation messages. */
static const struct nfnetlink_subsystem ctnl_exp_subsys = {
	.name				= "conntrack_expect",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
	.cb_count			= IPCTNL_MSG_EXP_MAX,
	.cb				= ctnl_exp_cb,
};
/* module aliases so this module can be loaded on demand */
MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
/* Per-netns init: register the conntrack and expectation event notifiers
 * (a no-op returning 0 when event support is compiled out).
 */
static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int ret;

	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register notifier.\n");
		goto err_out;
	}

	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot expect register notifier.\n");
		goto err_unreg_notifier;
	}
#endif
	return 0;

/* the unwind labels only exist when the registrations above do */
#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
err_out:
	return ret;
#endif
}
/* Per-netns teardown: unregister notifiers in reverse registration order. */
static void ctnetlink_net_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
#endif
}
  2822. static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
  2823. {
  2824. struct net *net;
  2825. list_for_each_entry(net, net_exit_list, exit_list)
  2826. ctnetlink_net_exit(net);
  2827. }
/* Per-network-namespace lifecycle hooks for ctnetlink. */
static struct pernet_operations ctnetlink_net_ops = {
	.init		= ctnetlink_net_init,
	.exit_batch	= ctnetlink_net_exit_batch,
};
/* Module init: register both nfnetlink subsystems, the pernet operations
 * and (optionally) the nf_queue glue hook, unwinding in reverse order on
 * any failure.
 */
static int __init ctnetlink_init(void)
{
	int ret;

	pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
	ret = nfnetlink_subsys_register(&ctnl_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
		goto err_out;
	}

	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
		goto err_unreg_subsys;
	}

	ret = register_pernet_subsys(&ctnetlink_net_ops);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register pernet operations\n");
		goto err_unreg_exp_subsys;
	}
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
	/* setup interaction between nf_queue and nf_conntrack_netlink. */
	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
#endif
	return 0;

err_unreg_exp_subsys:
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
err_unreg_subsys:
	nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
	return ret;
}
/* Module exit: undo ctnetlink_init() in reverse order, then wait for RCU
 * readers before the module text can go away.
 */
static void __exit ctnetlink_exit(void)
{
	pr_info("ctnetlink: unregistering from nfnetlink.\n");

	unregister_pernet_subsys(&ctnetlink_net_ops);
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
	nfnetlink_subsys_unregister(&ctnl_subsys);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
	/* make sure no RCU reader still dereferences the cleared hook */
	synchronize_rcu();
}
/* module entry/exit points */
module_init(ctnetlink_init);
module_exit(ctnetlink_exit);