/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. It is set to the 6 sec
				 * specified in an old IPv6 RFC. Well, it was
				 * a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */
/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
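
/*
 * Sizing note (illustrative, not part of the original file): FL_HASH() keys
 * on the low 8 bits of the host-order label, so the at most FL_MAX_SIZE
 * (4096) labels spread over 256 hash chains; e.g. a label whose host-order
 * value is 0x12345 lands in bucket 0x45.  Unused entries whose expiry has
 * passed are reaped by ip6_fl_gc() when the timer above fires.
 */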

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Big socket sock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}
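
/*
 * fl_intern() links a freshly created label into the hash table.  It returns
 * NULL on success; if the requested label already exists (another socket
 * interned it in the meantime), it instead returns the existing entry with an
 * extra reference, and the caller re-runs its permission checks (see the
 * "recheck" path in ipv6_flowlabel_opt()).
 */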
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}



/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
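
/*
 * Typical caller pattern (illustrative sketch, e.g. in the UDP/raw sendmsg
 * paths): translate a label supplied via sin6_flowinfo or cmsg into a
 * refcounted entry, and drop the reference when done.
 *
 *	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
 *		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 *		if (!flowlabel)
 *			return -EINVAL;
 *	}
 *	...
 *	fl6_sock_release(flowlabel);
 *
 * The returned entry carries an extra reference taken under
 * rcu_read_lock_bh() above.
 */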

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */


/*
   This is the only difficult place. A flowlabel enforces equal headers
   up to and including the routing header; however, the user may supply
   options following the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
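
/*
 * Merge example (illustrative): if the label was created with a hop-by-hop
 * option and a routing header (opt_nflen != 0) and the caller supplies only
 * per-packet destination options (opt_flen != 0), the merged result takes
 * hopopt/dst0opt/srcrt/opt_nflen from the label and dst1opt/opt_flen from
 * the caller, so everything up to and including the routing header stays
 * identical for every packet sent with this label.
 */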

static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
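
/*
 * Example (illustrative): check_linger(3) is rounded up to FL_MIN_LINGER*HZ
 * (6 seconds' worth of jiffies); check_linger(300) exceeds FL_MAX_LINGER and
 * returns 0, which fl6_renew() turns into -EPERM, unless the caller has
 * CAP_NET_ADMIN.
 */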

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
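
/*
 * Layout note (illustrative): the setsockopt() buffer handed to
 * IPV6_FLOWLABEL_MGR is a struct in6_flowlabel_req optionally followed, at
 * offset CMSG_ALIGN(sizeof(struct in6_flowlabel_req)), by ancillary-data
 * style cmsgs describing hop-by-hop/destination options and a routing
 * header.  fl_create() parses that tail with ip6_datagram_send_ctl() and
 * rejects per-packet options (opt_flen != 0), which cannot be frozen into
 * a label.
 */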

static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
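
/*
 * Limits example (illustrative): with FL_MAX_SIZE 4096 and FL_MAX_PER_SOCK 32,
 * an unprivileged socket is refused a new label once it already owns 32
 * labels, once fewer than 1024 global slots remain, or, if it owns any label
 * at all, once fewer than 2048 slots remain.  CAP_NET_ADMIN bypasses all of
 * these checks except a completely full table (room <= 0).
 */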

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;
			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;


	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference_protected(*sflp,
						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		if (net->ipv6.sysctl.flowlabel_state_ranges &&
		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
			return -ERANGE;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
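
/*
 * Userspace usage sketch (illustrative only, not part of the kernel file):
 * requesting a kernel-assigned, exclusively shared label towards "dst" via
 * the IPV6_FLOWLABEL_MGR socket option.  Constants and fields come from
 * <linux/in6.h>; error handling is omitted.
 *
 *	struct in6_flowlabel_req freq;
 *	struct sockaddr_in6 dst;		// assumed already filled in
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_action  = IPV6_FL_A_GET;
 *	freq.flr_flags   = IPV6_FL_F_CREATE;
 *	freq.flr_share   = IPV6_FL_S_EXCL;
 *	freq.flr_label   = 0;			// 0 => kernel picks a label
 *	freq.flr_dst     = dst.sin6_addr;
 *	freq.flr_linger  = 6;			// seconds, see check_linger()
 *	freq.flr_expires = 30;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 * The assigned label is written back into freq.flr_label (the copy_to_user()
 * above); it is then used with IPV6_FLOWINFO_SEND / sin6_flowinfo and
 * released later with flr_action = IPV6_FL_A_PUT.
 */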

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
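
/*
 * Sample /proc/net/ip6_flowlabel entry (illustrative values; %pi6 prints the
 * destination as 32 hex digits without colons):
 *
 *	Label S Owner Users Linger Expires Dst Opt
 *	0933C 2 12345 0     6      28      20010db8000000000000000000000001 0
 *
 * i.e. label 0x933C, IPV6_FL_S_PROCESS-shared by pid 12345, currently
 * unreferenced, lingering 6 s with 28 s left before it may be reaped.
 */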

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}