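/* tcp_metrics.c - per-destination cache of TCP metrics.
 *
 * Stores RTT/RTTVAR (in usecs), ssthresh, cwnd, reordering, remote
 * timestamps and TCP Fast Open state learned from previous connections,
 * keyed by (source address, destination address, network namespace).
 * Entries live in an RCU-protected hash table and are exposed to
 * userspace through the "tcp_metrics" generic netlink family.
 */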
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

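/* (Re)seed an entry from the routing cache: import the locked-metric
 * bits and raw metric values from @dst, converting RTT/RTTVAR from the
 * route's msec resolution to the usecs stored here, and optionally
 * clear the cached Fast Open state.
 */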
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;

	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

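/* Entries that have not been updated for an hour are considered stale
 * and are re-seeded from the current route metrics on next use.
 */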
#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

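/* Create a cache entry for (saddr, daddr).  If the lookup that led here
 * found a chain deeper than TCP_METRICS_RECLAIM_DEPTH, the least
 * recently stamped entry of the bucket is recycled in place instead of
 * allocating a new block.  Runs under tcp_metrics_lock.
 */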
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

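/* Encode the result of a bucket walk: the match if one was found, the
 * TCP_METRICS_RECLAIM_PTR sentinel if the chain has grown too deep,
 * NULL otherwise.
 */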
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

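/* Lookup variant keyed by the addresses of a request socket. */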
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
		hash = ipv4_addr_hash(tw->tw_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
			hash = ipv4_addr_hash(tw->tw_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}

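/* Main lookup for full sockets.  v4-mapped IPv6 destinations are cached
 * under their IPv4 addresses.  With @create, a missing entry is
 * allocated (or reclaimed) via tcpm_new().
 */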
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt is larger than the stored one, store the
	 * new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */
void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation remains correct. The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT. BUT!
	 * If the peer plays tricks such as "quick acks" for long enough to
	 * drive the measured RTT down to a low value, and then abruptly
	 * starts delaying ACKs instead, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

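/* Validate a passive open against the cached peer state.  With
 * @paws_check, return false when a recently cached peer timestamp
 * contradicts the timestamp carried by @req (or timestamps are
 * disabled); otherwise report whether the peer has previously proven
 * itself, i.e. we hold both a cached RTT and a timestamp for it.
 */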
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

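/* The Fast Open fields of an entry are updated outside tcp_metrics_lock,
 * so readers and writers synchronize through this seqlock instead.
 */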
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

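/* Generic netlink interface ("tcp_metrics" family):
 * TCP_METRICS_CMD_GET dumps or fetches entries, TCP_METRICS_CMD_DEL
 * removes them (all of a netns's entries when no destination is given).
 * Userspace drives this through iproute2, e.g. "ip tcp_metrics show"
 * and "ip tcp_metrics flush".
 */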
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

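/* Pull a v4 or v6 address out of the request attributes, filling @hash
 * with the address hash when requested.  Returns 0 on success, 1 when
 * no address attribute is present and @optional is set, and a negative
 * errno otherwise.
 */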
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

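/* Remove every entry belonging to @net.  Used on netns teardown and for
 * an unfiltered TCP_METRICS_CMD_DEL.
 */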
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

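/* The hash table is sized once at boot for init_net and shared by all
 * namespaces: tcpmhash_entries= on the kernel command line overrides
 * the default of 8K or 16K slots picked from the amount of RAM.
 */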
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}