/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

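/*
 * Allocate a new sk_buff of @size and start a genetlink message for
 * command @cmd in it. Replies to a request reuse the sender's sequence
 * number via @info; exit notifications pass a NULL @info and use the
 * per-cpu sequence counter instead. On success *@skbp holds the buffer.
 */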
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb as a reply to the request described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

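/*
 * Fill @stats with the accounting data of a single task: delay
 * accounting, basic accounting (including context switch counts) and
 * extended accounting.
 */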
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

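/*
 * Resolve @pid in the caller's pid namespace and fill @stats for that
 * task, holding a task reference across the collection.
 */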
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

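/*
 * Fill @stats with aggregate data for thread group @tgid: start from
 * the stats of already-dead threads accumulated in signal->stats, then
 * add the per-task data of every live thread under siglock.
 */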
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

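/*
 * Fold the exiting task's per-task stats into the per-tgid structure,
 * under siglock so concurrent group exits don't clobber each other.
 */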
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

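/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @mask. Restricted to the initial user and pid namespaces; duplicate
 * registrations on a CPU are ignored.
 */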
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

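/*
 * Parse the cpulist string in attribute @na into @mask. Returns 1 if
 * the attribute is absent, 0 on success and a negative errno otherwise.
 */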
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

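/*
 * Begin an aggregate PID/TGID reply in @skb: emit padding if the
 * architecture needs it, nest the pid/tgid attribute and reserve room
 * for the stats payload, returning a pointer for the caller to fill.
 */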
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

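/*
 * CGROUPSTATS_CMD_GET handler: build cgroupstats for the cgroup behind
 * the file descriptor in the request and send them back to the caller.
 */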
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
			 sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

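/*
 * Size of one aggregate reply: the pid/tgid attribute, the stats
 * payload, the nest header and, where required, alignment padding.
 */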
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

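/* TASKSTATS_CMD_GET handler: dispatch on whichever attribute is present */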
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

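/*
 * Allocate the per-tgid stats structure on first use, under siglock to
 * avoid double allocation by racing exiting threads. Allocation failure
 * is harmless; the caller treats a NULL result as "not a thread group".
 */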
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);