torture.c

/*
 * Common functions for in-kernel torture tests.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

static char *torture_type;
static bool verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
static int *torture_runnable;

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval);
	}
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval)
{
	int ret = 0;

#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	if (onoff_interval <= 0)
		return 0;
	ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	return ret;
}
EXPORT_SYMBOL_GPL(torture_onoff_init);

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_cleanup);

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
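
/*
 * Usage sketch (illustrative only, following torture_onoff() above): a
 * caller declares per-thread state with DEFINE_TORTURE_RANDOM(rand) and
 * then reduces the return value, for example
 * "cpu = (torture_random(&rand) >> 4) % (maxcpu + 1)".  The right shift
 * discards the low-order bits, which are the weakest bits of a linear
 * congruential generator.
 */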

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};
static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;
	shuffle_idle_cpu = -1;
	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}
EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static int shutdown_secs;		/* desired test duration in seconds. */
static struct task_struct *shutdown_task;
static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	jiffies_snap = jiffies;
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !torture_must_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = jiffies;
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */
	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	int ret = 0;

	shutdown_secs = ssecs;
	torture_shutdown_hook = cleanup;
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		ret = torture_create_kthread(torture_shutdown, NULL,
					     shutdown_task);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
void stutter_wait(const char *title)
{
	cond_resched_rcu_qs();
	while (READ_ONCE(stutter_pause_test) ||
	       (torture_runnable && !READ_ONCE(*torture_runnable))) {
		if (stutter_pause_test)
			if (READ_ONCE(stutter_pause_test) == 1)
				schedule_timeout_interruptible(1);
			else
				while (READ_ONCE(stutter_pause_test))
					cond_resched();
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		torture_shutdown_absorb(title);
	}
}
EXPORT_SYMBOL_GPL(stutter_wait);
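
/*
 * Illustrative client-side pattern (hypothetical helper names): a client
 * torture kthread calls stutter_wait() once per pass through its main loop,
 * roughly as follows:
 *
 *	do {
 *		do_one_torture_op();
 *		stutter_wait("my_torture_kthread");
 *	} while (!torture_must_stop());
 *	torture_kthread_stopping("my_torture_kthread");
 *
 * do_one_torture_op() and the title strings above are placeholders, not
 * part of this file's API; the loop shape mirrors torture_shuffle() above.
 */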

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop()) {
			if (stutter > 1) {
				schedule_timeout_interruptible(stutter - 1);
				WRITE_ONCE(stutter_pause_test, 2);
			}
			schedule_timeout_interruptible(1);
			WRITE_ONCE(stutter_pause_test, 1);
		}
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter);
		WRITE_ONCE(stutter_pause_test, 0);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(int s)
{
	int ret;

	stutter = s;
	ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
	return ret;
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 *
 * The runnable parameter points to a flag that controls whether or not
 * the test is currently runnable.  If there is no such flag, pass in NULL.
 */
bool torture_init_begin(char *ttype, bool v, int *runnable)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: refusing %s init: %s running",
			 ttype, torture_type);
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	torture_runnable = runnable;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
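
/*
 * Illustrative init sequence for a hypothetical client module.  Clients
 * such as rcutorture and locktorture follow this general shape, but the
 * names below are placeholders, not taken from this file:
 *
 *	if (!torture_init_begin("my_torture", verbose, &my_torture_runnable))
 *		return -EBUSY;
 *	... create kthreads via torture_create_kthread(), set up hooks ...
 *	torture_init_end();
 */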

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be paired
 * to correctly perform the cleanup.  They are separate because other
 * threads may still need to reference torture_type during cleanup, so it
 * is set to NULL only after all other cleanup calls have completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
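
/*
 * Callers in this file invoke the function above through the
 * torture_create_kthread() wrapper macro from include/linux/torture.h,
 * which derives the kthread name and the creation and failure messages
 * from the function name and passes the task pointer by reference, e.g.:
 *
 *	ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
 *
 * See the header for the exact expansion.
 */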

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);