kthread.c

/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
        KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)     \
        container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
        return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
        struct completion *vfork = ACCESS_ONCE(k->vfork_done);
        if (likely(vfork))
                return __to_kthread(vfork);
        return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
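
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * thread function loops until kthread_stop() is called and returns a
 * value that kthread_stop() passes back to the stopper.  The names
 * example_thread() and example_start() are hypothetical.
 */
static int example_thread(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work, then doze until woken or timed out */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;       /* becomes kthread_stop()'s return value */
}

static struct task_struct *example_start(void)
{
        /* kthread_run() = kthread_create() + wake_up_process() */
        return kthread_run(example_thread, NULL, "kexample");
}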

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
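
/*
 * Illustrative sketch (not part of the original file): a freezable worker
 * loop.  set_freezable() must be called first, because kthreads inherit
 * PF_NOFREEZE from kthreadd.  example_freezable_thread() is a hypothetical
 * name.
 */
static int example_freezable_thread(void *data)
{
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        pr_debug("resumed after a freeze/thaw cycle\n");
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;
}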

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}
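
/*
 * Illustrative sketch (not part of the original file): the @data pointer
 * handed to kthread_create()/kthread_run() is what kthread_data() later
 * returns, so it usually points at the thread's private context.
 * struct example_ctx, example_ctx_thread() and example_peek() are
 * hypothetical names.
 */
struct example_ctx {
        unsigned long events;
};

static int example_ctx_thread(void *data)
{
        struct example_ctx *ctx = data;

        while (!kthread_should_stop()) {
                ctx->events++;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;
}

/* From another context, with @task known to be a live kthread created above. */
static unsigned long example_peek(struct task_struct *task)
{
        struct example_ctx *ctx = kthread_data(task);

        return ctx->events;
}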

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
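
/*
 * Illustrative sketch (not part of the original file): the thread side of
 * the park protocol.  Each iteration checks whether a controller asked the
 * thread to park and, if so, calls kthread_parkme() before doing any more
 * work.  example_parkable_thread() is a hypothetical name.
 */
static int example_parkable_thread(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park())
                        kthread_parkme();       /* sleeps until kthread_unpark() */
                /* per-cpu or otherwise park-sensitive work goes here */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;
}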

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread self;
        int ret;

        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
        init_completion(&self.parked);
        current->vfork_done = &self.exited;

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }
        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(&self);
                ret = threadfn(data);
        }
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack; otherwise give
 * NUMA_NO_NODE.
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
                char name[TASK_COMM_LEN];
                va_list args;

                va_start(args, namefmt);
                /*
                 * task is already visible to other tasks, so updating
                 * COMM must be protected.
                 */
                vsnprintf(name, sizeof(name), namefmt, args);
                set_task_comm(task, name);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
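
/*
 * Illustrative sketch (not part of the original file): the usual
 * create / wake_up_process() / kthread_stop() life cycle, reusing the
 * hypothetical example_thread() from the earlier sketch.  The kthread_run()
 * macro from <linux/kthread.h> combines the first two steps.
 */
static int example_lifecycle(void)
{
        struct task_struct *tsk;

        tsk = kthread_create(example_thread, NULL, "kexample");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        wake_up_process(tsk);           /* threadfn starts running */

        /* ... some time later ... */
        return kthread_stop(tsk);       /* returns example_thread()'s result */
}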

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
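
/*
 * Illustrative sketch (not part of the original file): binding must happen
 * while the new thread is still stopped, i.e. between kthread_create() and
 * the first wake_up_process().  Reuses the hypothetical example_thread().
 */
static struct task_struct *example_create_bound(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create(example_thread, NULL, "kexample/%u", cpu);
        if (!IS_ERR(tsk)) {
                kthread_bind(tsk, cpu);         /* must precede wake_up_process() */
                wake_up_process(tsk);
        }
        return tsk;
}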

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
        kthread_park(p);
        return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * We clear the IS_PARKED bit here as we don't wait
         * until the task has left the park code. So if we'd
         * park before that happens we'd see the IS_PARKED bit
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
        }
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it so
 * that it leaves the park position. If the thread is marked percpu
 * then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);

        if (kthread)
                __kthread_unpark(k, kthread);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);
        int ret = -ENOSYS;

        if (kthread) {
                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                        if (k != current) {
                                wake_up_process(k);
                                wait_for_completion(&kthread->parked);
                        }
                }
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
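
/*
 * Illustrative sketch (not part of the original file): the controller side
 * of the park protocol, e.g. around CPU hotplug for a thread made with
 * kthread_create_on_cpu().  The thread itself must poll kthread_should_park()
 * and call kthread_parkme(), as in the sketch near kthread_parkme() above.
 */
static int example_cpu_going_down(struct task_struct *tsk)
{
        /* Returns 0 once the thread sits in TASK_PARKED, -ENOSYS if it exited. */
        return kthread_park(tsk);
}

static void example_cpu_came_up(struct task_struct *tsk)
{
        /* Rebinds a per-cpu thread to its CPU and lets it run threadfn again. */
        kthread_unpark(tsk);
}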

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_live_kthread(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
                __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
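
/*
 * Illustrative sketch (not part of the original file): hosting a
 * kthread_worker on a dedicated kthread.  init_kthread_worker() and
 * kthread_run() come from <linux/kthread.h>; example_worker and
 * example_worker_start() are hypothetical names.
 */
static struct kthread_worker example_worker;

static struct task_struct *example_worker_start(void)
{
        init_kthread_worker(&example_worker);
        /* ERR_PTR() on failure, otherwise the task processing example_worker */
        return kthread_run(kthread_worker_fn, &example_worker, "example_worker");
}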

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        lockdep_assert_held(&worker->lock);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed asynchronously by @worker, whose attached
 * kthread must be running kthread_worker_fn(). Returns %true if @work
 * was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                insert_kthread_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
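
/*
 * Illustrative sketch (not part of the original file): a work item queued
 * on the hypothetical example_worker from the previous sketch.
 * init_kthread_work() comes from <linux/kthread.h>; example_work and
 * example_work_fn() are hypothetical names.
 */
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
        pr_debug("example work ran in %s\n", current->comm);
}

static void example_submit_and_wait(void)
{
        init_kthread_work(&example_work, example_work_fn);
        queue_kthread_work(&example_worker, &example_work);
        flush_kthread_work(&example_work);      /* wait for it to finish */
}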

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

retry:
        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        if (work->worker != worker) {
                spin_unlock_irq(&worker->lock);
                goto retry;
        }

        if (!list_empty(&work->node))
                insert_kthread_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                insert_kthread_work(worker, &fwork.work, worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
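
/*
 * Illustrative sketch (not part of the original file): tearing down the
 * hypothetical example_worker.  Draining the worker first guarantees no
 * queued work is left behind when the hosting kthread is stopped.
 */
static void example_worker_stop(struct task_struct *tsk)
{
        flush_kthread_worker(&example_worker);
        kthread_stop(tsk);      /* kthread_worker_fn() then returns 0 */
}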