- /* Kernel thread helper functions.
- * Copyright (C) 2004 IBM Corporation, Rusty Russell.
- *
- * Creation is done via kthreadd, so that we get a clean environment
- * even if we're invoked from userspace (think modprobe, hotplug cpu,
- * etc.).
- */
- #include <linux/sched.h>
- #include <linux/kthread.h>
- #include <linux/completion.h>
- #include <linux/err.h>
- #include <linux/cpuset.h>
- #include <linux/unistd.h>
- #include <linux/file.h>
- #include <linux/export.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
- #include <linux/freezer.h>
- #include <linux/ptrace.h>
- #include <linux/uaccess.h>
- #include <linux/cgroup.h>
- #include <trace/events/sched.h>
- static DEFINE_SPINLOCK(kthread_create_lock);
- static LIST_HEAD(kthread_create_list);
- struct task_struct *kthreadd_task;
- struct kthread_create_info
- {
- /* Information passed to kthread() from kthreadd. */
- int (*threadfn)(void *data);
- void *data;
- int node;
- /* Result passed back to kthread_create() from kthreadd. */
- struct task_struct *result;
- struct completion *done;
- struct list_head list;
- };
- struct kthread {
- unsigned long flags;
- unsigned int cpu;
- void *data;
- struct completion parked;
- struct completion exited;
- };
- enum KTHREAD_BITS {
- KTHREAD_IS_PER_CPU = 0,
- KTHREAD_SHOULD_STOP,
- KTHREAD_SHOULD_PARK,
- KTHREAD_IS_PARKED,
- };
- #define __to_kthread(vfork) \
- container_of(vfork, struct kthread, exited)
- static inline struct kthread *to_kthread(struct task_struct *k)
- {
- return __to_kthread(k->vfork_done);
- }
- static struct kthread *to_live_kthread(struct task_struct *k)
- {
- struct completion *vfork = ACCESS_ONCE(k->vfork_done);
- if (likely(vfork))
- return __to_kthread(vfork);
- return NULL;
- }
- /**
- * kthread_should_stop - should this kthread return now?
- *
- * When someone calls kthread_stop() on your kthread, it will be woken
- * and this will return true. You should then return, and your return
- * value will be passed through to kthread_stop().
- */
- bool kthread_should_stop(void)
- {
- return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
- }
- EXPORT_SYMBOL(kthread_should_stop);
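- /*
-  * Example (illustrative sketch, not part of the original file): a
-  * minimal thread function built around kthread_should_stop().
-  * my_work() and the HZ sleep interval are hypothetical placeholders.
-  *
-  *	static int my_thread_fn(void *data)
-  *	{
-  *		while (!kthread_should_stop()) {
-  *			my_work(data);
-  *			schedule_timeout_interruptible(HZ);
-  *		}
-  *		return 0;
-  *	}
-  */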
- /**
- * kthread_should_park - should this kthread park now?
- *
- * When someone calls kthread_park() on your kthread, it will be woken
- * and this will return true. You should then do the necessary
- * cleanup and call kthread_parkme()
- *
- * Similar to kthread_should_stop(), but this keeps the thread alive
- * and in a parked state. kthread_unpark() "restarts" the thread and
- * calls the thread function again.
- */
- bool kthread_should_park(void)
- {
- return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
- }
- EXPORT_SYMBOL_GPL(kthread_should_park);
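- /*
-  * Example (sketch): a parkable thread pairs kthread_should_park()
-  * with kthread_parkme() inside its main loop, so that kthread_park()
-  * callers find it quiesced at a known point; my_work() is a
-  * hypothetical placeholder.
-  *
-  *	while (!kthread_should_stop()) {
-  *		if (kthread_should_park())
-  *			kthread_parkme();
-  *		my_work(data);
-  *	}
-  */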
- /**
- * kthread_freezable_should_stop - should this freezable kthread return now?
- * @was_frozen: optional out parameter, indicates whether %current was frozen
- *
- * kthread_should_stop() for freezable kthreads, which will enter the
- * refrigerator if necessary. This function is safe from kthread_stop() /
- * freezer deadlock and freezable kthreads should use this function instead
- * of calling try_to_freeze() directly.
- */
- bool kthread_freezable_should_stop(bool *was_frozen)
- {
- bool frozen = false;
- might_sleep();
- if (unlikely(freezing(current)))
- frozen = __refrigerator(true);
- if (was_frozen)
- *was_frozen = frozen;
- return kthread_should_stop();
- }
- EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
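- /*
-  * Example (sketch): a freezable thread marks itself with
-  * set_freezable() and polls kthread_freezable_should_stop() instead
-  * of kthread_should_stop(); my_work() is hypothetical.
-  *
-  *	static int my_freezable_fn(void *data)
-  *	{
-  *		set_freezable();
-  *		while (!kthread_freezable_should_stop(NULL)) {
-  *			my_work(data);
-  *			schedule_timeout_interruptible(HZ);
-  *		}
-  *		return 0;
-  *	}
-  */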
- /**
- * kthread_data - return data value specified on kthread creation
- * @task: kthread task in question
- *
- * Return the data value specified when kthread @task was created.
- * The caller is responsible for ensuring the validity of @task when
- * calling this function.
- */
- void *kthread_data(struct task_struct *task)
- {
- return to_kthread(task)->data;
- }
- /**
- * probe_kthread_data - speculative version of kthread_data()
- * @task: possible kthread task in question
- *
- * @task could be a kthread task. Return the data value specified when it
- * was created if accessible. If @task isn't a kthread task or its data is
- * inaccessible for any reason, %NULL is returned. This function requires
- * that @task itself is safe to dereference.
- */
- void *probe_kthread_data(struct task_struct *task)
- {
- struct kthread *kthread = to_kthread(task);
- void *data = NULL;
- probe_kernel_read(&data, &kthread->data, sizeof(data));
- return data;
- }
- static void __kthread_parkme(struct kthread *self)
- {
- __set_current_state(TASK_PARKED);
- while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
- if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
- complete(&self->parked);
- schedule();
- __set_current_state(TASK_PARKED);
- }
- clear_bit(KTHREAD_IS_PARKED, &self->flags);
- __set_current_state(TASK_RUNNING);
- }
- void kthread_parkme(void)
- {
- __kthread_parkme(to_kthread(current));
- }
- EXPORT_SYMBOL_GPL(kthread_parkme);
- static int kthread(void *_create)
- {
- /* Copy data: it's on kthread's stack */
- struct kthread_create_info *create = _create;
- int (*threadfn)(void *data) = create->threadfn;
- void *data = create->data;
- struct completion *done;
- struct kthread self;
- int ret;
- self.flags = 0;
- self.data = data;
- init_completion(&self.exited);
- init_completion(&self.parked);
- current->vfork_done = &self.exited;
- /* If user was SIGKILLed, I release the structure. */
- done = xchg(&create->done, NULL);
- if (!done) {
- kfree(create);
- do_exit(-EINTR);
- }
- /* OK, tell user we're spawned, wait for stop or wakeup */
- __set_current_state(TASK_UNINTERRUPTIBLE);
- create->result = current;
- complete(done);
- schedule();
- ret = -EINTR;
- if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
- cgroup_kthread_ready();
- __kthread_parkme(&self);
- ret = threadfn(data);
- }
- /* we can't just return, we must preserve "self" on stack */
- do_exit(ret);
- }
- /* called from do_fork() to get node information for the task about to be created */
- int tsk_fork_get_node(struct task_struct *tsk)
- {
- #ifdef CONFIG_NUMA
- if (tsk == kthreadd_task)
- return tsk->pref_node_fork;
- #endif
- return NUMA_NO_NODE;
- }
- static void create_kthread(struct kthread_create_info *create)
- {
- int pid;
- #ifdef CONFIG_NUMA
- current->pref_node_fork = create->node;
- #endif
- /* We want our own signal handler (we take no signals by default). */
- pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
- if (pid < 0) {
- /* If user was SIGKILLed, I release the structure. */
- struct completion *done = xchg(&create->done, NULL);
- if (!done) {
- kfree(create);
- return;
- }
- create->result = ERR_PTR(pid);
- complete(done);
- }
- }
- /**
- * kthread_create_on_node - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @node: task and thread structures for the thread are allocated on this node
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread. The thread will be stopped: use wake_up_process() to start
- * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
- * is affine to all CPUs.
- *
- * If the thread is going to be bound to a particular cpu, pass that
- * cpu's node in @node to get NUMA affinity for the kthread stack;
- * otherwise pass NUMA_NO_NODE.
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which no one will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called). The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
- */
- struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
- void *data, int node,
- const char namefmt[],
- ...)
- {
- DECLARE_COMPLETION_ONSTACK(done);
- struct task_struct *task;
- struct kthread_create_info *create = kmalloc(sizeof(*create),
- GFP_KERNEL);
- if (!create)
- return ERR_PTR(-ENOMEM);
- create->threadfn = threadfn;
- create->data = data;
- create->node = node;
- create->done = &done;
- spin_lock(&kthread_create_lock);
- list_add_tail(&create->list, &kthread_create_list);
- spin_unlock(&kthread_create_lock);
- wake_up_process(kthreadd_task);
- /*
- * Wait for completion in killable state, for I might be chosen by
- * the OOM killer while kthreadd is trying to allocate memory for
- * new kernel thread.
- */
- if (unlikely(wait_for_completion_killable(&done))) {
- /*
- * If I was SIGKILLed before kthreadd (or new kernel thread)
- * calls complete(), leave the cleanup of this structure to
- * that thread.
- */
- if (xchg(&create->done, NULL))
- return ERR_PTR(-EINTR);
- /*
- * kthreadd (or new kernel thread) will call complete()
- * shortly.
- */
- wait_for_completion(&done);
- }
- task = create->result;
- if (!IS_ERR(task)) {
- static const struct sched_param param = { .sched_priority = 0 };
- char name[TASK_COMM_LEN];
- va_list args;
- va_start(args, namefmt);
- /*
- * task is already visible to other tasks, so updating
- * COMM must be protected.
- */
- vsnprintf(name, sizeof(name), namefmt, args);
- set_task_comm(task, name);
- va_end(args);
- /*
- * root may have changed our (kthreadd's) priority or CPU mask.
- * The kernel thread should not inherit these properties.
- */
- sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
- set_cpus_allowed_ptr(task, cpu_all_mask);
- }
- kfree(create);
- return task;
- }
- EXPORT_SYMBOL(kthread_create_on_node);
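- /*
-  * Example (sketch): create, name and start a thread; my_thread_fn,
-  * my_data and id are hypothetical. The kthread_run() macro combines
-  * this call with the wake_up_process().
-  *
-  *	struct task_struct *t;
-  *
-  *	t = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
-  *				   "my_thread/%d", id);
-  *	if (IS_ERR(t))
-  *		return PTR_ERR(t);
-  *	wake_up_process(t);
-  */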
- static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
- {
- unsigned long flags;
- if (!wait_task_inactive(p, state)) {
- WARN_ON(1);
- return;
- }
- /* It's safe because the task is inactive. */
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- do_set_cpus_allowed(p, mask);
- p->flags |= PF_NO_SETAFFINITY;
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- }
- static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
- {
- __kthread_bind_mask(p, cpumask_of(cpu), state);
- }
- void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
- {
- __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
- }
- /**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @p: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @p to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
- void kthread_bind(struct task_struct *p, unsigned int cpu)
- {
- __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(kthread_bind);
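- /*
-  * Example (sketch): bind a just-created, still-stopped thread to a
-  * cpu before waking it; names are hypothetical.
-  *
-  *	t = kthread_create(my_thread_fn, my_data, "my_thread/%u", cpu);
-  *	if (!IS_ERR(t)) {
-  *		kthread_bind(t, cpu);
-  *		wake_up_process(t);
-  *	}
-  */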
- /**
- * kthread_create_on_cpu - Create a cpu bound kthread
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @cpu: The cpu on which the thread should be bound.
- * @namefmt: printf-style name for the thread. Format is restricted
- * to "name.*%u". Code fills in cpu number.
- *
- * Description: This helper function creates and names a kernel thread.
- * The thread will be woken and put into park mode.
- */
- struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
- void *data, unsigned int cpu,
- const char *namefmt)
- {
- struct task_struct *p;
- p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
- cpu);
- if (IS_ERR(p))
- return p;
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
- to_kthread(p)->cpu = cpu;
- /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
- kthread_park(p);
- return p;
- }
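- /*
-  * Example (sketch): the thread returned here starts out parked;
-  * unpark it when @cpu is ready to run it. Names are hypothetical.
-  *
-  *	t = kthread_create_on_cpu(my_thread_fn, my_data, cpu, "my/%u");
-  *	if (!IS_ERR(t))
-  *		kthread_unpark(t);
-  */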
- static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
- {
- clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
- /*
- * We clear the IS_PARKED bit here as we don't wait
- * until the task has left the park code. So if we parked
- * again before that happened, we would see the stale
- * IS_PARKED bit, which might be about to be cleared.
- */
- if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
- if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
- __kthread_bind(k, kthread->cpu, TASK_PARKED);
- wake_up_state(k, TASK_PARKED);
- }
- }
- /**
- * kthread_unpark - unpark a thread created by kthread_create().
- * @k: thread created by kthread_create().
- *
- * Sets kthread_should_park() for @k to return false, wakes it, and
- * waits for it to return. If the thread is marked percpu then it is
- * bound to the cpu again.
- */
- void kthread_unpark(struct task_struct *k)
- {
- struct kthread *kthread = to_live_kthread(k);
- if (kthread)
- __kthread_unpark(k, kthread);
- }
- EXPORT_SYMBOL_GPL(kthread_unpark);
- /**
- * kthread_park - park a thread created by kthread_create().
- * @k: thread created by kthread_create().
- *
- * Sets kthread_should_park() for @k to return true, wakes it, and
- * waits for it to return. This can also be called after kthread_create()
- * instead of calling wake_up_process(): the thread will park without
- * calling threadfn().
- *
- * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
- * If called by the kthread itself just the park bit is set.
- */
- int kthread_park(struct task_struct *k)
- {
- struct kthread *kthread = to_live_kthread(k);
- int ret = -ENOSYS;
- if (kthread) {
- if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
- set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
- if (k != current) {
- wake_up_process(k);
- wait_for_completion(&kthread->parked);
- }
- }
- ret = 0;
- }
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_park);
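- /*
-  * Example (sketch): parking is a lightweight alternative to stopping
-  * and recreating a thread across a reconfiguration; reconfigure() is
-  * hypothetical.
-  *
-  *	if (!kthread_park(t)) {
-  *		reconfigure();	-- t sits quiesced in TASK_PARKED here
-  *		kthread_unpark(t);
-  *	}
-  */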
- /**
- * kthread_stop - stop a thread created by kthread_create().
- * @k: thread created by kthread_create().
- *
- * Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit. This can also be called after kthread_create()
- * instead of calling wake_up_process(): the thread will exit without
- * calling threadfn().
- *
- * If threadfn() may call do_exit() itself, the caller must ensure
- * task_struct can't go away.
- *
- * Returns the result of threadfn(), or %-EINTR if wake_up_process()
- * was never called.
- */
- int kthread_stop(struct task_struct *k)
- {
- struct kthread *kthread;
- int ret;
- trace_sched_kthread_stop(k);
- get_task_struct(k);
- kthread = to_live_kthread(k);
- if (kthread) {
- set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
- __kthread_unpark(k, kthread);
- wake_up_process(k);
- wait_for_completion(&kthread->exited);
- }
- ret = k->exit_code;
- put_task_struct(k);
- trace_sched_kthread_stop_ret(ret);
- return ret;
- }
- EXPORT_SYMBOL(kthread_stop);
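- /*
-  * Example (sketch): orderly teardown; ret is whatever the thread
-  * function returned, or -EINTR if the thread was never woken.
-  *
-  *	ret = kthread_stop(t);
-  */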
- int kthreadd(void *unused)
- {
- struct task_struct *tsk = current;
- /* Setup a clean context for our children to inherit. */
- set_task_comm(tsk, "kthreadd");
- ignore_signals(tsk);
- set_cpus_allowed_ptr(tsk, cpu_all_mask);
- set_mems_allowed(node_states[N_MEMORY]);
- current->flags |= PF_NOFREEZE;
- cgroup_init_kthreadd();
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&kthread_create_list))
- schedule();
- __set_current_state(TASK_RUNNING);
- spin_lock(&kthread_create_lock);
- while (!list_empty(&kthread_create_list)) {
- struct kthread_create_info *create;
- create = list_entry(kthread_create_list.next,
- struct kthread_create_info, list);
- list_del_init(&create->list);
- spin_unlock(&kthread_create_lock);
- create_kthread(create);
- spin_lock(&kthread_create_lock);
- }
- spin_unlock(&kthread_create_lock);
- }
- return 0;
- }
- void __init_kthread_worker(struct kthread_worker *worker,
- const char *name,
- struct lock_class_key *key)
- {
- spin_lock_init(&worker->lock);
- lockdep_set_class_and_name(&worker->lock, key, name);
- INIT_LIST_HEAD(&worker->work_list);
- worker->task = NULL;
- }
- EXPORT_SYMBOL_GPL(__init_kthread_worker);
- /**
- * kthread_worker_fn - kthread function to process kthread_worker
- * @worker_ptr: pointer to initialized kthread_worker
- *
- * This function can be used as @threadfn to kthread_create() or
- * kthread_run() with @worker_ptr argument pointing to an initialized
- * kthread_worker. The started kthread will process work_list until
- * it is stopped with kthread_stop(). A kthread can also call
- * this function directly after extra initialization.
- *
- * Different kthreads can be used for the same kthread_worker as long
- * as there's only one kthread attached to it at any given time. A
- * kthread_worker without an attached kthread simply collects queued
- * kthread_works.
- */
- int kthread_worker_fn(void *worker_ptr)
- {
- struct kthread_worker *worker = worker_ptr;
- struct kthread_work *work;
- WARN_ON(worker->task);
- worker->task = current;
- repeat:
- set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
- spin_lock_irq(&worker->lock);
- worker->task = NULL;
- spin_unlock_irq(&worker->lock);
- return 0;
- }
- work = NULL;
- spin_lock_irq(&worker->lock);
- if (!list_empty(&worker->work_list)) {
- work = list_first_entry(&worker->work_list,
- struct kthread_work, node);
- list_del_init(&work->node);
- }
- worker->current_work = work;
- spin_unlock_irq(&worker->lock);
- if (work) {
- __set_current_state(TASK_RUNNING);
- work->func(work);
- } else if (!freezing(current))
- schedule();
- try_to_freeze();
- goto repeat;
- }
- EXPORT_SYMBOL_GPL(kthread_worker_fn);
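- /*
-  * Example (sketch): dedicate one thread to a worker; "my_worker" is
-  * a hypothetical name.
-  *
-  *	DEFINE_KTHREAD_WORKER(my_worker);
-  *	struct task_struct *t;
-  *
-  *	t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
-  *	if (IS_ERR(t))
-  *		return PTR_ERR(t);
-  */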
- /* insert @work before @pos in @worker */
- static void insert_kthread_work(struct kthread_worker *worker,
- struct kthread_work *work,
- struct list_head *pos)
- {
- lockdep_assert_held(&worker->lock);
- list_add_tail(&work->node, pos);
- work->worker = worker;
- if (!worker->current_work && likely(worker->task))
- wake_up_process(worker->task);
- }
- /**
- * queue_kthread_work - queue a kthread_work
- * @worker: target kthread_worker
- * @work: kthread_work to queue
- *
- * Queue @work to @worker for async execution; it is executed by the
- * kthread running kthread_worker_fn() for @worker. Returns %true
- * if @work was successfully queued, %false if it was already pending.
- */
- bool queue_kthread_work(struct kthread_worker *worker,
- struct kthread_work *work)
- {
- bool ret = false;
- unsigned long flags;
- spin_lock_irqsave(&worker->lock, flags);
- if (list_empty(&work->node)) {
- insert_kthread_work(worker, work, &worker->work_list);
- ret = true;
- }
- spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(queue_kthread_work);
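- /*
-  * Example (sketch): define a work item and queue it on a worker;
-  * my_work_fn() and my_worker are hypothetical.
-  *
-  *	static void my_work_fn(struct kthread_work *work)
-  *	{
-  *		...
-  *	}
-  *	DEFINE_KTHREAD_WORK(my_work, my_work_fn);
-  *
-  *	queue_kthread_work(&my_worker, &my_work);
-  */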
- struct kthread_flush_work {
- struct kthread_work work;
- struct completion done;
- };
- static void kthread_flush_work_fn(struct kthread_work *work)
- {
- struct kthread_flush_work *fwork =
- container_of(work, struct kthread_flush_work, work);
- complete(&fwork->done);
- }
- /**
- * flush_kthread_work - flush a kthread_work
- * @work: work to flush
- *
- * If @work is queued or executing, wait for it to finish execution.
- */
- void flush_kthread_work(struct kthread_work *work)
- {
- struct kthread_flush_work fwork = {
- KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
- COMPLETION_INITIALIZER_ONSTACK(fwork.done),
- };
- struct kthread_worker *worker;
- bool noop = false;
- retry:
- worker = work->worker;
- if (!worker)
- return;
- spin_lock_irq(&worker->lock);
- if (work->worker != worker) {
- spin_unlock_irq(&worker->lock);
- goto retry;
- }
- if (!list_empty(&work->node))
- insert_kthread_work(worker, &fwork.work, work->node.next);
- else if (worker->current_work == work)
- insert_kthread_work(worker, &fwork.work, worker->work_list.next);
- else
- noop = true;
- spin_unlock_irq(&worker->lock);
- if (!noop)
- wait_for_completion(&fwork.done);
- }
- EXPORT_SYMBOL_GPL(flush_kthread_work);
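- /*
-  * Example (sketch): wait for one specific item to finish before
-  * tearing down the object that embeds it; names are hypothetical.
-  *
-  *	flush_kthread_work(&my_obj->work);
-  *	kfree(my_obj);
-  */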
- /**
- * flush_kthread_worker - flush all current works on a kthread_worker
- * @worker: worker to flush
- *
- * Wait until all currently executing or pending works on @worker are
- * finished.
- */
- void flush_kthread_worker(struct kthread_worker *worker)
- {
- struct kthread_flush_work fwork = {
- KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
- COMPLETION_INITIALIZER_ONSTACK(fwork.done),
- };
- queue_kthread_work(worker, &fwork.work);
- wait_for_completion(&fwork.done);
- }
- EXPORT_SYMBOL_GPL(flush_kthread_worker);
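- /*
-  * Example (sketch): drain the worker, then stop its thread; after the
-  * flush no queued work can still be executing. Names are hypothetical.
-  *
-  *	flush_kthread_worker(&my_worker);
-  *	kthread_stop(t);
-  */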
|