/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

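/*
 * kthread() points current->vfork_done at self.exited, so for a live
 * kthread we can get from the task_struct back to the on-stack
 * struct kthread with container_of().
 */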
#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);

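/*
 * Usage sketch (illustrative, not part of the kernel tree): a typical
 * thread function checks kthread_should_stop() in its main loop and
 * returns once it goes true. example_thread_fn and the HZ sleep are
 * hypothetical.
 */
#if 0
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work on data ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* this value is handed back by kthread_stop() */
}
#endif
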
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

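/*
 * Usage sketch (illustrative): kthread_data() recovers the @data
 * pointer that was passed at creation time. struct example_ctx is a
 * hypothetical payload type.
 */
#if 0
struct example_ctx { int id; };

static int example_ctx_id(struct task_struct *task)
{
	struct example_ctx *ctx = kthread_data(task);

	return ctx->id;
}
#endif
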
static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

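/*
 * Usage sketch (illustrative): create a named thread, then start it
 * explicitly with wake_up_process(); the kthread_run() macro in
 * <linux/kthread.h> combines the two steps. example_thread_fn and
 * struct example_ctx follow the sketches above.
 */
#if 0
static struct task_struct *example_start(struct example_ctx *ctx)
{
	struct task_struct *task;

	task = kthread_create(example_thread_fn, ctx, "example/%d", ctx->id);
	if (!IS_ERR(task))
		wake_up_process(task);	/* thread is created sleeping */
	return task;
}
#endif
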
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

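/*
 * Usage sketch (illustrative): per-CPU threads are created stopped,
 * bound while still inactive, and only then woken, so they never run
 * on the wrong CPU. Names are hypothetical.
 */
#if 0
static struct task_struct *example_start_on_cpu(void *data, unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(example_thread_fn, data, "example/%u", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);	/* must still be stopped */
		wake_up_process(task);
	}
	return task;
}
#endif
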
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

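/*
 * Usage sketch (illustrative): shutdown simply calls kthread_stop()
 * and collects the thread function's return value; %-EINTR means the
 * thread was never woken and threadfn() never ran.
 */
#if 0
static int example_stop(struct task_struct *task)
{
	return kthread_stop(task);	/* wakes the thread, waits for exit */
}
#endif
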
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call this
 * function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

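/*
 * Usage sketch (illustrative): set up a kthread_worker and attach a
 * thread to it with kthread_run() (assuming the init_kthread_worker()
 * helper from <linux/kthread.h>). example_worker and the thread name
 * are hypothetical.
 */
#if 0
static struct kthread_worker example_worker;
static struct task_struct *example_worker_task;

static int example_worker_start(void)
{
	init_kthread_worker(&example_worker);
	example_worker_task = kthread_run(kthread_worker_fn, &example_worker,
					  "example_worker");
	if (IS_ERR(example_worker_task))
		return PTR_ERR(example_worker_task);
	return 0;
}
#endif
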
/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution. @worker
 * must have been initialized with init_kthread_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

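/*
 * Usage sketch (illustrative): define a handler, initialize a work
 * item for it (assuming init_kthread_work() from <linux/kthread.h>),
 * and queue it on the worker sketched above.
 */
#if 0
static void example_work_fn(struct kthread_work *work)
{
	/* runs in example_worker's thread context */
}

static struct kthread_work example_work;

static void example_submit(void)
{
	init_kthread_work(&example_work, example_work_fn);
	queue_kthread_work(&example_worker, &example_work);
}
#endif
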
/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

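/*
 * Usage sketch (illustrative): flush a work item before freeing the
 * object that embeds it, so work->func() cannot run on freed memory.
 * struct example_job is hypothetical.
 */
#if 0
struct example_job {
	struct kthread_work work;
	/* payload */
};

static void example_job_destroy(struct example_job *job)
{
	flush_kthread_work(&job->work);
	kfree(job);
}
#endif
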
struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
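
/*
 * Usage sketch (illustrative): an orderly teardown drains the worker
 * first, then stops its thread; kthread_worker_fn() clears
 * worker->task on the way out. Names follow the sketches above.
 */
#if 0
static void example_worker_stop(void)
{
	flush_kthread_worker(&example_worker);
	kthread_stop(example_worker_task);
}
#endif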