/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected. Access with cwq->lock held.
 *
 * W: workqueue_lock protected.
 */

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;    /* I: the owning workqueue */
        struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        unsigned int flags;                     /* I: WQ_* flags */
        struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
        struct list_head list;                  /* W: list of all workqueues */
        const char *name;                       /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The work struct was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
                        debug_object_init(work, &work_debug_descr);
                        debug_object_activate(work, &work_debug_descr);
                        return 0;
                }
                WARN_ON_ONCE(1);
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
        if (onstack)
                debug_object_init_on_stack(work, &work_debug_descr);
        else
                debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->flags & WQ_SINGLE_THREAD;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                               struct cpu_workqueue_struct *cwq,
                               unsigned long extra_flags)
{
        BUG_ON(!work_pending(work));

        atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
                        WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
        atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *)(atomic_long_read(&work->data) &
                        WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head,
                        unsigned int extra_flags)
{
        /* we own @work, set data and link */
        set_wq_data(work, cwq, extra_flags);

        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();

        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
        unsigned long flags;

        debug_work_activate(work);
        spin_lock_irqsave(&cwq->lock, flags);
        BUG_ON(!list_empty(&work->entry));
        insert_work(cwq, work, &cwq->worklist, 0);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret;

        ret = queue_work_on(get_cpu(), wq, work);
        put_cpu();

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                __queue_work(cpu, wq, work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

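/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): a driver that owns its own workqueue usually embeds a work_struct
 * in its private data, initializes it once with INIT_WORK(), and queues it
 * from atomic or process context.  The names my_dev, my_dev_work_fn,
 * handle_events and dev->wq below are hypothetical.
 *
 *      struct my_dev {
 *              struct workqueue_struct *wq;
 *              struct work_struct event_work;
 *      };
 *
 *      static void my_dev_work_fn(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev,
 *                                                event_work);
 *              handle_events(dev);
 *      }
 *
 *      dev->wq = create_workqueue("my_dev");
 *      INIT_WORK(&dev->event_work, my_dev_work_fn);
 *      queue_work(dev->wq, &dev->event_work);
 *
 * queue_work_on() follows the same pattern except that the caller picks the
 * CPU and must guarantee that this CPU cannot go offline while the work is
 * pending.
 */
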
static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

        __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

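/*
 * Illustrative sketch (not part of the original file): delayed work follows
 * the same pattern as plain work but uses a delayed_work and a delay in
 * jiffies.  A common use is a self-rearming poll routine; my_dev and
 * my_dev_poll are hypothetical names.
 *
 *      static void my_dev_poll(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev,
 *                                                poll_work.work);
 *
 *              poll_hardware(dev);
 *              queue_delayed_work(dev->wq, &dev->poll_work, HZ);
 *      }
 *
 *      INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);
 *      queue_delayed_work(dev->wq, &dev->poll_work, HZ);
 *
 * Note that the callback receives &dwork->work, hence the container_of() on
 * the embedded work member.
 */
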
/**
 * process_one_work - process single work
 * @cwq: cwq to process work for
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct cpu_workqueue_struct *cwq,
                             struct work_struct *work)
{
        work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct from
         * inside the function that is called from it, this we need to
         * take into account for lockdep too.  To avoid bogus "held
         * lock freed" warnings as well as problems when looking into
         * work->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map = work->lockdep_map;
#endif
        /* claim and process */
        debug_work_deactivate(work);
        cwq->current_work = work;
        list_del_init(&work->entry);

        spin_unlock_irq(&cwq->lock);

        BUG_ON(get_wq_data(work) != cwq);
        work_clear_pending(work);
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        f(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                       "%s/0x%08x/%d\n",
                       current->comm, preempt_count(), task_pid_nr(current));
                printk(KERN_ERR "    last function: ");
                print_symbol("%s\n", (unsigned long)f);
                debug_show_held_locks(current);
                dump_stack();
        }

        spin_lock_irq(&cwq->lock);

        /* we're done with it, release */
        cwq->current_work = NULL;
}

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                process_one_work(cwq, work);
        }
        spin_unlock_irq(&cwq->lock);
}

/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->flags & WQ_FREEZEABLE)
                set_freezable();

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, struct list_head *head)
{
        /*
         * debugobject calls are safe here even with cwq->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
        INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);

        debug_work_activate(&barr->work);
        insert_work(cwq, &barr->work, head, 0);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active = 0;
        struct wq_barrier barr;

        WARN_ON(cwq->thread == current);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                insert_wq_barrier(cwq, &barr, &cwq->worklist);
                active = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (active) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
        }

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto already_gone;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto already_gone;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);

        spin_unlock_irq(&cwq->lock);
        wait_for_completion(&barr.done);
        destroy_work_on_stack(&barr.work);
        return 1;
already_gone:
        spin_unlock_irq(&cwq->lock);
        return 0;
}
EXPORT_SYMBOL_GPL(flush_work);

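/*
 * Illustrative sketch (not part of the original file): flush_work() is the
 * right tool when the caller only needs the callback of one specific work
 * item to have finished before proceeding.  With the hypothetical my_dev
 * from the earlier sketches:
 *
 *      disable_irq(dev->irq);
 *      flush_work(&dev->event_work);
 *      release_resources(dev);
 *
 * The interrupt is disabled first so no new work can be queued, then
 * flush_work() waits for any queued or running callback, after which the
 * resources the callback uses can be released safely.  flush_work() does
 * not cancel anything; if the work can still be requeued afterwards, the
 * caller has gained nothing.
 */
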
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
        }
}

static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        clear_wq_data(work);
        return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

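/*
 * Illustrative sketch (not part of the original file): the cancel routines
 * are the usual choice on driver or module teardown, since they both cancel
 * a pending work item and wait for a running callback.  For the hypothetical
 * self-rearming poll work shown earlier:
 *
 *      static void my_dev_remove(struct my_dev *dev)
 *      {
 *              cancel_delayed_work_sync(&dev->poll_work);
 *              destroy_workqueue(dev->wq);
 *              kfree(dev);
 *      }
 *
 * After cancel_delayed_work_sync() returns, neither the timer nor the
 * callback can still be running, so the workqueue and the device structure
 * can be torn down safely.
 */
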
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

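/*
 * Illustrative sketch (not part of the original file): code that has no
 * workqueue of its own can push short-lived jobs onto the kernel-global
 * keventd workqueue.  A statically initialized work item plus
 * schedule_work() is the minimal pattern; my_event_fn, my_event_work and
 * process_pending_events are hypothetical names.
 *
 *      static void my_event_fn(struct work_struct *work)
 *      {
 *              process_pending_events();
 *      }
 *      static DECLARE_WORK(my_event_work, my_event_fn);
 *
 *      schedule_work(&my_event_work);
 *
 * Anything running on keventd should not sleep for long, since it shares the
 * per-CPU thread with every other schedule_work() user.
 */
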
/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
        if (del_timer_sync(&dwork->timer)) {
                __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
                             &dwork->work);
                put_cpu();
        }
        flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        int orig = -1;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();

        /*
         * When running in keventd don't schedule a work item on
         * itself.  Can just call directly because the work queue is
         * already bound.  This also is faster.
         */
        if (current_is_keventd())
                orig = raw_smp_processor_id();

        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                if (cpu != orig)
                        schedule_work_on(cpu, work);
        }
        if (orig >= 0)
                func(per_cpu_ptr(works, orig));

        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));

        put_online_cpus();
        free_percpu(works);
        return 0;
}

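/*
 * Illustrative sketch (not part of the original file): schedule_on_each_cpu()
 * is typically used to run a short synchronization or cache-draining routine
 * everywhere; drain_this_cpu is a hypothetical per-CPU helper.
 *
 *      static void drain_local_caches(struct work_struct *unused)
 *      {
 *              drain_this_cpu();
 *      }
 *
 *      int ret = schedule_on_each_cpu(drain_local_caches);
 *
 * The call returns only after the function has run on every online CPU,
 * which is why it is documented above as very slow.
 */
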
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *      One of the work items currently on the workqueue needs to acquire
 *      a lock held by your code or its caller.
 *
 *      Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

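/*
 * Illustrative sketch (not part of the original file): the typical caller of
 * execute_in_process_context() is cleanup code that may be reached from both
 * process and interrupt context.  The execute_work storage must stay valid
 * until the callback runs, so it is usually embedded in the object being
 * released; my_obj and my_obj_release are hypothetical names.
 *
 *      static void my_obj_release(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj,
 *                                                ew.work);
 *              kfree(obj);
 *      }
 *
 *      execute_in_process_context(my_obj_release, &obj->ew);
 *
 * If the caller is already in process context the release runs immediately;
 * otherwise it is deferred to keventd.
 */
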
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue_key(const char *name,
                                                unsigned int flags,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                goto err;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq)
                goto err;

        wq->flags = flags;
        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);

        if (flags & WQ_SINGLE_THREAD) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on list even if the code below fails.
                 * cpu_down(cpu) can remove cpu from cpu_populated_map before
                 * destroy_workqueue() takes the lock, in that case we leak
                 * cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
err:
        if (wq) {
                free_percpu(wq->cpu_wq);
                kfree(wq);
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

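/*
 * Illustrative sketch (not part of the original file): __create_workqueue_key()
 * is normally reached through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in <linux/workqueue.h>, paired
 * with destroy_workqueue() on teardown:
 *
 *      struct workqueue_struct *wq;
 *
 *      wq = create_singlethread_workqueue("my_driver");
 *      if (!wq)
 *              return -ENOMEM;
 *
 *      queue_work(wq, &some_work);
 *
 *      destroy_workqueue(wq);
 *
 * destroy_workqueue() itself runs all work still pending on the queue before
 * freeing it, as documented above; some_work is a hypothetical work item.
 */
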
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int err = 0;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        err = create_workqueue_thread(cwq, cpu);
                        if (!err)
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        err = -ENOMEM;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return notifier_from_errno(err);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
        struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static int do_work_for_cpu(void *_wfc)
{
        struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
        complete(&wfc->completion);
        return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct task_struct *sub_thread;
        struct work_for_cpu wfc = {
                .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
                .fn = fn,
                .arg = arg,
        };

        sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
        if (IS_ERR(sub_thread))
                return PTR_ERR(sub_thread);
        kthread_bind(sub_thread, cpu);
        wake_up_process(sub_thread);
        wait_for_completion(&wfc.completion);
        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

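/*
 * Illustrative sketch (not part of the original file): work_on_cpu() suits
 * callers that must run a sleeping-context function on one specific CPU,
 * for example to gather CPU-local information; read_node_info,
 * collect_info_on_this_cpu, target_cpu and dev are hypothetical names.
 *
 *      static long read_node_info(void *arg)
 *      {
 *              return collect_info_on_this_cpu(arg);
 *      }
 *
 *      long ret = work_on_cpu(target_cpu, read_node_info, dev);
 *
 * The caller must keep target_cpu online (e.g. via get_online_cpus()) for
 * the duration of the call and must not hold locks that the function also
 * needs.
 */
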
void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}