workqueue: use shared worklist and pool all workers per cpu
kernel/workqueue.c
1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter.
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #include <linux/idr.h>
37 #include <linux/delay.h>
38
39 enum {
40         /* global_cwq flags */
41         GCWQ_FREEZING           = 1 << 3,       /* freeze in progress */
42
43         /* worker flags */
44         WORKER_STARTED          = 1 << 0,       /* started */
45         WORKER_DIE              = 1 << 1,       /* die die die */
46         WORKER_IDLE             = 1 << 2,       /* is idle */
47         WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
48
49         /* gcwq->trustee_state */
50         TRUSTEE_START           = 0,            /* start */
51         TRUSTEE_IN_CHARGE       = 1,            /* trustee in charge of gcwq */
52         TRUSTEE_BUTCHER         = 2,            /* butcher workers */
53         TRUSTEE_RELEASE         = 3,            /* release workers */
54         TRUSTEE_DONE            = 4,            /* trustee is done */
55
56         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
57         BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
58         BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
59
60         TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
61 };
62
63 /*
64  * Structure fields follow one of the following exclusion rules.
65  *
66  * I: Set during initialization and read-only afterwards.
67  *
68  * L: gcwq->lock protected.  Access with gcwq->lock held.
69  *
70  * F: wq->flush_mutex protected.
71  *
72  * W: workqueue_lock protected.
73  */
74
75 struct global_cwq;
76
77 struct worker {
78         /* on idle list while idle, on busy hash table while busy */
79         union {
80                 struct list_head        entry;  /* L: while idle */
81                 struct hlist_node       hentry; /* L: while busy */
82         };
83
84         struct work_struct      *current_work;  /* L: work being processed */
85         struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
86         struct list_head        scheduled;      /* L: scheduled works */
87         struct task_struct      *task;          /* I: worker task */
88         struct global_cwq       *gcwq;          /* I: the associated gcwq */
89         unsigned int            flags;          /* L: flags */
90         int                     id;             /* I: worker id */
91 };
92
93 /*
94  * Global per-cpu workqueue.
95  */
96 struct global_cwq {
97         spinlock_t              lock;           /* the gcwq lock */
98         struct list_head        worklist;       /* L: list of pending works */
99         unsigned int            cpu;            /* I: the associated cpu */
100         unsigned int            flags;          /* L: GCWQ_* flags */
101
102         int                     nr_workers;     /* L: total number of workers */
103         int                     nr_idle;        /* L: currently idle ones */
104
105         /* workers are chained either in the idle_list or busy_hash */
106         struct list_head        idle_list;      /* L: list of idle workers */
107         struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
108                                                 /* L: hash of busy workers */
109
110         struct ida              worker_ida;     /* L: for worker IDs */
111
112         struct task_struct      *trustee;       /* L: for gcwq shutdown */
113         unsigned int            trustee_state;  /* L: trustee state */
114         wait_queue_head_t       trustee_wait;   /* trustee wait */
115 } ____cacheline_aligned_in_smp;
116
117 /*
118  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
119  * work_struct->data are used for flags and thus cwqs need to be
120  * aligned at (1 << WORK_STRUCT_FLAG_BITS), keeping those low bits clear.
121  */
122 struct cpu_workqueue_struct {
123         struct global_cwq       *gcwq;          /* I: the associated gcwq */
124         struct worker           *worker;
125         struct workqueue_struct *wq;            /* I: the owning workqueue */
126         int                     work_color;     /* L: current color */
127         int                     flush_color;    /* L: flushing color */
128         int                     nr_in_flight[WORK_NR_COLORS];
129                                                 /* L: nr of in_flight works */
130         int                     nr_active;      /* L: nr of active works */
131         int                     max_active;     /* L: max active works */
132         struct list_head        delayed_works;  /* L: delayed works */
133 };
134
135 /*
136  * Structure used to wait for workqueue flush.
137  */
138 struct wq_flusher {
139         struct list_head        list;           /* F: list of flushers */
140         int                     flush_color;    /* F: flush color waiting for */
141         struct completion       done;           /* flush completion */
142 };
143
144 /*
145  * The externally visible workqueue abstraction is an array of
146  * per-CPU workqueues:
147  */
148 struct workqueue_struct {
149         unsigned int            flags;          /* I: WQ_* flags */
150         struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
151         struct list_head        list;           /* W: list of all workqueues */
152
153         struct mutex            flush_mutex;    /* protects wq flushing */
154         int                     work_color;     /* F: current work color */
155         int                     flush_color;    /* F: current flush color */
156         atomic_t                nr_cwqs_to_flush; /* flush in progress */
157         struct wq_flusher       *first_flusher; /* F: first flusher */
158         struct list_head        flusher_queue;  /* F: flush waiters */
159         struct list_head        flusher_overflow; /* F: flush overflow list */
160
161         unsigned long           single_cpu;     /* cpu for single cpu wq */
162
163         int                     saved_max_active; /* I: saved cwq max_active */
164         const char              *name;          /* I: workqueue name */
165 #ifdef CONFIG_LOCKDEP
166         struct lockdep_map      lockdep_map;
167 #endif
168 };
169
170 #define for_each_busy_worker(worker, i, pos, gcwq)                      \
171         for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
172                 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
173
174 #ifdef CONFIG_DEBUG_OBJECTS_WORK
175
176 static struct debug_obj_descr work_debug_descr;
177
178 /*
179  * fixup_init is called when:
180  * - an active object is initialized
181  */
182 static int work_fixup_init(void *addr, enum debug_obj_state state)
183 {
184         struct work_struct *work = addr;
185
186         switch (state) {
187         case ODEBUG_STATE_ACTIVE:
188                 cancel_work_sync(work);
189                 debug_object_init(work, &work_debug_descr);
190                 return 1;
191         default:
192                 return 0;
193         }
194 }
195
196 /*
197  * fixup_activate is called when:
198  * - an active object is activated
199  * - an unknown object is activated (might be a statically initialized object)
200  */
201 static int work_fixup_activate(void *addr, enum debug_obj_state state)
202 {
203         struct work_struct *work = addr;
204
205         switch (state) {
206
207         case ODEBUG_STATE_NOTAVAILABLE:
208                 /*
209                  * This is not really a fixup. The work struct was
210                  * statically initialized. We just make sure that it
211                  * is tracked in the object tracker.
212                  */
213                 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
214                         debug_object_init(work, &work_debug_descr);
215                         debug_object_activate(work, &work_debug_descr);
216                         return 0;
217                 }
218                 WARN_ON_ONCE(1);
219                 return 0;
220
221         case ODEBUG_STATE_ACTIVE:
222                 WARN_ON(1);
223
224         default:
225                 return 0;
226         }
227 }
228
229 /*
230  * fixup_free is called when:
231  * - an active object is freed
232  */
233 static int work_fixup_free(void *addr, enum debug_obj_state state)
234 {
235         struct work_struct *work = addr;
236
237         switch (state) {
238         case ODEBUG_STATE_ACTIVE:
239                 cancel_work_sync(work);
240                 debug_object_free(work, &work_debug_descr);
241                 return 1;
242         default:
243                 return 0;
244         }
245 }
246
247 static struct debug_obj_descr work_debug_descr = {
248         .name           = "work_struct",
249         .fixup_init     = work_fixup_init,
250         .fixup_activate = work_fixup_activate,
251         .fixup_free     = work_fixup_free,
252 };
253
254 static inline void debug_work_activate(struct work_struct *work)
255 {
256         debug_object_activate(work, &work_debug_descr);
257 }
258
259 static inline void debug_work_deactivate(struct work_struct *work)
260 {
261         debug_object_deactivate(work, &work_debug_descr);
262 }
263
264 void __init_work(struct work_struct *work, int onstack)
265 {
266         if (onstack)
267                 debug_object_init_on_stack(work, &work_debug_descr);
268         else
269                 debug_object_init(work, &work_debug_descr);
270 }
271 EXPORT_SYMBOL_GPL(__init_work);
272
273 void destroy_work_on_stack(struct work_struct *work)
274 {
275         debug_object_free(work, &work_debug_descr);
276 }
277 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
278
279 #else
280 static inline void debug_work_activate(struct work_struct *work) { }
281 static inline void debug_work_deactivate(struct work_struct *work) { }
282 #endif
283
284 /* Serializes the accesses to the list of workqueues. */
285 static DEFINE_SPINLOCK(workqueue_lock);
286 static LIST_HEAD(workqueues);
287 static bool workqueue_freezing;         /* W: have wqs started freezing? */
288
289 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
290
291 static int worker_thread(void *__worker);
292
293 static struct global_cwq *get_gcwq(unsigned int cpu)
294 {
295         return &per_cpu(global_cwq, cpu);
296 }
297
298 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
299                                             struct workqueue_struct *wq)
300 {
301         return per_cpu_ptr(wq->cpu_wq, cpu);
302 }
303
304 static unsigned int work_color_to_flags(int color)
305 {
306         return color << WORK_STRUCT_COLOR_SHIFT;
307 }
308
309 static int get_work_color(struct work_struct *work)
310 {
311         return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
312                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
313 }
314
315 static int work_next_color(int color)
316 {
317         return (color + 1) % WORK_NR_COLORS;
318 }
319
320 /*
321  * Work data points to the cwq while a work is on queue.  Once
322  * execution starts, it points to the cpu the work was last on.  This
323  * can be distinguished by comparing the data value against
324  * PAGE_OFFSET.
325  *
326  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
327  * cwq, cpu or clear work->data.  These functions should only be
328  * called while the work is owned - ie. while the PENDING bit is set.
329  *
330  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
331  * corresponding to a work.  gcwq is available once the work has been
332  * queued anywhere after initialization.  cwq is available only from
333  * queueing until execution starts.
334  */
335 static inline void set_work_data(struct work_struct *work, unsigned long data,
336                                  unsigned long flags)
337 {
338         BUG_ON(!work_pending(work));
339         atomic_long_set(&work->data, data | flags | work_static(work));
340 }
341
342 static void set_work_cwq(struct work_struct *work,
343                          struct cpu_workqueue_struct *cwq,
344                          unsigned long extra_flags)
345 {
346         set_work_data(work, (unsigned long)cwq,
347                       WORK_STRUCT_PENDING | extra_flags);
348 }
349
350 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
351 {
352         set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
353 }
354
355 static void clear_work_data(struct work_struct *work)
356 {
357         set_work_data(work, WORK_STRUCT_NO_CPU, 0);
358 }
359
360 static inline unsigned long get_work_data(struct work_struct *work)
361 {
362         return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
363 }
364
365 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
366 {
367         unsigned long data = get_work_data(work);
368
369         return data >= PAGE_OFFSET ? (void *)data : NULL;
370 }
371
372 static struct global_cwq *get_work_gcwq(struct work_struct *work)
373 {
374         unsigned long data = get_work_data(work);
375         unsigned int cpu;
376
377         if (data >= PAGE_OFFSET)
378                 return ((struct cpu_workqueue_struct *)data)->gcwq;
379
380         cpu = data >> WORK_STRUCT_FLAG_BITS;
381         if (cpu == NR_CPUS)
382                 return NULL;
383
384         BUG_ON(cpu >= num_possible_cpus());
385         return get_gcwq(cpu);
386 }
387
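/*
 * Illustrative sketch (not part of this file): the round trip of the
 * work->data encoding described above.  work_data_encoding_example()
 * is a hypothetical name; it assumes the caller already owns the work,
 * i.e. the PENDING bit is set, as set_work_data() requires.
 */
#if 0
static void work_data_encoding_example(struct work_struct *work,
                                       struct cpu_workqueue_struct *cwq)
{
        /* while queued, data carries the cwq pointer (>= PAGE_OFFSET) */
        set_work_cwq(work, cwq, 0);
        BUG_ON(get_work_cwq(work) != cwq);
        BUG_ON(get_work_gcwq(work) != cwq->gcwq);

        /* once execution starts, data carries the cpu (< PAGE_OFFSET) */
        set_work_cpu(work, cwq->gcwq->cpu);
        BUG_ON(get_work_cwq(work));
        BUG_ON(get_work_gcwq(work) != cwq->gcwq);
}
#endif
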
388 /* Return the first worker.  Safe with preemption disabled */
389 static struct worker *first_worker(struct global_cwq *gcwq)
390 {
391         if (unlikely(list_empty(&gcwq->idle_list)))
392                 return NULL;
393
394         return list_first_entry(&gcwq->idle_list, struct worker, entry);
395 }
396
397 /**
398  * wake_up_worker - wake up an idle worker
399  * @gcwq: gcwq to wake worker for
400  *
401  * Wake up the first idle worker of @gcwq.
402  *
403  * CONTEXT:
404  * spin_lock_irq(gcwq->lock).
405  */
406 static void wake_up_worker(struct global_cwq *gcwq)
407 {
408         struct worker *worker = first_worker(gcwq);
409
410         if (likely(worker))
411                 wake_up_process(worker->task);
412 }
413
414 /**
415  * busy_worker_head - return the busy hash head for a work
416  * @gcwq: gcwq of interest
417  * @work: work to be hashed
418  *
419  * Return hash head of @gcwq for @work.
420  *
421  * CONTEXT:
422  * spin_lock_irq(gcwq->lock).
423  *
424  * RETURNS:
425  * Pointer to the hash head.
426  */
427 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
428                                            struct work_struct *work)
429 {
430         const int base_shift = ilog2(sizeof(struct work_struct));
431         unsigned long v = (unsigned long)work;
432
433         /* simple shift and fold hash, do we need something better? */
434         v >>= base_shift;
435         v += v >> BUSY_WORKER_HASH_ORDER;
436         v &= BUSY_WORKER_HASH_MASK;
437
438         return &gcwq->busy_hash[v];
439 }
440
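/*
 * Worked example for the hash above: on a 64-bit build without debug
 * options sizeof(struct work_struct) is 32, so base_shift is 5.  Works
 * 32 bytes apart then typically land in neighbouring buckets, and
 * address bits above the low BUSY_WORKER_HASH_ORDER window are folded
 * back in by the "v += v >> BUSY_WORKER_HASH_ORDER" step before the
 * final mask selects one of the BUSY_WORKER_HASH_SIZE buckets.
 */
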
441 /**
442  * __find_worker_executing_work - find worker which is executing a work
443  * @gcwq: gcwq of interest
444  * @bwh: hash head as returned by busy_worker_head()
445  * @work: work to find worker for
446  *
447  * Find a worker which is executing @work on @gcwq.  @bwh should be
448  * the hash head obtained by calling busy_worker_head() with the same
449  * work.
450  *
451  * CONTEXT:
452  * spin_lock_irq(gcwq->lock).
453  *
454  * RETURNS:
455  * Pointer to worker which is executing @work if found, NULL
456  * otherwise.
457  */
458 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
459                                                    struct hlist_head *bwh,
460                                                    struct work_struct *work)
461 {
462         struct worker *worker;
463         struct hlist_node *tmp;
464
465         hlist_for_each_entry(worker, tmp, bwh, hentry)
466                 if (worker->current_work == work)
467                         return worker;
468         return NULL;
469 }
470
471 /**
472  * find_worker_executing_work - find worker which is executing a work
473  * @gcwq: gcwq of interest
474  * @work: work to find worker for
475  *
476  * Find a worker which is executing @work on @gcwq.  This function is
477  * identical to __find_worker_executing_work() except that this
478  * function calculates @bwh itself.
479  *
480  * CONTEXT:
481  * spin_lock_irq(gcwq->lock).
482  *
483  * RETURNS:
484  * Pointer to worker which is executing @work if found, NULL
485  * otherwise.
486  */
487 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
488                                                  struct work_struct *work)
489 {
490         return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
491                                             work);
492 }
493
494 /**
495  * insert_work - insert a work into gcwq
496  * @cwq: cwq @work belongs to
497  * @work: work to insert
498  * @head: insertion point
499  * @extra_flags: extra WORK_STRUCT_* flags to set
500  *
501  * Insert @work which belongs to @cwq into @gcwq after @head.
502  * @extra_flags is or'd to work_struct flags.
503  *
504  * CONTEXT:
505  * spin_lock_irq(gcwq->lock).
506  */
507 static void insert_work(struct cpu_workqueue_struct *cwq,
508                         struct work_struct *work, struct list_head *head,
509                         unsigned int extra_flags)
510 {
511         /* we own @work, set data and link */
512         set_work_cwq(work, cwq, extra_flags);
513
514         /*
515          * Ensure that we get the right work->data if we see the
516          * result of list_add() below, see try_to_grab_pending().
517          */
518         smp_wmb();
519
520         list_add_tail(&work->entry, head);
521         wake_up_worker(cwq->gcwq);
522 }
523
524 /**
525  * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
526  * @cwq: cwq to unbind
527  *
528  * Try to unbind @cwq from single cpu workqueue processing.  If
529  * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
530  *
531  * CONTEXT:
532  * spin_lock_irq(gcwq->lock).
533  */
534 static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
535 {
536         struct workqueue_struct *wq = cwq->wq;
537         struct global_cwq *gcwq = cwq->gcwq;
538
539         BUG_ON(wq->single_cpu != gcwq->cpu);
540         /*
541          * Unbind from workqueue if @cwq is not frozen.  If frozen,
542          * thaw_workqueues() will either restart processing on this
543          * cpu or unbind if empty.  This keeps works queued while
544          * frozen fully ordered and flushable.
545          */
546         if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
547                 smp_wmb();      /* paired with cmpxchg() in __queue_work() */
548                 wq->single_cpu = NR_CPUS;
549         }
550 }
551
552 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
553                          struct work_struct *work)
554 {
555         struct global_cwq *gcwq;
556         struct cpu_workqueue_struct *cwq;
557         struct list_head *worklist;
558         unsigned long flags;
559         bool arbitrate;
560
561         debug_work_activate(work);
562
563         /*
564          * Determine gcwq to use.  SINGLE_CPU is inherently
565          * NON_REENTRANT, so test it first.
566          */
567         if (!(wq->flags & WQ_SINGLE_CPU)) {
568                 struct global_cwq *last_gcwq;
569
570                 /*
571                  * It's multi cpu.  If @wq is non-reentrant and @work
572                  * was previously on a different cpu, it might still
573                  * be running there, in which case the work needs to
574                  * be queued on that cpu to guarantee non-reentrance.
575                  */
576                 gcwq = get_gcwq(cpu);
577                 if (wq->flags & WQ_NON_REENTRANT &&
578                     (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
579                         struct worker *worker;
580
581                         spin_lock_irqsave(&last_gcwq->lock, flags);
582
583                         worker = find_worker_executing_work(last_gcwq, work);
584
585                         if (worker && worker->current_cwq->wq == wq)
586                                 gcwq = last_gcwq;
587                         else {
588                                 /* meh... not running there, queue here */
589                                 spin_unlock_irqrestore(&last_gcwq->lock, flags);
590                                 spin_lock_irqsave(&gcwq->lock, flags);
591                         }
592                 } else
593                         spin_lock_irqsave(&gcwq->lock, flags);
594         } else {
595                 unsigned int req_cpu = cpu;
596
597                 /*
598                  * It's a bit more complex for single cpu workqueues.
599                  * We first need to determine which cpu is going to be
600                  * used.  If no cpu is currently serving this
601                  * workqueue, arbitrate using atomic accesses to
602                  * wq->single_cpu; otherwise, use the current one.
603                  */
604         retry:
605                 cpu = wq->single_cpu;
606                 arbitrate = cpu == NR_CPUS;
607                 if (arbitrate)
608                         cpu = req_cpu;
609
610                 gcwq = get_gcwq(cpu);
611                 spin_lock_irqsave(&gcwq->lock, flags);
612
613                 /*
614                  * The following cmpxchg() is a full barrier paired
615                  * with smp_wmb() in cwq_unbind_single_cpu() and
616                  * guarantees that all changes to the single cpu state
617                  * (wq->single_cpu) are visible on the new cpu after this point.
618                  */
619                 if (arbitrate)
620                         cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
621
622                 if (unlikely(wq->single_cpu != cpu)) {
623                         spin_unlock_irqrestore(&gcwq->lock, flags);
624                         goto retry;
625                 }
626         }
627
628         /* gcwq determined, get cwq and queue */
629         cwq = get_cwq(gcwq->cpu, wq);
630
631         BUG_ON(!list_empty(&work->entry));
632
633         cwq->nr_in_flight[cwq->work_color]++;
634
635         if (likely(cwq->nr_active < cwq->max_active)) {
636                 cwq->nr_active++;
637                 worklist = &gcwq->worklist;
638         } else
639                 worklist = &cwq->delayed_works;
640
641         insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
642
643         spin_unlock_irqrestore(&gcwq->lock, flags);
644 }
645
646 /**
647  * queue_work - queue work on a workqueue
648  * @wq: workqueue to use
649  * @work: work to queue
650  *
651  * Returns 0 if @work was already on a queue, non-zero otherwise.
652  *
653  * We queue the work to the CPU on which it was submitted, but if the CPU dies
654  * it can be processed by another CPU.
655  */
656 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
657 {
658         int ret;
659
660         ret = queue_work_on(get_cpu(), wq, work);
661         put_cpu();
662
663         return ret;
664 }
665 EXPORT_SYMBOL_GPL(queue_work);
666
667 /**
668  * queue_work_on - queue work on specific cpu
669  * @cpu: CPU number to execute work on
670  * @wq: workqueue to use
671  * @work: work to queue
672  *
673  * Returns 0 if @work was already on a queue, non-zero otherwise.
674  *
675  * We queue the work to a specific CPU, the caller must ensure it
676  * can't go away.
677  */
678 int
679 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
680 {
681         int ret = 0;
682
683         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
684                 __queue_work(cpu, wq, work);
685                 ret = 1;
686         }
687         return ret;
688 }
689 EXPORT_SYMBOL_GPL(queue_work_on);
690
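/*
 * Illustrative sketch (not part of this file): typical caller-side use
 * of the queueing API above.  struct my_ctx, my_work_fn(), my_init()
 * and my_kick() are hypothetical names.
 */
#if 0
struct my_ctx {
        struct work_struct      work;
        /* ... driver state ... */
};

static void my_work_fn(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        /* runs in process context; may sleep, use ctx freely */
}

static void my_init(struct my_ctx *ctx)
{
        INIT_WORK(&ctx->work, my_work_fn);
}

static void my_kick(struct my_ctx *ctx, struct workqueue_struct *wq)
{
        /* returns 0 if already pending; one run then covers both requests */
        queue_work(wq, &ctx->work);
}
#endif
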
691 static void delayed_work_timer_fn(unsigned long __data)
692 {
693         struct delayed_work *dwork = (struct delayed_work *)__data;
694         struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
695
696         __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
697 }
698
699 /**
700  * queue_delayed_work - queue work on a workqueue after delay
701  * @wq: workqueue to use
702  * @dwork: delayable work to queue
703  * @delay: number of jiffies to wait before queueing
704  *
705  * Returns 0 if @work was already on a queue, non-zero otherwise.
706  */
707 int queue_delayed_work(struct workqueue_struct *wq,
708                         struct delayed_work *dwork, unsigned long delay)
709 {
710         if (delay == 0)
711                 return queue_work(wq, &dwork->work);
712
713         return queue_delayed_work_on(-1, wq, dwork, delay);
714 }
715 EXPORT_SYMBOL_GPL(queue_delayed_work);
716
717 /**
718  * queue_delayed_work_on - queue work on specific CPU after delay
719  * @cpu: CPU number to execute work on
720  * @wq: workqueue to use
721  * @dwork: work to queue
722  * @delay: number of jiffies to wait before queueing
723  *
724  * Returns 0 if @work was already on a queue, non-zero otherwise.
725  */
726 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
727                         struct delayed_work *dwork, unsigned long delay)
728 {
729         int ret = 0;
730         struct timer_list *timer = &dwork->timer;
731         struct work_struct *work = &dwork->work;
732
733         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
734                 struct global_cwq *gcwq = get_work_gcwq(work);
735                 unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
736
737                 BUG_ON(timer_pending(timer));
738                 BUG_ON(!list_empty(&work->entry));
739
740                 timer_stats_timer_set_start_info(&dwork->timer);
741                 /*
742                  * This stores cwq for the moment, for the timer_fn.
743                  * Note that the work's gcwq is preserved to allow
744                  * reentrance detection for delayed works.
745                  */
746                 set_work_cwq(work, get_cwq(lcpu, wq), 0);
747                 timer->expires = jiffies + delay;
748                 timer->data = (unsigned long)dwork;
749                 timer->function = delayed_work_timer_fn;
750
751                 if (unlikely(cpu >= 0))
752                         add_timer_on(timer, cpu);
753                 else
754                         add_timer(timer);
755                 ret = 1;
756         }
757         return ret;
758 }
759 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
760
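/*
 * Illustrative sketch (not part of this file): arming a delayed work
 * with the API above.  my_timeout_fn() and my_arm_timeout() are
 * hypothetical names.
 */
#if 0
static void my_timeout_fn(struct work_struct *work)
{
        /* runs in process context roughly @delay jiffies after queueing */
}

static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);

static void my_arm_timeout(struct workqueue_struct *wq)
{
        /* run my_timeout_fn() about one second from now */
        queue_delayed_work(wq, &my_dwork, HZ);
}
#endif
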
761 /**
762  * worker_enter_idle - enter idle state
763  * @worker: worker which is entering idle state
764  *
765  * @worker is entering idle state.  Update stats and the idle list
766  * accordingly.
767  *
768  * LOCKING:
769  * spin_lock_irq(gcwq->lock).
770  */
771 static void worker_enter_idle(struct worker *worker)
772 {
773         struct global_cwq *gcwq = worker->gcwq;
774
775         BUG_ON(worker->flags & WORKER_IDLE);
776         BUG_ON(!list_empty(&worker->entry) &&
777                (worker->hentry.next || worker->hentry.pprev));
778
779         worker->flags |= WORKER_IDLE;
780         gcwq->nr_idle++;
781
782         /* idle_list is LIFO */
783         list_add(&worker->entry, &gcwq->idle_list);
784
785         if (unlikely(worker->flags & WORKER_ROGUE))
786                 wake_up_all(&gcwq->trustee_wait);
787 }
788
789 /**
790  * worker_leave_idle - leave idle state
791  * @worker: worker which is leaving idle state
792  *
793  * @worker is leaving idle state.  Update stats.
794  *
795  * LOCKING:
796  * spin_lock_irq(gcwq->lock).
797  */
798 static void worker_leave_idle(struct worker *worker)
799 {
800         struct global_cwq *gcwq = worker->gcwq;
801
802         BUG_ON(!(worker->flags & WORKER_IDLE));
803         worker->flags &= ~WORKER_IDLE;
804         gcwq->nr_idle--;
805         list_del_init(&worker->entry);
806 }
807
808 static struct worker *alloc_worker(void)
809 {
810         struct worker *worker;
811
812         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
813         if (worker) {
814                 INIT_LIST_HEAD(&worker->entry);
815                 INIT_LIST_HEAD(&worker->scheduled);
816         }
817         return worker;
818 }
819
820 /**
821  * create_worker - create a new workqueue worker
822  * @gcwq: gcwq the new worker will belong to
823  * @bind: whether to bind the new worker to the gcwq's cpu or not
824  *
825  * Create a new worker which is bound to @gcwq.  The returned worker
826  * can be started by calling start_worker() or destroyed using
827  * destroy_worker().
828  *
829  * CONTEXT:
830  * Might sleep.  Does GFP_KERNEL allocations.
831  *
832  * RETURNS:
833  * Pointer to the newly created worker.
834  */
835 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
836 {
837         int id = -1;
838         struct worker *worker = NULL;
839
840         spin_lock_irq(&gcwq->lock);
841         while (ida_get_new(&gcwq->worker_ida, &id)) {
842                 spin_unlock_irq(&gcwq->lock);
843                 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
844                         goto fail;
845                 spin_lock_irq(&gcwq->lock);
846         }
847         spin_unlock_irq(&gcwq->lock);
848
849         worker = alloc_worker();
850         if (!worker)
851                 goto fail;
852
853         worker->gcwq = gcwq;
854         worker->id = id;
855
856         worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
857                                       gcwq->cpu, id);
858         if (IS_ERR(worker->task))
859                 goto fail;
860
861         /*
862          * A rogue worker will become a regular one if CPU comes
863          * online later on.  Make sure every worker has
864          * PF_THREAD_BOUND set.
865          */
866         if (bind)
867                 kthread_bind(worker->task, gcwq->cpu);
868         else
869                 worker->task->flags |= PF_THREAD_BOUND;
870
871         return worker;
872 fail:
873         if (id >= 0) {
874                 spin_lock_irq(&gcwq->lock);
875                 ida_remove(&gcwq->worker_ida, id);
876                 spin_unlock_irq(&gcwq->lock);
877         }
878         kfree(worker);
879         return NULL;
880 }
881
882 /**
883  * start_worker - start a newly created worker
884  * @worker: worker to start
885  *
886  * Make the gcwq aware of @worker and start it.
887  *
888  * CONTEXT:
889  * spin_lock_irq(gcwq->lock).
890  */
891 static void start_worker(struct worker *worker)
892 {
893         worker->flags |= WORKER_STARTED;
894         worker->gcwq->nr_workers++;
895         worker_enter_idle(worker);
896         wake_up_process(worker->task);
897 }
898
899 /**
900  * destroy_worker - destroy a workqueue worker
901  * @worker: worker to be destroyed
902  *
903  * Destroy @worker and adjust @gcwq stats accordingly.
904  *
905  * CONTEXT:
906  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
907  */
908 static void destroy_worker(struct worker *worker)
909 {
910         struct global_cwq *gcwq = worker->gcwq;
911         int id = worker->id;
912
913         /* sanity check frenzy */
914         BUG_ON(worker->current_work);
915         BUG_ON(!list_empty(&worker->scheduled));
916
917         if (worker->flags & WORKER_STARTED)
918                 gcwq->nr_workers--;
919         if (worker->flags & WORKER_IDLE)
920                 gcwq->nr_idle--;
921
922         list_del_init(&worker->entry);
923         worker->flags |= WORKER_DIE;
924
925         spin_unlock_irq(&gcwq->lock);
926
927         kthread_stop(worker->task);
928         kfree(worker);
929
930         spin_lock_irq(&gcwq->lock);
931         ida_remove(&gcwq->worker_ida, id);
932 }
933
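/*
 * Illustrative sketch (not part of this file): creating and starting a
 * worker bound to a gcwq with the helpers above.  example_spawn_worker()
 * is a hypothetical name.
 */
#if 0
static int example_spawn_worker(struct global_cwq *gcwq)
{
        struct worker *worker;

        worker = create_worker(gcwq, true);     /* may sleep */
        if (!worker)
                return -ENOMEM;

        spin_lock_irq(&gcwq->lock);
        start_worker(worker);                   /* needs gcwq->lock */
        spin_unlock_irq(&gcwq->lock);
        return 0;
}
#endif
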
934 /**
935  * move_linked_works - move linked works to a list
936  * @work: start of series of works to be scheduled
937  * @head: target list to append @work to
938  * @nextp: out parameter for nested worklist walking
939  *
940  * Schedule linked works starting from @work to @head.  Work series to
941  * be scheduled starts at @work and includes any consecutive work with
942  * WORK_STRUCT_LINKED set in its predecessor.
943  *
944  * If @nextp is not NULL, it's updated to point to the next work of
945  * the last scheduled work.  This allows move_linked_works() to be
946  * nested inside outer list_for_each_entry_safe().
947  *
948  * CONTEXT:
949  * spin_lock_irq(gcwq->lock).
950  */
951 static void move_linked_works(struct work_struct *work, struct list_head *head,
952                               struct work_struct **nextp)
953 {
954         struct work_struct *n;
955
956         /*
957          * Linked worklist will always end before the end of the list,
958          * use NULL for list head.
959          */
960         list_for_each_entry_safe_from(work, n, NULL, entry) {
961                 list_move_tail(&work->entry, head);
962                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
963                         break;
964         }
965
966         /*
967          * If we're already inside safe list traversal and have moved
968          * multiple works to the scheduled queue, the next position
969          * needs to be updated.
970          */
971         if (nextp)
972                 *nextp = n;
973 }
974
975 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
976 {
977         struct work_struct *work = list_first_entry(&cwq->delayed_works,
978                                                     struct work_struct, entry);
979
980         move_linked_works(work, &cwq->gcwq->worklist, NULL);
981         cwq->nr_active++;
982 }
983
984 /**
985  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
986  * @cwq: cwq of interest
987  * @color: color of work which left the queue
988  *
989  * A work either has completed or is removed from pending queue,
990  * decrement nr_in_flight of its cwq and handle workqueue flushing.
991  *
992  * CONTEXT:
993  * spin_lock_irq(gcwq->lock).
994  */
995 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
996 {
997         /* ignore uncolored works */
998         if (color == WORK_NO_COLOR)
999                 return;
1000
1001         cwq->nr_in_flight[color]--;
1002         cwq->nr_active--;
1003
1004         if (!list_empty(&cwq->delayed_works)) {
1005                 /* one down, submit a delayed one */
1006                 if (cwq->nr_active < cwq->max_active)
1007                         cwq_activate_first_delayed(cwq);
1008         } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
1009                 /* this was the last work, unbind from single cpu */
1010                 cwq_unbind_single_cpu(cwq);
1011         }
1012
1013         /* is flush in progress and are we at the flushing tip? */
1014         if (likely(cwq->flush_color != color))
1015                 return;
1016
1017         /* are there still in-flight works? */
1018         if (cwq->nr_in_flight[color])
1019                 return;
1020
1021         /* this cwq is done, clear flush_color */
1022         cwq->flush_color = -1;
1023
1024         /*
1025          * If this was the last cwq, wake up the first flusher.  It
1026          * will handle the rest.
1027          */
1028         if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1029                 complete(&cwq->wq->first_flusher->done);
1030 }
1031
1032 /**
1033  * process_one_work - process single work
1034  * @worker: self
1035  * @work: work to process
1036  *
1037  * Process @work.  This function contains all the logic necessary to
1038  * process a single work including synchronization against and
1039  * interaction with other workers on the same cpu, queueing and
1040  * flushing.  As long as the context requirement is met, any worker can
1041  * call this function to process a work.
1042  *
1043  * CONTEXT:
1044  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1045  */
1046 static void process_one_work(struct worker *worker, struct work_struct *work)
1047 {
1048         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1049         struct global_cwq *gcwq = cwq->gcwq;
1050         struct hlist_head *bwh = busy_worker_head(gcwq, work);
1051         work_func_t f = work->func;
1052         int work_color;
1053         struct worker *collision;
1054 #ifdef CONFIG_LOCKDEP
1055         /*
1056          * It is permissible to free the struct work_struct from
1057          * inside the function that is called from it, so we need to
1058          * take that into account for lockdep too.  To avoid bogus "held
1059          * lock freed" warnings as well as problems when looking into
1060          * work->lockdep_map, make a copy and use that here.
1061          */
1062         struct lockdep_map lockdep_map = work->lockdep_map;
1063 #endif
1064         /*
1065          * A single work shouldn't be executed concurrently by
1066          * multiple workers on a single cpu.  Check whether anyone is
1067          * already processing the work.  If so, defer the work to the
1068          * currently executing one.
1069          */
1070         collision = __find_worker_executing_work(gcwq, bwh, work);
1071         if (unlikely(collision)) {
1072                 move_linked_works(work, &collision->scheduled, NULL);
1073                 return;
1074         }
1075
1076         /* claim and process */
1077         debug_work_deactivate(work);
1078         hlist_add_head(&worker->hentry, bwh);
1079         worker->current_work = work;
1080         worker->current_cwq = cwq;
1081         work_color = get_work_color(work);
1082
1083         /* record the current cpu number in the work data and dequeue */
1084         set_work_cpu(work, gcwq->cpu);
1085         list_del_init(&work->entry);
1086
1087         spin_unlock_irq(&gcwq->lock);
1088
1089         work_clear_pending(work);
1090         lock_map_acquire(&cwq->wq->lockdep_map);
1091         lock_map_acquire(&lockdep_map);
1092         f(work);
1093         lock_map_release(&lockdep_map);
1094         lock_map_release(&cwq->wq->lockdep_map);
1095
1096         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1097                 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1098                        "%s/0x%08x/%d\n",
1099                        current->comm, preempt_count(), task_pid_nr(current));
1100                 printk(KERN_ERR "    last function: ");
1101                 print_symbol("%s\n", (unsigned long)f);
1102                 debug_show_held_locks(current);
1103                 dump_stack();
1104         }
1105
1106         spin_lock_irq(&gcwq->lock);
1107
1108         /* we're done with it, release */
1109         hlist_del_init(&worker->hentry);
1110         worker->current_work = NULL;
1111         worker->current_cwq = NULL;
1112         cwq_dec_nr_in_flight(cwq, work_color);
1113 }
1114
1115 /**
1116  * process_scheduled_works - process scheduled works
1117  * @worker: self
1118  *
1119  * Process all scheduled works.  Please note that the scheduled list
1120  * may change while processing a work, so this function repeatedly
1121  * fetches a work from the top and executes it.
1122  *
1123  * CONTEXT:
1124  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1125  * multiple times.
1126  */
1127 static void process_scheduled_works(struct worker *worker)
1128 {
1129         while (!list_empty(&worker->scheduled)) {
1130                 struct work_struct *work = list_first_entry(&worker->scheduled,
1131                                                 struct work_struct, entry);
1132                 process_one_work(worker, work);
1133         }
1134 }
1135
1136 /**
1137  * worker_thread - the worker thread function
1138  * @__worker: self
1139  *
1140  * The cwq worker thread function.
1141  */
1142 static int worker_thread(void *__worker)
1143 {
1144         struct worker *worker = __worker;
1145         struct global_cwq *gcwq = worker->gcwq;
1146
1147 woke_up:
1148         spin_lock_irq(&gcwq->lock);
1149
1150         /* DIE can be set only while we're idle, so checking here is enough */
1151         if (worker->flags & WORKER_DIE) {
1152                 spin_unlock_irq(&gcwq->lock);
1153                 return 0;
1154         }
1155
1156         worker_leave_idle(worker);
1157 recheck:
1158         /*
1159          * ->scheduled list can only be filled while a worker is
1160          * preparing to process a work or actually processing it.
1161          * Make sure nobody diddled with it while I was sleeping.
1162          */
1163         BUG_ON(!list_empty(&worker->scheduled));
1164
1165         while (!list_empty(&gcwq->worklist)) {
1166                 struct work_struct *work =
1167                         list_first_entry(&gcwq->worklist,
1168                                          struct work_struct, entry);
1169
1170                 /*
1171                  * The following is a rather inefficient way to close
1172                  * the race window against cpu hotplug operations.  Will
1173                  * be replaced soon.
1174                  */
1175                 if (unlikely(!(worker->flags & WORKER_ROGUE) &&
1176                              !cpumask_equal(&worker->task->cpus_allowed,
1177                                             get_cpu_mask(gcwq->cpu)))) {
1178                         spin_unlock_irq(&gcwq->lock);
1179                         set_cpus_allowed_ptr(worker->task,
1180                                              get_cpu_mask(gcwq->cpu));
1181                         cpu_relax();
1182                         spin_lock_irq(&gcwq->lock);
1183                         goto recheck;
1184                 }
1185
1186                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1187                         /* optimization path, not strictly necessary */
1188                         process_one_work(worker, work);
1189                         if (unlikely(!list_empty(&worker->scheduled)))
1190                                 process_scheduled_works(worker);
1191                 } else {
1192                         move_linked_works(work, &worker->scheduled, NULL);
1193                         process_scheduled_works(worker);
1194                 }
1195         }
1196
1197         /*
1198          * gcwq->lock is held and there's no work to process, sleep.
1199          * Workers are woken up only while holding gcwq->lock, so
1200          * setting the current state before releasing gcwq->lock is
1201          * enough to prevent losing any event.
1202          */
1203         worker_enter_idle(worker);
1204         __set_current_state(TASK_INTERRUPTIBLE);
1205         spin_unlock_irq(&gcwq->lock);
1206         schedule();
1207         goto woke_up;
1208 }
1209
1210 struct wq_barrier {
1211         struct work_struct      work;
1212         struct completion       done;
1213 };
1214
1215 static void wq_barrier_func(struct work_struct *work)
1216 {
1217         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1218         complete(&barr->done);
1219 }
1220
1221 /**
1222  * insert_wq_barrier - insert a barrier work
1223  * @cwq: cwq to insert barrier into
1224  * @barr: wq_barrier to insert
1225  * @target: target work to attach @barr to
1226  * @worker: worker currently executing @target, NULL if @target is not executing
1227  *
1228  * @barr is linked to @target such that @barr is completed only after
1229  * @target finishes execution.  Please note that the ordering
1230  * guarantee is observed only with respect to @target and on the local
1231  * cpu.
1232  *
1233  * Currently, a queued barrier can't be canceled.  This is because
1234  * try_to_grab_pending() can't determine whether the work to be
1235  * grabbed is at the head of the queue and thus can't clear the LINKED
1236  * flag of the previous work, while a work with the LINKED flag set
1237  * must always be followed by a valid next work.
1238  *
1239  * Note that when @worker is non-NULL, @target may be modified
1240  * underneath us, so we can't reliably determine cwq from @target.
1241  *
1242  * CONTEXT:
1243  * spin_lock_irq(gcwq->lock).
1244  */
1245 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1246                               struct wq_barrier *barr,
1247                               struct work_struct *target, struct worker *worker)
1248 {
1249         struct list_head *head;
1250         unsigned int linked = 0;
1251
1252         /*
1253          * debugobject calls are safe here even with gcwq->lock locked
1254          * as we know for sure that this will not trigger any of the
1255          * checks and call back into the fixup functions where we
1256          * might deadlock.
1257          */
1258         INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
1259         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1260         init_completion(&barr->done);
1261
1262         /*
1263          * If @target is currently being executed, schedule the
1264          * barrier to the worker; otherwise, put it after @target.
1265          */
1266         if (worker)
1267                 head = worker->scheduled.next;
1268         else {
1269                 unsigned long *bits = work_data_bits(target);
1270
1271                 head = target->entry.next;
1272                 /* there can already be other linked works, inherit and set */
1273                 linked = *bits & WORK_STRUCT_LINKED;
1274                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
1275         }
1276
1277         debug_work_activate(&barr->work);
1278         insert_work(cwq, &barr->work, head,
1279                     work_color_to_flags(WORK_NO_COLOR) | linked);
1280 }
1281
1282 /**
1283  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
1284  * @wq: workqueue being flushed
1285  * @flush_color: new flush color, < 0 for no-op
1286  * @work_color: new work color, < 0 for no-op
1287  *
1288  * Prepare cwqs for workqueue flushing.
1289  *
1290  * If @flush_color is non-negative, flush_color on all cwqs should be
1291  * -1.  If no cwq has in-flight commands at the specified color, all
1292  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
1293  * has in flight commands, its cwq->flush_color is set to
1294  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
1295  * wakeup logic is armed and %true is returned.
1296  *
1297  * The caller should have initialized @wq->first_flusher prior to
1298  * calling this function with non-negative @flush_color.  If
1299  * @flush_color is negative, no flush color update is done and %false
1300  * is returned.
1301  *
1302  * If @work_color is non-negative, all cwqs should have the same
1303  * work_color which is previous to @work_color and all will be
1304  * advanced to @work_color.
1305  *
1306  * CONTEXT:
1307  * mutex_lock(wq->flush_mutex).
1308  *
1309  * RETURNS:
1310  * %true if @flush_color >= 0 and there's something to flush.  %false
1311  * otherwise.
1312  */
1313 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
1314                                       int flush_color, int work_color)
1315 {
1316         bool wait = false;
1317         unsigned int cpu;
1318
1319         if (flush_color >= 0) {
1320                 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
1321                 atomic_set(&wq->nr_cwqs_to_flush, 1);
1322         }
1323
1324         for_each_possible_cpu(cpu) {
1325                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1326                 struct global_cwq *gcwq = cwq->gcwq;
1327
1328                 spin_lock_irq(&gcwq->lock);
1329
1330                 if (flush_color >= 0) {
1331                         BUG_ON(cwq->flush_color != -1);
1332
1333                         if (cwq->nr_in_flight[flush_color]) {
1334                                 cwq->flush_color = flush_color;
1335                                 atomic_inc(&wq->nr_cwqs_to_flush);
1336                                 wait = true;
1337                         }
1338                 }
1339
1340                 if (work_color >= 0) {
1341                         BUG_ON(work_color != work_next_color(cwq->work_color));
1342                         cwq->work_color = work_color;
1343                 }
1344
1345                 spin_unlock_irq(&gcwq->lock);
1346         }
1347
1348         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
1349                 complete(&wq->first_flusher->done);
1350
1351         return wait;
1352 }
1353
1354 /**
1355  * flush_workqueue - ensure that any scheduled work has run to completion.
1356  * @wq: workqueue to flush
1357  *
1358  * Forces execution of the workqueue and blocks until its completion.
1359  * This is typically used in driver shutdown handlers.
1360  *
1361  * We sleep until all works which were queued on entry have been handled,
1362  * but we are not livelocked by new incoming ones.
1363  */
1364 void flush_workqueue(struct workqueue_struct *wq)
1365 {
1366         struct wq_flusher this_flusher = {
1367                 .list = LIST_HEAD_INIT(this_flusher.list),
1368                 .flush_color = -1,
1369                 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
1370         };
1371         int next_color;
1372
1373         lock_map_acquire(&wq->lockdep_map);
1374         lock_map_release(&wq->lockdep_map);
1375
1376         mutex_lock(&wq->flush_mutex);
1377
1378         /*
1379          * Start-to-wait phase
1380          */
1381         next_color = work_next_color(wq->work_color);
1382
1383         if (next_color != wq->flush_color) {
1384                 /*
1385                  * Color space is not full.  The current work_color
1386                  * becomes our flush_color and work_color is advanced
1387                  * by one.
1388                  */
1389                 BUG_ON(!list_empty(&wq->flusher_overflow));
1390                 this_flusher.flush_color = wq->work_color;
1391                 wq->work_color = next_color;
1392
1393                 if (!wq->first_flusher) {
1394                         /* no flush in progress, become the first flusher */
1395                         BUG_ON(wq->flush_color != this_flusher.flush_color);
1396
1397                         wq->first_flusher = &this_flusher;
1398
1399                         if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
1400                                                        wq->work_color)) {
1401                                 /* nothing to flush, done */
1402                                 wq->flush_color = next_color;
1403                                 wq->first_flusher = NULL;
1404                                 goto out_unlock;
1405                         }
1406                 } else {
1407                         /* wait in queue */
1408                         BUG_ON(wq->flush_color == this_flusher.flush_color);
1409                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
1410                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1411                 }
1412         } else {
1413                 /*
1414                  * Oops, color space is full, wait on overflow queue.
1415                  * The next flush completion will assign us
1416                  * flush_color and transfer to flusher_queue.
1417                  */
1418                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
1419         }
1420
1421         mutex_unlock(&wq->flush_mutex);
1422
1423         wait_for_completion(&this_flusher.done);
1424
1425         /*
1426          * Wake-up-and-cascade phase
1427          *
1428          * First flushers are responsible for cascading flushes and
1429          * handling overflow.  Non-first flushers can simply return.
1430          */
1431         if (wq->first_flusher != &this_flusher)
1432                 return;
1433
1434         mutex_lock(&wq->flush_mutex);
1435
1436         wq->first_flusher = NULL;
1437
1438         BUG_ON(!list_empty(&this_flusher.list));
1439         BUG_ON(wq->flush_color != this_flusher.flush_color);
1440
1441         while (true) {
1442                 struct wq_flusher *next, *tmp;
1443
1444                 /* complete all the flushers sharing the current flush color */
1445                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
1446                         if (next->flush_color != wq->flush_color)
1447                                 break;
1448                         list_del_init(&next->list);
1449                         complete(&next->done);
1450                 }
1451
1452                 BUG_ON(!list_empty(&wq->flusher_overflow) &&
1453                        wq->flush_color != work_next_color(wq->work_color));
1454
1455                 /* this flush_color is finished, advance by one */
1456                 wq->flush_color = work_next_color(wq->flush_color);
1457
1458                 /* one color has been freed, handle overflow queue */
1459                 if (!list_empty(&wq->flusher_overflow)) {
1460                         /*
1461                          * Assign the same color to all overflowed
1462                          * flushers, advance work_color and append to
1463                          * flusher_queue.  This is the start-to-wait
1464                          * phase for these overflowed flushers.
1465                          */
1466                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
1467                                 tmp->flush_color = wq->work_color;
1468
1469                         wq->work_color = work_next_color(wq->work_color);
1470
1471                         list_splice_tail_init(&wq->flusher_overflow,
1472                                               &wq->flusher_queue);
1473                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1474                 }
1475
1476                 if (list_empty(&wq->flusher_queue)) {
1477                         BUG_ON(wq->flush_color != wq->work_color);
1478                         break;
1479                 }
1480
1481                 /*
1482                  * Need to flush more colors.  Make the next flusher
1483                  * the new first flusher and arm cwqs.
1484                  */
1485                 BUG_ON(wq->flush_color == wq->work_color);
1486                 BUG_ON(wq->flush_color != next->flush_color);
1487
1488                 list_del_init(&next->list);
1489                 wq->first_flusher = next;
1490
1491                 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
1492                         break;
1493
1494                 /*
1495                  * Meh... this color is already done, clear first
1496                  * flusher and repeat cascading.
1497                  */
1498                 wq->first_flusher = NULL;
1499         }
1500
1501 out_unlock:
1502         mutex_unlock(&wq->flush_mutex);
1503 }
1504 EXPORT_SYMBOL_GPL(flush_workqueue);
1505
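/*
 * Illustrative sketch (not part of this file): the driver shutdown
 * pattern the comment above refers to.  my_wq and my_stop_queueing()
 * are hypothetical names.
 */
#if 0
static struct workqueue_struct *my_wq;

static void my_shutdown(void)
{
        my_stop_queueing();             /* nothing may requeue from here on */
        flush_workqueue(my_wq);         /* wait for everything queued so far */
        destroy_workqueue(my_wq);
}
#endif
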
1506 /**
1507  * flush_work - block until a work_struct's callback has terminated
1508  * @work: the work which is to be flushed
1509  *
1510  * Returns false if @work has already terminated.
1511  *
1512  * It is expected that, prior to calling flush_work(), the caller has
1513  * arranged for the work to not be requeued, otherwise it doesn't make
1514  * sense to use this function.
1515  */
1516 int flush_work(struct work_struct *work)
1517 {
1518         struct worker *worker = NULL;
1519         struct global_cwq *gcwq;
1520         struct cpu_workqueue_struct *cwq;
1521         struct wq_barrier barr;
1522
1523         might_sleep();
1524         gcwq = get_work_gcwq(work);
1525         if (!gcwq)
1526                 return 0;
1527
1528         spin_lock_irq(&gcwq->lock);
1529         if (!list_empty(&work->entry)) {
1530                 /*
1531                  * See the comment near try_to_grab_pending()->smp_rmb().
1532                  * If it was re-queued to a different gcwq under us, we
1533                  * are not going to wait.
1534                  */
1535                 smp_rmb();
1536                 cwq = get_work_cwq(work);
1537                 if (unlikely(!cwq || gcwq != cwq->gcwq))
1538                         goto already_gone;
1539         } else {
1540                 worker = find_worker_executing_work(gcwq, work);
1541                 if (!worker)
1542                         goto already_gone;
1543                 cwq = worker->current_cwq;
1544         }
1545
1546         insert_wq_barrier(cwq, &barr, work, worker);
1547         spin_unlock_irq(&gcwq->lock);
1548
1549         lock_map_acquire(&cwq->wq->lockdep_map);
1550         lock_map_release(&cwq->wq->lockdep_map);
1551
1552         wait_for_completion(&barr.done);
1553         destroy_work_on_stack(&barr.work);
1554         return 1;
1555 already_gone:
1556         spin_unlock_irq(&gcwq->lock);
1557         return 0;
1558 }
1559 EXPORT_SYMBOL_GPL(flush_work);
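
/*
 * A minimal flush_work() usage sketch.  All foo_* names are hypothetical
 * driver code; they only illustrate the calling pattern described above:
 * first make sure nothing can requeue the work (here by freeing the irq
 * whose handler queues it), then flush it before tearing down the data it
 * uses.
 *
 *      struct foo_device {
 *              int                     irq;
 *              void                    *buf;
 *              struct work_struct      work;
 *      };
 *
 *      static void foo_teardown(struct foo_device *foo)
 *      {
 *              free_irq(foo->irq, foo);
 *              flush_work(&foo->work);
 *              kfree(foo->buf);
 *      }
 */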
1560
1561 /*
1562  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
1563  * so this work can't be re-armed in any way.
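 * Returns 1 if a pending work was stolen off its worklist, 0 if the work
 * was idle and PENDING was grabbed directly, or -1 if the grab failed and
 * the caller should retry.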
1564  */
1565 static int try_to_grab_pending(struct work_struct *work)
1566 {
1567         struct global_cwq *gcwq;
1568         int ret = -1;
1569
1570         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1571                 return 0;
1572
1573         /*
1574          * The queueing is in progress, or it is already queued. Try to
1575          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1576          */
1577         gcwq = get_work_gcwq(work);
1578         if (!gcwq)
1579                 return ret;
1580
1581         spin_lock_irq(&gcwq->lock);
1582         if (!list_empty(&work->entry)) {
1583                 /*
1584                  * This work is queued, but perhaps we locked the wrong gcwq.
1585                  * In that case we must see the new value after rmb(), see
1586                  * insert_work()->wmb().
1587                  */
1588                 smp_rmb();
1589                 if (gcwq == get_work_gcwq(work)) {
1590                         debug_work_deactivate(work);
1591                         list_del_init(&work->entry);
1592                         cwq_dec_nr_in_flight(get_work_cwq(work),
1593                                              get_work_color(work));
1594                         ret = 1;
1595                 }
1596         }
1597         spin_unlock_irq(&gcwq->lock);
1598
1599         return ret;
1600 }
1601
1602 static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
1603 {
1604         struct wq_barrier barr;
1605         struct worker *worker;
1606
1607         spin_lock_irq(&gcwq->lock);
1608
1609         worker = find_worker_executing_work(gcwq, work);
1610         if (unlikely(worker))
1611                 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
1612
1613         spin_unlock_irq(&gcwq->lock);
1614
1615         if (unlikely(worker)) {
1616                 wait_for_completion(&barr.done);
1617                 destroy_work_on_stack(&barr.work);
1618         }
1619 }
1620
1621 static void wait_on_work(struct work_struct *work)
1622 {
1623         int cpu;
1624
1625         might_sleep();
1626
1627         lock_map_acquire(&work->lockdep_map);
1628         lock_map_release(&work->lockdep_map);
1629
1630         for_each_possible_cpu(cpu)
1631                 wait_on_cpu_work(get_gcwq(cpu), work);
1632 }
1633
1634 static int __cancel_work_timer(struct work_struct *work,
1635                                 struct timer_list* timer)
1636 {
1637         int ret;
1638
1639         do {
1640                 ret = (timer && likely(del_timer(timer)));
1641                 if (!ret)
1642                         ret = try_to_grab_pending(work);
1643                 wait_on_work(work);
1644         } while (unlikely(ret < 0));
1645
1646         clear_work_data(work);
1647         return ret;
1648 }
1649
1650 /**
1651  * cancel_work_sync - block until a work_struct's callback has terminated
1652  * @work: the work which is to be flushed
1653  *
1654  * Returns true if @work was pending.
1655  *
1656  * cancel_work_sync() will cancel the work if it is queued. If the work's
1657  * callback appears to be running, cancel_work_sync() will block until it
1658  * has completed.
1659  *
1660  * It is possible to use this function if the work re-queues itself. It can
1661  * cancel the work even if it migrates to another workqueue, however in that
1662  * case it only guarantees that work->func() has completed on the last queued
1663  * workqueue.
1664  *
1665  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
1666  * pending, otherwise it goes into a busy-wait loop until the timer expires.
1667  *
1668  * The caller must ensure that workqueue_struct on which this work was last
1669  * queued can't be destroyed before this function returns.
1670  */
1671 int cancel_work_sync(struct work_struct *work)
1672 {
1673         return __cancel_work_timer(work, NULL);
1674 }
1675 EXPORT_SYMBOL_GPL(cancel_work_sync);
1676
1677 /**
1678  * cancel_delayed_work_sync - reliably kill off a delayed work.
1679  * @dwork: the delayed work struct
1680  *
1681  * Returns true if @dwork was pending.
1682  *
1683  * It is possible to use this function if @dwork rearms itself via queue_work()
1684  * or queue_delayed_work(). See also the comment for cancel_work_sync().
1685  */
1686 int cancel_delayed_work_sync(struct delayed_work *dwork)
1687 {
1688         return __cancel_work_timer(&dwork->work, &dwork->timer);
1689 }
1690 EXPORT_SYMBOL(cancel_delayed_work_sync);
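
/*
 * A minimal cancel_delayed_work_sync() usage sketch.  All foo_* names are
 * hypothetical and struct foo_device is assumed to contain a struct
 * delayed_work poll_work member.  The delayed work rearms itself to poll
 * periodically, which is fine per the comment above; foo_stop() reliably
 * kills it even if the timer or the callback is currently in flight.
 *
 *      static void foo_poll(struct work_struct *work)
 *      {
 *              struct foo_device *foo = container_of(work, struct foo_device,
 *                                                    poll_work.work);
 *
 *              foo_sample_hw(foo);
 *              schedule_delayed_work(&foo->poll_work, HZ);
 *      }
 *
 *      static void foo_start(struct foo_device *foo)
 *      {
 *              INIT_DELAYED_WORK(&foo->poll_work, foo_poll);
 *              schedule_delayed_work(&foo->poll_work, HZ);
 *      }
 *
 *      static void foo_stop(struct foo_device *foo)
 *      {
 *              cancel_delayed_work_sync(&foo->poll_work);
 *      }
 */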
1691
1692 static struct workqueue_struct *keventd_wq __read_mostly;
1693
1694 /**
1695  * schedule_work - put work task in global workqueue
1696  * @work: job to be done
1697  *
1698  * Returns zero if @work was already on the kernel-global workqueue and
1699  * non-zero otherwise.
1700  *
1701  * This puts a job in the kernel-global workqueue if it was not already
1702  * queued and leaves it in the same position on the kernel-global
1703  * workqueue otherwise.
1704  */
1705 int schedule_work(struct work_struct *work)
1706 {
1707         return queue_work(keventd_wq, work);
1708 }
1709 EXPORT_SYMBOL(schedule_work);
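
/*
 * A minimal schedule_work() sketch: deferring event processing from an
 * interrupt handler to process context on the kernel-global workqueue.
 * foo_irq() and foo_process_events() are hypothetical (the latter runs in
 * process context and may sleep); <linux/interrupt.h> and
 * <linux/workqueue.h> are assumed to be included.
 *
 *      static void foo_event_fn(struct work_struct *work)
 *      {
 *              foo_process_events();
 *      }
 *      static DECLARE_WORK(foo_event_work, foo_event_fn);
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              schedule_work(&foo_event_work);
 *              return IRQ_HANDLED;
 *      }
 */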
1710
1711 /**
1712  * schedule_work_on - put work task on a specific cpu
1713  * @cpu: cpu to put the work task on
1714  * @work: job to be done
1715  *
1716  * This puts a job on a specific cpu.
1717  */
1718 int schedule_work_on(int cpu, struct work_struct *work)
1719 {
1720         return queue_work_on(cpu, keventd_wq, work);
1721 }
1722 EXPORT_SYMBOL(schedule_work_on);
1723
1724 /**
1725  * schedule_delayed_work - put work task in global workqueue after delay
1726  * @dwork: job to be done
1727  * @delay: number of jiffies to wait or 0 for immediate execution
1728  *
1729  * After waiting for a given time this puts a job in the kernel-global
1730  * workqueue.
1731  */
1732 int schedule_delayed_work(struct delayed_work *dwork,
1733                                         unsigned long delay)
1734 {
1735         return queue_delayed_work(keventd_wq, dwork, delay);
1736 }
1737 EXPORT_SYMBOL(schedule_delayed_work);
1738
1739 /**
1740  * flush_delayed_work - block until a delayed_work's callback has terminated
1741  * @dwork: the delayed work which is to be flushed
1742  *
1743  * Any timeout is cancelled, and any pending work is run immediately.
1744  */
1745 void flush_delayed_work(struct delayed_work *dwork)
1746 {
1747         if (del_timer_sync(&dwork->timer)) {
1748                 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
1749                              &dwork->work);
1750                 put_cpu();
1751         }
1752         flush_work(&dwork->work);
1753 }
1754 EXPORT_SYMBOL(flush_delayed_work);
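
/*
 * A minimal flush_delayed_work() sketch.  A hypothetical driver batches a
 * deferred register writeback but must push it out immediately before
 * suspending; any pending timer is fired early and the work is flushed.
 *
 *      static void foo_writeback_fn(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(foo_writeback_work, foo_writeback_fn);
 *
 *      static void foo_mark_dirty(void)
 *      {
 *              schedule_delayed_work(&foo_writeback_work,
 *                                    msecs_to_jiffies(500));
 *      }
 *
 *      static int foo_suspend(void)
 *      {
 *              flush_delayed_work(&foo_writeback_work);
 *              return 0;
 *      }
 */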
1755
1756 /**
1757  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
1758  * @cpu: cpu to use
1759  * @dwork: job to be done
1760  * @delay: number of jiffies to wait
1761  *
1762  * After waiting for a given time this puts a job in the kernel-global
1763  * workqueue on the specified CPU.
1764  */
1765 int schedule_delayed_work_on(int cpu,
1766                         struct delayed_work *dwork, unsigned long delay)
1767 {
1768         return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
1769 }
1770 EXPORT_SYMBOL(schedule_delayed_work_on);
1771
1772 /**
1773  * schedule_on_each_cpu - call a function on each online CPU from keventd
1774  * @func: the function to call
1775  *
1776  * Returns zero on success.
1777  * Returns a negative errno on failure.
1778  *
1779  * schedule_on_each_cpu() is very slow.
1780  */
1781 int schedule_on_each_cpu(work_func_t func)
1782 {
1783         int cpu;
1784         int orig = -1;
1785         struct work_struct *works;
1786
1787         works = alloc_percpu(struct work_struct);
1788         if (!works)
1789                 return -ENOMEM;
1790
1791         get_online_cpus();
1792
1793         /*
1794          * When running in keventd don't schedule a work item on
1795          * itself.  Can just call directly because the work queue is
1796          * already bound.  This also is faster.
1797          */
1798         if (current_is_keventd())
1799                 orig = raw_smp_processor_id();
1800
1801         for_each_online_cpu(cpu) {
1802                 struct work_struct *work = per_cpu_ptr(works, cpu);
1803
1804                 INIT_WORK(work, func);
1805                 if (cpu != orig)
1806                         schedule_work_on(cpu, work);
1807         }
1808         if (orig >= 0)
1809                 func(per_cpu_ptr(works, orig));
1810
1811         for_each_online_cpu(cpu)
1812                 flush_work(per_cpu_ptr(works, cpu));
1813
1814         put_online_cpus();
1815         free_percpu(works);
1816         return 0;
1817 }
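
/*
 * A minimal schedule_on_each_cpu() sketch: run a function on every online
 * CPU and wait for all of them to finish.  foo_flush_local_state() is a
 * hypothetical helper operating on this CPU's private data.
 *
 *      static void foo_flush_local(struct work_struct *unused)
 *      {
 *              foo_flush_local_state();
 *      }
 *
 *      int foo_flush_all_cpus(void)
 *      {
 *              return schedule_on_each_cpu(foo_flush_local);
 *      }
 */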
1818
1819 /**
1820  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1821  *
1822  * Forces execution of the kernel-global workqueue and blocks until its
1823  * completion.
1824  *
1825  * Think twice before calling this function!  It's very easy to get into
1826  * trouble if you don't take great care.  Either of the following situations
1827  * will lead to deadlock:
1828  *
1829  *      One of the work items currently on the workqueue needs to acquire
1830  *      a lock held by your code or its caller.
1831  *
1832  *      Your code is running in the context of a work routine.
1833  *
1834  * They will be detected by lockdep when they occur, but the first might not
1835  * occur very often.  It depends on what work items are on the workqueue and
1836  * what locks they need, which you have no control over.
1837  *
1838  * In most situations flushing the entire workqueue is overkill; you merely
1839  * need to know that a particular work item isn't queued and isn't running.
1840  * In such cases you should use cancel_delayed_work_sync() or
1841  * cancel_work_sync() instead.
1842  */
1843 void flush_scheduled_work(void)
1844 {
1845         flush_workqueue(keventd_wq);
1846 }
1847 EXPORT_SYMBOL(flush_scheduled_work);
1848
1849 /**
1850  * execute_in_process_context - reliably execute the routine with user context
1851  * @fn:         the function to execute
1852  * @ew:         guaranteed storage for the execute work structure (must
1853  *              be available when the work executes)
1854  *
1855  * Executes the function immediately if process context is available,
1856  * otherwise schedules the function for delayed execution.
1857  *
1858  * Returns:     0 - function was executed
1859  *              1 - function was scheduled for execution
1860  */
1861 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1862 {
1863         if (!in_interrupt()) {
1864                 fn(&ew->work);
1865                 return 0;
1866         }
1867
1868         INIT_WORK(&ew->work, fn);
1869         schedule_work(&ew->work);
1870
1871         return 1;
1872 }
1873 EXPORT_SYMBOL_GPL(execute_in_process_context);
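
/*
 * A minimal execute_in_process_context() sketch.  A hypothetical object may
 * be destroyed from interrupt context; the release function then either
 * runs immediately (process context available) or is deferred to the
 * kernel-global workqueue using the embedded execute_work storage.
 *
 *      struct foo_obj {
 *              struct execute_work     ew;
 *              void                    *payload;
 *      };
 *
 *      static void foo_free_fn(struct work_struct *work)
 *      {
 *              struct foo_obj *obj = container_of(work, struct foo_obj,
 *                                                 ew.work);
 *
 *              kfree(obj->payload);
 *              kfree(obj);
 *      }
 *
 *      void foo_destroy(struct foo_obj *obj)
 *      {
 *              execute_in_process_context(foo_free_fn, &obj->ew);
 *      }
 */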
1874
1875 int keventd_up(void)
1876 {
1877         return keventd_wq != NULL;
1878 }
1879
1880 int current_is_keventd(void)
1881 {
1882         bool found = false;
1883         unsigned int cpu;
1884
1885         /*
1886          * There is no longer a one-to-one relation between a worker and
1887          * its workqueue, and a worker task might be unbound from its cpu
1888          * if the cpu was offlined.  Match all busy workers.  This
1889          * function will go away once a dynamic worker pool is implemented.
1890          */
1891         for_each_possible_cpu(cpu) {
1892                 struct global_cwq *gcwq = get_gcwq(cpu);
1893                 struct worker *worker;
1894                 struct hlist_node *pos;
1895                 unsigned long flags;
1896                 int i;
1897
1898                 spin_lock_irqsave(&gcwq->lock, flags);
1899
1900                 for_each_busy_worker(worker, i, pos, gcwq) {
1901                         if (worker->task == current) {
1902                                 found = true;
1903                                 break;
1904                         }
1905                 }
1906
1907                 spin_unlock_irqrestore(&gcwq->lock, flags);
1908                 if (found)
1909                         break;
1910         }
1911
1912         return found;
1913 }
1914
1915 static struct cpu_workqueue_struct *alloc_cwqs(void)
1916 {
1917         /*
1918          * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
1919          * Make sure that the alignment isn't lower than that of
1920          * unsigned long long.
1921          */
1922         const size_t size = sizeof(struct cpu_workqueue_struct);
1923         const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
1924                                    __alignof__(unsigned long long));
1925         struct cpu_workqueue_struct *cwqs;
1926 #ifndef CONFIG_SMP
1927         void *ptr;
1928
1929         /*
1930          * On UP, percpu allocator doesn't honor alignment parameter
1931          * and simply uses arch-dependent default.  Allocate enough
1932          * room to align cwq and put an extra pointer at the end
1933          * pointing back to the originally allocated pointer which
1934          * will be used for freeing.
1935          *
1936          * FIXME: This really belongs to UP percpu code.  Update UP
1937          * percpu code to honor alignment and remove this ugliness.
1938          */
1939         ptr = __alloc_percpu(size + align + sizeof(void *), 1);
1940         cwqs = PTR_ALIGN(ptr, align);
1941         *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
1942 #else
1943         /* On SMP, percpu allocator can do it itself */
1944         cwqs = __alloc_percpu(size, align);
1945 #endif
1946         /* just in case, make sure it's actually aligned */
1947         BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
1948         return cwqs;
1949 }
1950
1951 static void free_cwqs(struct cpu_workqueue_struct *cwqs)
1952 {
1953 #ifndef CONFIG_SMP
1954         /* on UP, the pointer to free is stored right after the cwq */
1955         if (cwqs)
1956                 free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
1957 #else
1958         free_percpu(cwqs);
1959 #endif
1960 }
1961
1962 struct workqueue_struct *__create_workqueue_key(const char *name,
1963                                                 unsigned int flags,
1964                                                 int max_active,
1965                                                 struct lock_class_key *key,
1966                                                 const char *lock_name)
1967 {
1968         struct workqueue_struct *wq;
1969         bool failed = false;
1970         unsigned int cpu;
1971
1972         max_active = clamp_val(max_active, 1, INT_MAX);
1973
1974         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1975         if (!wq)
1976                 goto err;
1977
1978         wq->cpu_wq = alloc_cwqs();
1979         if (!wq->cpu_wq)
1980                 goto err;
1981
1982         wq->flags = flags;
1983         wq->saved_max_active = max_active;
1984         mutex_init(&wq->flush_mutex);
1985         atomic_set(&wq->nr_cwqs_to_flush, 0);
1986         INIT_LIST_HEAD(&wq->flusher_queue);
1987         INIT_LIST_HEAD(&wq->flusher_overflow);
1988         wq->single_cpu = NR_CPUS;
1989
1990         wq->name = name;
1991         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1992         INIT_LIST_HEAD(&wq->list);
1993
1994         cpu_maps_update_begin();
1995         /*
1996          * We must initialize cwqs for each possible cpu even if we
1997          * are going to call destroy_workqueue() finally. Otherwise
1998          * cpu_up() can hit the uninitialized cwq once we drop the
1999          * lock.
2000          */
2001         for_each_possible_cpu(cpu) {
2002                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2003                 struct global_cwq *gcwq = get_gcwq(cpu);
2004
2005                 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
2006                 cwq->gcwq = gcwq;
2007                 cwq->wq = wq;
2008                 cwq->flush_color = -1;
2009                 cwq->max_active = max_active;
2010                 INIT_LIST_HEAD(&cwq->delayed_works);
2011
2012                 if (failed)
2013                         continue;
2014                 cwq->worker = create_worker(gcwq, cpu_online(cpu));
2015                 if (cwq->worker)
2016                         start_worker(cwq->worker);
2017                 else
2018                         failed = true;
2019         }
2020
2021         /*
2022          * workqueue_lock protects global freeze state and workqueues
2023          * list.  Grab it, set max_active accordingly and add the new
2024          * workqueue to workqueues list.
2025          */
2026         spin_lock(&workqueue_lock);
2027
2028         if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2029                 for_each_possible_cpu(cpu)
2030                         get_cwq(cpu, wq)->max_active = 0;
2031
2032         list_add(&wq->list, &workqueues);
2033
2034         spin_unlock(&workqueue_lock);
2035
2036         cpu_maps_update_done();
2037
2038         if (failed) {
2039                 destroy_workqueue(wq);
2040                 wq = NULL;
2041         }
2042         return wq;
2043 err:
2044         if (wq) {
2045                 free_cwqs(wq->cpu_wq);
2046                 kfree(wq);
2047         }
2048         return NULL;
2049 }
2050 EXPORT_SYMBOL_GPL(__create_workqueue_key);
2051
2052 /**
2053  * destroy_workqueue - safely terminate a workqueue
2054  * @wq: target workqueue
2055  *
2056  * Safely destroy a workqueue. All work currently pending will be done first.
2057  */
2058 void destroy_workqueue(struct workqueue_struct *wq)
2059 {
2060         unsigned int cpu;
2061
2062         flush_workqueue(wq);
2063
2064         /*
2065          * The wq list is used for freezing.  Remove wq from the list
2066          * only after flushing is complete in case a freeze races us.
2067          */
2068         cpu_maps_update_begin();
2069         spin_lock(&workqueue_lock);
2070         list_del(&wq->list);
2071         spin_unlock(&workqueue_lock);
2072         cpu_maps_update_done();
2073
2074         for_each_possible_cpu(cpu) {
2075                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2076                 struct global_cwq *gcwq = cwq->gcwq;
2077                 int i;
2078
2079                 if (cwq->worker) {
2080                 retry:
2081                         spin_lock_irq(&gcwq->lock);
2082                         /*
2083                          * Worker can only be destroyed while idle.
2084                          * Wait till it becomes idle.  This is ugly
2085                          * and prone to starvation.  It will go away
2086                          * once dynamic worker pool is implemented.
2087                          */
2088                         if (!(cwq->worker->flags & WORKER_IDLE)) {
2089                                 spin_unlock_irq(&gcwq->lock);
2090                                 msleep(100);
2091                                 goto retry;
2092                         }
2093                         destroy_worker(cwq->worker);
2094                         cwq->worker = NULL;
2095                         spin_unlock_irq(&gcwq->lock);
2096                 }
2097
2098                 for (i = 0; i < WORK_NR_COLORS; i++)
2099                         BUG_ON(cwq->nr_in_flight[i]);
2100                 BUG_ON(cwq->nr_active);
2101                 BUG_ON(!list_empty(&cwq->delayed_works));
2102         }
2103
2104         free_cwqs(wq->cpu_wq);
2105         kfree(wq);
2106 }
2107 EXPORT_SYMBOL_GPL(destroy_workqueue);
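
/*
 * A minimal create_workqueue()/destroy_workqueue() sketch.  A hypothetical
 * module keeps a private workqueue for its work items and tears it down on
 * exit; destroy_workqueue() flushes all pending work before freeing.
 *
 *      static struct workqueue_struct *foo_wq;
 *
 *      static int __init foo_init(void)
 *      {
 *              foo_wq = create_workqueue("foo");
 *              if (!foo_wq)
 *                      return -ENOMEM;
 *              return 0;
 *      }
 *
 *      static void __exit foo_exit(void)
 *      {
 *              destroy_workqueue(foo_wq);
 *      }
 *
 * Work items are then queued with queue_work(foo_wq, &some_work) and the
 * whole queue can be drained with flush_workqueue(foo_wq).
 */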
2108
2109 /*
2110  * CPU hotplug.
2111  *
2112  * CPU hotplug is implemented by allowing cwqs to be detached from
2113  * CPU, running with unbound workers and allowing them to be
2114  * reattached later if the cpu comes back online.  A separate thread
2115  * is created to govern cwqs in such state and is called the trustee.
2116  *
2117  * Trustee states and their descriptions.
2118  *
2119  * START        Command state used on startup.  On CPU_DOWN_PREPARE, a
2120  *              new trustee is started with this state.
2121  *
2122  * IN_CHARGE    Once started, trustee will enter this state after
2123  *              making all existing workers rogue.  DOWN_PREPARE waits
2124  *              for trustee to enter this state.  After reaching
2125  *              IN_CHARGE, trustee tries to execute the pending
2126  *              worklist until it's empty and the state is set to
2127  *              BUTCHER, or the state is set to RELEASE.
2128  *
2129  * BUTCHER      Command state which is set by the cpu callback after
2130  *              the cpu has gone down.  Once this state is set, the trustee
2131  *              knows that there will be no new works on the worklist
2132  *              and once the worklist is empty it can proceed to
2133  *              killing idle workers.
2134  *
2135  * RELEASE      Command state which is set by the cpu callback if the
2136  *              cpu down has been canceled or it has come online
2137  *              again.  After recognizing this state, trustee stops
2138  *              trying to drain or butcher and transits to DONE.
2139  *
2140  * DONE         Trustee will enter this state after BUTCHER or RELEASE
2141  *              is complete.
2142  *
2143  *          trustee                 CPU                draining
2144  *         took over                down               complete
2145  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2146  *                        |                     |                  ^
2147  *                        | CPU is back online  v   return workers |
2148  *                         ----------------> RELEASE --------------
2149  */
2150
2151 /**
2152  * trustee_wait_event_timeout - timed event wait for trustee
2153  * @cond: condition to wait for
2154  * @timeout: timeout in jiffies
2155  *
2156  * wait_event_timeout() for trustee to use.  Handles locking and
2157  * checks for RELEASE request.
2158  *
2159  * CONTEXT:
2160  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2161  * multiple times.  To be used by trustee.
2162  *
2163  * RETURNS:
2164  * Positive indicating left time if @cond is satisfied, 0 if timed
2165  * out, -1 if canceled.
2166  */
2167 #define trustee_wait_event_timeout(cond, timeout) ({                    \
2168         long __ret = (timeout);                                         \
2169         while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
2170                __ret) {                                                 \
2171                 spin_unlock_irq(&gcwq->lock);                           \
2172                 __wait_event_timeout(gcwq->trustee_wait, (cond) ||      \
2173                         (gcwq->trustee_state == TRUSTEE_RELEASE),       \
2174                         __ret);                                         \
2175                 spin_lock_irq(&gcwq->lock);                             \
2176         }                                                               \
2177         gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);          \
2178 })
2179
2180 /**
2181  * trustee_wait_event - event wait for trustee
2182  * @cond: condition to wait for
2183  *
2184  * wait_event() for trustee to use.  Automatically handles locking and
2185  * checks for RELEASE request.
2186  *
2187  * CONTEXT:
2188  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2189  * multiple times.  To be used by trustee.
2190  *
2191  * RETURNS:
2192  * 0 if @cond is satisfied, -1 if canceled.
2193  */
2194 #define trustee_wait_event(cond) ({                                     \
2195         long __ret1;                                                    \
2196         __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2197         __ret1 < 0 ? -1 : 0;                                            \
2198 })
2199
2200 static int __cpuinit trustee_thread(void *__gcwq)
2201 {
2202         struct global_cwq *gcwq = __gcwq;
2203         struct worker *worker;
2204         struct hlist_node *pos;
2205         int i;
2206
2207         BUG_ON(gcwq->cpu != smp_processor_id());
2208
2209         spin_lock_irq(&gcwq->lock);
2210         /*
2211          * Make all workers rogue.  Trustee must be bound to the
2212          * target cpu and can't be cancelled.
2213          */
2214         BUG_ON(gcwq->cpu != smp_processor_id());
2215
2216         list_for_each_entry(worker, &gcwq->idle_list, entry)
2217                 worker->flags |= WORKER_ROGUE;
2218
2219         for_each_busy_worker(worker, i, pos, gcwq)
2220                 worker->flags |= WORKER_ROGUE;
2221
2222         /*
2223          * We're now in charge.  Notify and proceed to drain.  We need
2224          * to keep the gcwq running during the whole CPU down
2225          * procedure as other cpu hotunplug callbacks may need to
2226          * flush currently running tasks.
2227          */
2228         gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2229         wake_up_all(&gcwq->trustee_wait);
2230
2231         /*
2232          * The original cpu is in the process of dying and may go away
2233          * anytime now.  When that happens, we and all workers would
2234          * be migrated to other cpus.  Try draining any remaining work.
2235          * Note that if the gcwq is frozen, there may be frozen works
2236          * in freezeable cwqs.  Don't declare completion while frozen.
2237          */
2238         while (gcwq->nr_workers != gcwq->nr_idle ||
2239                gcwq->flags & GCWQ_FREEZING ||
2240                gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2241                 /* give a breather */
2242                 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2243                         break;
2244         }
2245
2246         /* notify completion */
2247         gcwq->trustee = NULL;
2248         gcwq->trustee_state = TRUSTEE_DONE;
2249         wake_up_all(&gcwq->trustee_wait);
2250         spin_unlock_irq(&gcwq->lock);
2251         return 0;
2252 }
2253
2254 /**
2255  * wait_trustee_state - wait for trustee to enter the specified state
2256  * @gcwq: gcwq the trustee of interest belongs to
2257  * @state: target state to wait for
2258  *
2259  * Wait for the trustee to reach @state.  DONE is already matched.
2260  *
2261  * CONTEXT:
2262  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2263  * multiple times.  To be used by cpu_callback.
2264  */
2265 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2266 {
2267         if (!(gcwq->trustee_state == state ||
2268               gcwq->trustee_state == TRUSTEE_DONE)) {
2269                 spin_unlock_irq(&gcwq->lock);
2270                 __wait_event(gcwq->trustee_wait,
2271                              gcwq->trustee_state == state ||
2272                              gcwq->trustee_state == TRUSTEE_DONE);
2273                 spin_lock_irq(&gcwq->lock);
2274         }
2275 }
2276
2277 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
2278                                                 unsigned long action,
2279                                                 void *hcpu)
2280 {
2281         unsigned int cpu = (unsigned long)hcpu;
2282         struct global_cwq *gcwq = get_gcwq(cpu);
2283         struct task_struct *new_trustee = NULL;
2284         struct worker *worker;
2285         struct hlist_node *pos;
2286         unsigned long flags;
2287         int i;
2288
2289         action &= ~CPU_TASKS_FROZEN;
2290
2291         switch (action) {
2292         case CPU_DOWN_PREPARE:
2293                 new_trustee = kthread_create(trustee_thread, gcwq,
2294                                              "workqueue_trustee/%d", cpu);
2295                 if (IS_ERR(new_trustee))
2296                         return notifier_from_errno(PTR_ERR(new_trustee));
2297                 kthread_bind(new_trustee, cpu);
2298         }
2299
2300         /* some are called w/ irq disabled, don't disturb irq status */
2301         spin_lock_irqsave(&gcwq->lock, flags);
2302
2303         switch (action) {
2304         case CPU_DOWN_PREPARE:
2305                 /* initialize trustee and tell it to acquire the gcwq */
2306                 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2307                 gcwq->trustee = new_trustee;
2308                 gcwq->trustee_state = TRUSTEE_START;
2309                 wake_up_process(gcwq->trustee);
2310                 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2311                 break;
2312
2313         case CPU_POST_DEAD:
2314                 gcwq->trustee_state = TRUSTEE_BUTCHER;
2315                 break;
2316
2317         case CPU_DOWN_FAILED:
2318         case CPU_ONLINE:
2319                 if (gcwq->trustee_state != TRUSTEE_DONE) {
2320                         gcwq->trustee_state = TRUSTEE_RELEASE;
2321                         wake_up_process(gcwq->trustee);
2322                         wait_trustee_state(gcwq, TRUSTEE_DONE);
2323                 }
2324
2325                 /* clear ROGUE from all workers */
2326                 list_for_each_entry(worker, &gcwq->idle_list, entry)
2327                         worker->flags &= ~WORKER_ROGUE;
2328
2329                 for_each_busy_worker(worker, i, pos, gcwq)
2330                         worker->flags &= ~WORKER_ROGUE;
2331                 break;
2332         }
2333
2334         spin_unlock_irqrestore(&gcwq->lock, flags);
2335
2336         return notifier_from_errno(0);
2337 }
2338
2339 #ifdef CONFIG_SMP
2340
2341 struct work_for_cpu {
2342         struct completion completion;
2343         long (*fn)(void *);
2344         void *arg;
2345         long ret;
2346 };
2347
2348 static int do_work_for_cpu(void *_wfc)
2349 {
2350         struct work_for_cpu *wfc = _wfc;
2351         wfc->ret = wfc->fn(wfc->arg);
2352         complete(&wfc->completion);
2353         return 0;
2354 }
2355
2356 /**
2357  * work_on_cpu - run a function in user context on a particular cpu
2358  * @cpu: the cpu to run on
2359  * @fn: the function to run
2360  * @arg: the function arg
2361  *
2362  * This will return the value @fn returns.
2363  * It is up to the caller to ensure that the cpu doesn't go offline.
2364  * The caller must not hold any locks which would prevent @fn from completing.
2365  */
2366 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
2367 {
2368         struct task_struct *sub_thread;
2369         struct work_for_cpu wfc = {
2370                 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
2371                 .fn = fn,
2372                 .arg = arg,
2373         };
2374
2375         sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
2376         if (IS_ERR(sub_thread))
2377                 return PTR_ERR(sub_thread);
2378         kthread_bind(sub_thread, cpu);
2379         wake_up_process(sub_thread);
2380         wait_for_completion(&wfc.completion);
2381         return wfc.ret;
2382 }
2383 EXPORT_SYMBOL_GPL(work_on_cpu);
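
/*
 * A minimal work_on_cpu() sketch.  foo_read_regs() is a hypothetical
 * function that must run on a particular CPU; the caller pins CPU hotplug
 * with get_online_cpus() as required by the comment above.
 *
 *      static long foo_read_regs(void *arg)
 *      {
 *              struct foo_device *foo = arg;
 *
 *              return foo_read_cpu_local_regs(foo);
 *      }
 *
 *      long foo_read_regs_on(unsigned int cpu, struct foo_device *foo)
 *      {
 *              long ret = -ENODEV;
 *
 *              get_online_cpus();
 *              if (cpu_online(cpu))
 *                      ret = work_on_cpu(cpu, foo_read_regs, foo);
 *              put_online_cpus();
 *              return ret;
 *      }
 */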
2384 #endif /* CONFIG_SMP */
2385
2386 #ifdef CONFIG_FREEZER
2387
2388 /**
2389  * freeze_workqueues_begin - begin freezing workqueues
2390  *
2391  * Start freezing workqueues.  After this function returns, all
2392  * freezeable workqueues will queue new works to their frozen_works
2393  * list instead of gcwq->worklist.
2394  *
2395  * CONTEXT:
2396  * Grabs and releases workqueue_lock and gcwq->lock's.
2397  */
2398 void freeze_workqueues_begin(void)
2399 {
2400         struct workqueue_struct *wq;
2401         unsigned int cpu;
2402
2403         spin_lock(&workqueue_lock);
2404
2405         BUG_ON(workqueue_freezing);
2406         workqueue_freezing = true;
2407
2408         for_each_possible_cpu(cpu) {
2409                 struct global_cwq *gcwq = get_gcwq(cpu);
2410
2411                 spin_lock_irq(&gcwq->lock);
2412
2413                 BUG_ON(gcwq->flags & GCWQ_FREEZING);
2414                 gcwq->flags |= GCWQ_FREEZING;
2415
2416                 list_for_each_entry(wq, &workqueues, list) {
2417                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2418
2419                         if (wq->flags & WQ_FREEZEABLE)
2420                                 cwq->max_active = 0;
2421                 }
2422
2423                 spin_unlock_irq(&gcwq->lock);
2424         }
2425
2426         spin_unlock(&workqueue_lock);
2427 }
2428
2429 /**
2430  * freeze_workqueues_busy - are freezeable workqueues still busy?
2431  *
2432  * Check whether freezing is complete.  This function must be called
2433  * between freeze_workqueues_begin() and thaw_workqueues().
2434  *
2435  * CONTEXT:
2436  * Grabs and releases workqueue_lock.
2437  *
2438  * RETURNS:
2439  * %true if some freezeable workqueues are still busy.  %false if
2440  * freezing is complete.
2441  */
2442 bool freeze_workqueues_busy(void)
2443 {
2444         struct workqueue_struct *wq;
2445         unsigned int cpu;
2446         bool busy = false;
2447
2448         spin_lock(&workqueue_lock);
2449
2450         BUG_ON(!workqueue_freezing);
2451
2452         for_each_possible_cpu(cpu) {
2453                 /*
2454                  * nr_active is monotonically decreasing.  It's safe
2455                  * to peek without lock.
2456                  */
2457                 list_for_each_entry(wq, &workqueues, list) {
2458                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2459
2460                         if (!(wq->flags & WQ_FREEZEABLE))
2461                                 continue;
2462
2463                         BUG_ON(cwq->nr_active < 0);
2464                         if (cwq->nr_active) {
2465                                 busy = true;
2466                                 goto out_unlock;
2467                         }
2468                 }
2469         }
2470 out_unlock:
2471         spin_unlock(&workqueue_lock);
2472         return busy;
2473 }
2474
2475 /**
2476  * thaw_workqueues - thaw workqueues
2477  *
2478  * Thaw workqueues.  Normal queueing is restored and all collected
2479  * frozen works are transferred to their respective gcwq worklists.
2480  *
2481  * CONTEXT:
2482  * Grabs and releases workqueue_lock and gcwq->lock's.
2483  */
2484 void thaw_workqueues(void)
2485 {
2486         struct workqueue_struct *wq;
2487         unsigned int cpu;
2488
2489         spin_lock(&workqueue_lock);
2490
2491         if (!workqueue_freezing)
2492                 goto out_unlock;
2493
2494         for_each_possible_cpu(cpu) {
2495                 struct global_cwq *gcwq = get_gcwq(cpu);
2496
2497                 spin_lock_irq(&gcwq->lock);
2498
2499                 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
2500                 gcwq->flags &= ~GCWQ_FREEZING;
2501
2502                 list_for_each_entry(wq, &workqueues, list) {
2503                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2504
2505                         if (!(wq->flags & WQ_FREEZEABLE))
2506                                 continue;
2507
2508                         /* restore max_active and repopulate worklist */
2509                         cwq->max_active = wq->saved_max_active;
2510
2511                         while (!list_empty(&cwq->delayed_works) &&
2512                                cwq->nr_active < cwq->max_active)
2513                                 cwq_activate_first_delayed(cwq);
2514
2515                         /* perform delayed unbind from single cpu if empty */
2516                         if (wq->single_cpu == gcwq->cpu &&
2517                             !cwq->nr_active && list_empty(&cwq->delayed_works))
2518                                 cwq_unbind_single_cpu(cwq);
2519
2520                         wake_up_process(cwq->worker->task);
2521                 }
2522
2523                 spin_unlock_irq(&gcwq->lock);
2524         }
2525
2526         workqueue_freezing = false;
2527 out_unlock:
2528         spin_unlock(&workqueue_lock);
2529 }
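
/*
 * Sketch of the calling sequence expected of the freezer for the three
 * functions above; the polling interval is illustrative only and does not
 * reflect the actual kernel/power code.
 *
 *      freeze_workqueues_begin();
 *      while (freeze_workqueues_busy())
 *              msleep(10);
 *      ... create the hibernation image / suspend devices ...
 *      thaw_workqueues();
 */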
2530 #endif /* CONFIG_FREEZER */
2531
2532 void __init init_workqueues(void)
2533 {
2534         unsigned int cpu;
2535         int i;
2536
2537         /*
2538          * The pointer part of work->data is either pointing to the
2539          * cwq or contains the cpu number the work ran last on.  Make
2540          * sure cpu number won't overflow into kernel pointer area so
2541          * that they can be distinguished.
2542          */
2543         BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
2544
2545         hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
2546
2547         /* initialize gcwqs */
2548         for_each_possible_cpu(cpu) {
2549                 struct global_cwq *gcwq = get_gcwq(cpu);
2550
2551                 spin_lock_init(&gcwq->lock);
2552                 INIT_LIST_HEAD(&gcwq->worklist);
2553                 gcwq->cpu = cpu;
2554
2555                 INIT_LIST_HEAD(&gcwq->idle_list);
2556                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2557                         INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2558
2559                 ida_init(&gcwq->worker_ida);
2560
2561                 gcwq->trustee_state = TRUSTEE_DONE;
2562                 init_waitqueue_head(&gcwq->trustee_wait);
2563         }
2564
2565         keventd_wq = create_workqueue("events");
2566         BUG_ON(!keventd_wq);
2567 }