/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	struct list_head	works;		/* list of pending works */
	struct task_struct	*thread;	/* stopper thread */
	bool			enabled;	/* is this stopper enabled? */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(struct cpu_stopper *stopper,
				struct cpu_stop_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
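
/*
 * Illustrative usage sketch, not part of the original file: my_stop_fn
 * and my_query_cpu are hypothetical.  The callback runs on the target
 * cpu with preemption disabled, so it must not sleep.
 */
#if 0
static int my_stop_fn(void *arg)
{
	/* executes on the stopped cpu; no sleeping allowed here */
	*(int *)arg = smp_processor_id();
	return 0;
}

static int my_query_cpu(unsigned int cpu, int *id)
{
	/* -ENOENT if @cpu was offline, otherwise my_stop_fn()'s result */
	return stop_one_cpu(cpu, my_stop_fn, id);
}
#endif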

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
}
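
/*
 * Illustrative sketch (hypothetical names): since the caller doesn't
 * wait, @work_buf must outlive the request -- a static or per-cpu
 * buffer as below, never a stack variable.  This mirrors how
 * stop_cpus_work is used further down in this file.
 */
#if 0
static int my_nowait_fn(void *unused)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, my_stop_work);

static void my_kick_cpu(unsigned int cpu)
{
	stop_one_cpu_nowait(cpu, my_nowait_fn, NULL,
			    &per_cpu(my_stop_work, cpu));
}
#endif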

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_work *work;
	struct cpu_stop_done done;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = &done;
	}
	cpu_stop_init_done(&done, cpumask_weight(cpumask));

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
	preempt_enable();

	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
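
/*
 * Illustrative sketch (hypothetical names): run a callback on every
 * online cpu at once.  Because all stop_cpus() calls are serialized,
 * @fn may safely spin waiting for the other cpus to reach it.
 */
#if 0
static int my_sync_fn(void *arg)
{
	/* each targeted cpu runs this with preemption disabled */
	atomic_inc((atomic_t *)arg);
	return 0;
}

static int my_stop_all(void)
{
	atomic_t count = ATOMIC_INIT(0);

	/* stack storage is fine -- stop_cpus() waits for completion */
	return stop_cpus(cpu_online_mask, my_sync_fn, &count);
}
#endif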

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
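
/*
 * Illustrative sketch (hypothetical names, assumes linux/delay.h):
 * a caller that prefers polling over sleeping on stop_cpus_mutex can
 * back off and retry on -EAGAIN.
 */
#if 0
static int my_noop_fn(void *unused)
{
	return 0;
}

static int my_stop_cpus_polling(const struct cpumask *mask)
{
	int ret;

	while ((ret = try_stop_cpus(mask, my_noop_fn, NULL)) == -EAGAIN)
		msleep(1);
	return ret;
}
#endif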

static int cpu_stopper_thread(void *data)
{
	struct cpu_stopper *stopper = data;
	struct cpu_stop_work *work;
	int ret;

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN];

		__set_current_state(TASK_RUNNING);

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
	} else
		schedule();

	goto repeat;
}

/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	struct task_struct *p;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		BUG_ON(stopper->thread || stopper->enabled ||
		       !list_empty(&stopper->works));
		p = kthread_create(cpu_stopper_thread, stopper, "stopper/%d",
				   cpu);
		if (IS_ERR(p))
			return NOTIFY_BAD;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		get_task_struct(p);
		stopper->thread = p;
		break;

	case CPU_ONLINE:
		kthread_bind(stopper->thread, cpu);
		/* strictly unnecessary, as first user will wake it */
		wake_up_process(stopper->thread);
		/* mark enabled */
		spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
		spin_unlock_irq(&stopper->lock);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* kill the stopper */
		kthread_stop(stopper->thread);
		/* drain remaining works */
		spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
		spin_unlock_irq(&stopper->lock);
		/* release the stopper */
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
		break;
#endif
	}

	return NOTIFY_OK;
}

/*
 * Give it a higher priority so that cpu stopper is available to other
 * cpu notifiers.  It currently shares the same priority as sched
 * migration_notifier.
 */
static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
	.notifier_call	= cpu_stop_cpu_callback,
	.priority	= 10,
};

static int __init cpu_stop_init(void)
{
	void *bcpu = (void *)(long)smp_processor_id();
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	/* start one for the boot cpu */
	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
				    bcpu);
	BUG_ON(err == NOTIFY_BAD);
	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
	register_cpu_notifier(&cpu_stop_cpu_notifier);

	return 0;
}
early_initcall(cpu_stop_init);

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};
static enum stopmachine_state state;

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	int fnret;
};

/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
static DEFINE_MUTEX(lock);
/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
static DEFINE_MUTEX(setup_lock);
/* Users of stop_machine. */
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const struct cpumask *active_cpus;
static void __percpu *stop_machine_work;

static void set_state(enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&thread_ack, num_threads);
	smp_wmb();
	state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
	if (atomic_dec_and_test(&thread_ack))
		set_state(state + 1);
}
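
/*
 * Lock-step progression, e.g. with num_threads == 2: set_state(PREPARE)
 * arms thread_ack = 2; every stop_cpu() worker acks each state exactly
 * once, and whichever worker drops thread_ack to zero advances all of
 * them to the next state:
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * The smp_wmb() in set_state() makes the reset of thread_ack visible
 * before the new state, so a worker can never ack a state against the
 * previous state's counter.
 */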

/* This is the actual function which stops the CPU.  It runs
 * in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	struct stop_machine_data *smdata = &idle;
	int cpu = smp_processor_id();
	int err;

	if (!active_cpus) {
		if (cpu == cpumask_first(cpu_online_mask))
			smdata = &active;
	} else {
		if (cpumask_test_cpu(cpu, active_cpus))
			smdata = &active;
	}
	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* On multiple CPUs only a single error code
				 * is needed to tell that something failed. */
				err = smdata->fn(smdata->data);
				if (err)
					smdata->fnret = err;
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
}

/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}

int stop_machine_create(void)
{
	mutex_lock(&setup_lock);
	if (refcount)
		goto done;
	stop_machine_wq = create_rt_workqueue("kstop");
	if (!stop_machine_wq)
		goto err_out;
	stop_machine_work = alloc_percpu(struct work_struct);
	if (!stop_machine_work)
		goto err_out;
done:
	refcount++;
	mutex_unlock(&setup_lock);
	return 0;

err_out:
	if (stop_machine_wq)
		destroy_workqueue(stop_machine_wq);
	mutex_unlock(&setup_lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(stop_machine_create);

void stop_machine_destroy(void)
{
	mutex_lock(&setup_lock);
	refcount--;
	if (refcount)
		goto done;
	destroy_workqueue(stop_machine_wq);
	free_percpu(stop_machine_work);
done:
	mutex_unlock(&setup_lock);
}
EXPORT_SYMBOL_GPL(stop_machine_destroy);
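
/*
 * Illustrative sketch (hypothetical module): users that may need
 * stop_machine() later, possibly in contexts where allocation is
 * inconvenient, pin the workqueue up front and drop the reference
 * on exit.
 */
#if 0
static int __init my_module_init(void)
{
	return stop_machine_create();	/* takes a reference */
}

static void __exit my_module_exit(void)
{
	stop_machine_destroy();		/* drops the reference */
}
#endif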

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct work_struct *sm_work;
	int i, ret;

	/* Set up initial state. */
	mutex_lock(&lock);
	num_threads = num_online_cpus();
	active_cpus = cpus;
	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	set_state(STOPMACHINE_PREPARE);

	/* Schedule the stop_cpu work on all cpus: hold this CPU so one
	 * doesn't hit this CPU until we're ready. */
	get_cpu();
	for_each_online_cpu(i) {
		sm_work = per_cpu_ptr(stop_machine_work, i);
		INIT_WORK(sm_work, stop_cpu);
		queue_work_on(i, stop_machine_wq, sm_work);
	}
	/* This will release the thread on our CPU. */
	put_cpu();
	flush_workqueue(stop_machine_wq);
	ret = active.fnret;
	mutex_unlock(&lock);
	return ret;
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	ret = stop_machine_create();
	if (ret)
		return ret;
	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	stop_machine_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
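
/*
 * Illustrative sketch (hypothetical names): stop_machine() is the
 * heavyweight variant -- every online cpu spins with hard irqs off
 * while @fn runs.  With cpus == NULL the callback executes on the
 * first online cpu and all others run chill().
 */
#if 0
static int my_patch_fn(void *arg)
{
	/* the whole machine is quiesced around this point */
	*(int *)arg = 1;
	return 0;
}

static int my_do_patch(void)
{
	int patched = 0;

	return stop_machine(my_patch_fn, &patched, NULL);
}
#endif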