]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/base/power/main.c
PM: Make it possible to avoid races between wakeup and system sleep
[net-next-2.6.git] / drivers / base / power / main.c
CommitLineData
1da177e4
LT
1/*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
11 * This will initialize the embedded device_pm_info object in the device
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
1eede070
RW
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
1da177e4
LT
18 */
19
1da177e4 20#include <linux/device.h>
cd59abfc 21#include <linux/kallsyms.h>
11048dcf 22#include <linux/mutex.h>
cd59abfc 23#include <linux/pm.h>
5e928f77 24#include <linux/pm_runtime.h>
cd59abfc 25#include <linux/resume-trace.h>
2ed8d2b3 26#include <linux/interrupt.h>
f2511774 27#include <linux/sched.h>
5af84b82 28#include <linux/async.h>
11048dcf 29
cd59abfc 30#include "../base.h"
1da177e4
LT
31#include "power.h"
32
775b64d2 33/*
1eede070 34 * The entries in the dpm_list list are in a depth first order, simply
775b64d2
RW
35 * because children are guaranteed to be discovered after parents, and
36 * are inserted at the back of the list on discovery.
37 *
8e9394ce
GKH
38 * Since device_pm_add() may be called with a device lock held,
39 * we must never try to acquire a device lock while holding
775b64d2
RW
40 * dpm_list_mutex.
41 */
42
/* List of all devices known to the PM core, in depth-first discovery order. */
LIST_HEAD(dpm_list);

/* Protects dpm_list and the transition_started flag below. */
static DEFINE_MUTEX(dpm_list_mtx);

/* PM message of the transition currently in progress (set by dpm_suspend/resume). */
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	/* Completion used by dpm_wait() to order parent/child suspend/resume. */
	init_completion(&dev->power.completion);
	dev->power.wakeup_count = 0;
	pm_runtime_init(dev);
}
65
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
73
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
075c1771 81
775b64d2 82/**
20d652d7
RW
83 * device_pm_add - Add a device to the PM core's list of active devices.
84 * @dev: Device to add to the list.
775b64d2 85 */
3b98aeaf 86void device_pm_add(struct device *dev)
1da177e4 87{
1da177e4 88 pr_debug("PM: Adding info for %s:%s\n",
c48ea603
DT
89 dev->bus ? dev->bus->name : "No Bus",
90 kobject_name(&dev->kobj));
11048dcf 91 mutex_lock(&dpm_list_mtx);
1eede070 92 if (dev->parent) {
f5a6d958
RW
93 if (dev->parent->power.status >= DPM_SUSPENDING)
94 dev_warn(dev, "parent %s should not be sleeping\n",
1e0b2cf9 95 dev_name(dev->parent));
1eede070
RW
96 } else if (transition_started) {
97 /*
98 * We refuse to register parentless devices while a PM
99 * transition is in progress in order to avoid leaving them
100 * unhandled down the road
101 */
728f0893 102 dev_WARN(dev, "Parentless device registered during a PM transaction\n");
58aca232 103 }
3b98aeaf
AS
104
105 list_add_tail(&dev->power.entry, &dpm_list);
11048dcf 106 mutex_unlock(&dpm_list_mtx);
1da177e4
LT
107}
108
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	/*
	 * Release anybody blocked in dpm_wait() on this device, so nobody
	 * waits forever on a device that is going away.
	 */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}
124
ffa6a705 125/**
20d652d7
RW
126 * device_pm_move_before - Move device in the PM core's list of active devices.
127 * @deva: Device to move in dpm_list.
128 * @devb: Device @deva should come before.
ffa6a705
CH
129 */
130void device_pm_move_before(struct device *deva, struct device *devb)
131{
132 pr_debug("PM: Moving %s:%s before %s:%s\n",
133 deva->bus ? deva->bus->name : "No Bus",
134 kobject_name(&deva->kobj),
135 devb->bus ? devb->bus->name : "No Bus",
136 kobject_name(&devb->kobj));
137 /* Delete deva from dpm_list and reinsert before devb. */
138 list_move_tail(&deva->power.entry, &devb->power.entry);
139}
140
141/**
20d652d7
RW
142 * device_pm_move_after - Move device in the PM core's list of active devices.
143 * @deva: Device to move in dpm_list.
144 * @devb: Device @deva should come after.
ffa6a705
CH
145 */
146void device_pm_move_after(struct device *deva, struct device *devb)
147{
148 pr_debug("PM: Moving %s:%s after %s:%s\n",
149 deva->bus ? deva->bus->name : "No Bus",
150 kobject_name(&deva->kobj),
151 devb->bus ? devb->bus->name : "No Bus",
152 kobject_name(&devb->kobj));
153 /* Delete deva from dpm_list and reinsert after devb. */
154 list_move(&deva->power.entry, &devb->power.entry);
155}
156
157/**
20d652d7
RW
158 * device_pm_move_last - Move device to end of the PM core's list of devices.
159 * @dev: Device to move in dpm_list.
ffa6a705
CH
160 */
161void device_pm_move_last(struct device *dev)
162{
163 pr_debug("PM: Moving %s:%s to end of list\n",
164 dev->bus ? dev->bus->name : "No Bus",
165 kobject_name(&dev->kobj));
166 list_move_tail(&dev->power.entry, &dpm_list);
167}
168
875ab0b7
RW
169static ktime_t initcall_debug_start(struct device *dev)
170{
171 ktime_t calltime = ktime_set(0, 0);
172
173 if (initcall_debug) {
174 pr_info("calling %s+ @ %i\n",
175 dev_name(dev), task_pid_nr(current));
176 calltime = ktime_get();
177 }
178
179 return calltime;
180}
181
182static void initcall_debug_report(struct device *dev, ktime_t calltime,
183 int error)
184{
185 ktime_t delta, rettime;
186
187 if (initcall_debug) {
188 rettime = ktime_get();
189 delta = ktime_sub(rettime, calltime);
190 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
191 error, (unsigned long long)ktime_to_ns(delta) >> 10);
192 }
193}
194
5af84b82
RW
195/**
196 * dpm_wait - Wait for a PM operation to complete.
197 * @dev: Device to wait for.
198 * @async: If unset, wait only if the device's power.async_suspend flag is set.
199 */
200static void dpm_wait(struct device *dev, bool async)
201{
202 if (!dev)
203 return;
204
0e06b4a8 205 if (async || (pm_async_enabled && dev->power.async_suspend))
5af84b82
RW
206 wait_for_completion(&dev->power.completion);
207}
208
/* device_for_each_child() callback: wait for one child; always continues. */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
214
/* Wait until all children of @dev have finished their PM transitions. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
219
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Returns 0 when the matching callback is absent, the callback's result
 * when one is invoked, or -EINVAL for an unrecognized event.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
286
287/**
20d652d7
RW
288 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
289 * @dev: Device to handle.
290 * @ops: PM operations to choose from.
291 * @state: PM transition of the system being carried out.
1eede070 292 *
20d652d7
RW
293 * The driver of @dev will not receive interrupts while this function is being
294 * executed.
1eede070 295 */
d9ab7716
DT
296static int pm_noirq_op(struct device *dev,
297 const struct dev_pm_ops *ops,
1eede070
RW
298 pm_message_t state)
299{
300 int error = 0;
f2511774
AV
301 ktime_t calltime, delta, rettime;
302
303 if (initcall_debug) {
8cc6b39f
RW
304 pr_info("calling %s+ @ %i, parent: %s\n",
305 dev_name(dev), task_pid_nr(current),
306 dev->parent ? dev_name(dev->parent) : "none");
f2511774
AV
307 calltime = ktime_get();
308 }
1eede070
RW
309
310 switch (state.event) {
311#ifdef CONFIG_SUSPEND
312 case PM_EVENT_SUSPEND:
313 if (ops->suspend_noirq) {
314 error = ops->suspend_noirq(dev);
315 suspend_report_result(ops->suspend_noirq, error);
316 }
317 break;
318 case PM_EVENT_RESUME:
319 if (ops->resume_noirq) {
320 error = ops->resume_noirq(dev);
321 suspend_report_result(ops->resume_noirq, error);
322 }
323 break;
324#endif /* CONFIG_SUSPEND */
325#ifdef CONFIG_HIBERNATION
326 case PM_EVENT_FREEZE:
327 case PM_EVENT_QUIESCE:
328 if (ops->freeze_noirq) {
329 error = ops->freeze_noirq(dev);
330 suspend_report_result(ops->freeze_noirq, error);
331 }
332 break;
333 case PM_EVENT_HIBERNATE:
334 if (ops->poweroff_noirq) {
335 error = ops->poweroff_noirq(dev);
336 suspend_report_result(ops->poweroff_noirq, error);
337 }
338 break;
339 case PM_EVENT_THAW:
340 case PM_EVENT_RECOVER:
341 if (ops->thaw_noirq) {
342 error = ops->thaw_noirq(dev);
343 suspend_report_result(ops->thaw_noirq, error);
344 }
345 break;
346 case PM_EVENT_RESTORE:
347 if (ops->restore_noirq) {
348 error = ops->restore_noirq(dev);
349 suspend_report_result(ops->restore_noirq, error);
350 }
351 break;
352#endif /* CONFIG_HIBERNATION */
353 default:
354 error = -EINVAL;
355 }
f2511774
AV
356
357 if (initcall_debug) {
358 rettime = ktime_get();
359 delta = ktime_sub(rettime, calltime);
875ab0b7
RW
360 printk("initcall %s_i+ returned %d after %Ld usecs\n",
361 dev_name(dev), error,
362 (unsigned long long)ktime_to_ns(delta) >> 10);
f2511774
AV
363 }
364
1eede070
RW
365 return error;
366}
367
368static char *pm_verb(int event)
369{
370 switch (event) {
371 case PM_EVENT_SUSPEND:
372 return "suspend";
373 case PM_EVENT_RESUME:
374 return "resume";
375 case PM_EVENT_FREEZE:
376 return "freeze";
377 case PM_EVENT_QUIESCE:
378 return "quiesce";
379 case PM_EVENT_HIBERNATE:
380 return "hibernate";
381 case PM_EVENT_THAW:
382 return "thaw";
383 case PM_EVENT_RESTORE:
384 return "restore";
385 case PM_EVENT_RECOVER:
386 return "recover";
387 default:
388 return "(unknown PM event)";
389 }
390}
391
/* Emit a debug message describing the PM operation about to run on @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
398
/* Report a failed device PM callback for the given transition. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
405
/**
 * dpm_show_time - Report how long a suspend/resume phase of all devices took.
 * @starttime: Timestamp taken when the phase started.
 * @state: PM transition of the system being carried out.
 * @info: Optional phase label (e.g. "early", "late"); may be NULL.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Round sub-microsecond durations up so "0.000 msecs" is never shown. */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
422
cd59abfc
AS
423/*------------------------- Resume routines -------------------------*/
424
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Resume order is bus, then type, then class — reverse of suspend. */
	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}
463
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	/* Re-enable the device interrupts disabled by dpm_suspend_noirq(). */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
775b64d2 492
875ab0b7
RW
493/**
494 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
0a884223
RD
495 * @dev: Device to resume.
496 * @cb: Resume callback to execute.
875ab0b7
RW
497 */
498static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
499{
500 int error;
501 ktime_t calltime;
502
503 calltime = initcall_debug_start(dev);
504
505 error = cb(dev);
506 suspend_report_result(cb, error);
507
508 initcall_debug_report(dev, calltime, error);
509
510 return error;
511}
512
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* The parent must have finished resuming before this device starts. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	/* Resume order is bus, then type, then class — reverse of suspend. */
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	/* Wake up anybody waiting on this device (children, wait_for_dev). */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}
568
5af84b82
RW
569static void async_resume(void *data, async_cookie_t cookie)
570{
571 struct device *dev = (struct device *)data;
572 int error;
573
97df8c12 574 error = device_resume(dev, pm_transition, true);
5af84b82
RW
575 if (error)
576 pm_dev_err(dev, pm_transition, " async", error);
577 put_device(dev);
578}
579
97df8c12 580static bool is_async(struct device *dev)
5af84b82 581{
97df8c12
RW
582 return dev->power.async_suspend && pm_async_enabled
583 && !pm_trace_is_enabled();
5af84b82
RW
584}
585
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/* First pass: kick off all asynchronous resumes. */
	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			/* Reference dropped by async_resume(). */
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume remaining devices synchronously, in order. */
	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			/* Drop the lock around the callback; it may sleep. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all asynchronous resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
640
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	/* ->complete() runs in reverse ->prepare() order: class, type, bus. */
	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}
667
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	/* Walk from the tail so devices complete in reverse prepare order. */
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			/* Balances pm_runtime_get_noresume() in dpm_prepare(). */
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
702
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
cd59abfc
AS
717
718
719/*------------------------- Suspend routines -------------------------*/
720
1eede070 721/**
20d652d7
RW
722 * resume_event - Return a "resume" message for given "suspend" sleep state.
723 * @sleep_state: PM message representing a sleep state.
724 *
725 * Return a PM message representing the resume event corresponding to given
726 * sleep state.
1eede070
RW
727 */
728static pm_message_t resume_event(pm_message_t sleep_state)
cd59abfc 729{
1eede070
RW
730 switch (sleep_state.event) {
731 case PM_EVENT_SUSPEND:
732 return PMSG_RESUME;
733 case PM_EVENT_FREEZE:
734 case PM_EVENT_QUIESCE:
735 return PMSG_RECOVER;
736 case PM_EVENT_HIBERNATE:
737 return PMSG_RESTORE;
cd59abfc 738 }
1eede070 739 return PMSG_ON;
cd59abfc
AS
740}
741
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	/* Suspend order is class, then type, then bus — reverse of resume. */
	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}
776
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk in reverse so children are suspended before their parents. */
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	/* On failure, roll back the devices already suspended above. */
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
775b64d2 808
875ab0b7
RW
809/**
810 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
0a884223
RD
811 * @dev: Device to suspend.
812 * @state: PM transition of the system being carried out.
813 * @cb: Suspend callback to execute.
875ab0b7
RW
814 */
815static int legacy_suspend(struct device *dev, pm_message_t state,
816 int (*cb)(struct device *dev, pm_message_t state))
817{
818 int error;
819 ktime_t calltime;
820
821 calltime = initcall_debug_start(dev);
822
823 error = cb(dev, state);
824 suspend_report_result(cb, error);
825
826 initcall_debug_report(dev, calltime, error);
827
828 return error;
829}
830
/* First error reported by an asynchronous suspend thread; 0 when none. */
static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	/* All children must be suspended before their parent. */
	dpm_wait_for_children(dev, async);
	device_lock(dev);

	/* Abort early if an asynchronous suspend already failed. */
	if (async_error)
		goto End;

	/* Suspend order is class, then type, then bus — reverse of resume. */
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	device_unlock(dev);
	/* Let the parent (waiting in dpm_wait_for_children()) proceed. */
	complete_all(&dev->power.completion);

	return error;
}
889
5af84b82
RW
890static void async_suspend(void *data, async_cookie_t cookie)
891{
892 struct device *dev = (struct device *)data;
893 int error;
894
895 error = __device_suspend(dev, pm_transition, true);
896 if (error) {
897 pm_dev_err(dev, pm_transition, " async", error);
898 async_error = error;
899 }
900
901 put_device(dev);
902}
903
904static int device_suspend(struct device *dev)
905{
906 INIT_COMPLETION(dev->power.completion);
907
0e06b4a8 908 if (pm_async_enabled && dev->power.async_suspend) {
5af84b82
RW
909 get_device(dev);
910 async_schedule(async_suspend, dev);
911 return 0;
912 }
913
914 return __device_suspend(dev, pm_transition, false);
915}
916
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Take devices from the tail so children are suspended before parents. */
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		/* Drop the lock around the callback; it may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		/* Stop early if an asynchronous suspend has already failed. */
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all asynchronous suspends scheduled above to finish. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
960
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	/* ->prepare() order is bus, then type, then class. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}
cd59abfc 1001
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		/* Hold a runtime-PM reference across the whole sleep transition;
		 * it is released by dpm_complete(). */
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				/* Skip this device but keep preparing the rest. */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
1055
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
cd59abfc
AS
1074
1075void __suspend_report_result(const char *function, void *fn, int ret)
1076{
c80cfb04
BH
1077 if (ret)
1078 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
cd59abfc
AS
1079}
1080EXPORT_SYMBOL_GPL(__suspend_report_result);
f8824cee
RW
1081
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);