/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.status = DPM_ON;
        init_completion(&dev->power.completion);
        dev->power.wakeup_count = 0;
        pm_runtime_init(dev);
}

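/*
 * For illustration only (not part of this file): the PM core consults
 * dev->power.async_suspend when deciding whether a device may suspend
 * and resume asynchronously (see dpm_wait() and is_async() below).  A
 * driver would typically opt in at probe time, e.g. via the
 * device_enable_async_suspend() helper from <linux/pm.h>:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              device_enable_async_suspend(&pdev->dev);
 *              return 0;
 *      }
 *
 * foo_probe() is a hypothetical driver function used only as an example.
 */
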
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent) {
                if (dev->parent->power.status >= DPM_SUSPENDING)
                        dev_warn(dev, "parent %s should not be sleeping\n",
                                 dev_name(dev->parent));
        } else if (transition_started) {
                /*
                 * We refuse to register parentless devices while a PM
                 * transition is in progress in order to avoid leaving them
                 * unhandled down the road.
                 */
                dev_WARN(dev, "Parentless device registered during a PM transition\n");
        }

        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i\n",
                                dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
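
/*
 * Illustrative note (not part of this file): power.completion is what
 * orders asynchronous PM between relatives.  During suspend, a device
 * blocks in dpm_wait_for_children() until every child has signalled
 * completion, roughly:
 *
 *      __device_suspend(child):  ...
 *                                complete_all(&child->power.completion);
 *      __device_suspend(parent): dpm_wait_for_children(parent, async);
 *                                ...
 *
 * Resume uses the mirror image: device_resume() waits on the parent's
 * completion via dpm_wait() before touching the child.
 */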

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}

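/*
 * For illustration only (not part of this file): pm_op() dispatches into
 * a struct dev_pm_ops supplied by the device's bus type, device type or
 * class.  A minimal sketch of such a table, using hypothetical
 * foo_suspend()/foo_resume() callbacks, might look like:
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .suspend = foo_suspend,  // run for PM_EVENT_SUSPEND
 *              .resume  = foo_resume,   // run for PM_EVENT_RESUME
 *      };
 *
 * Any callback left NULL is simply skipped by pm_op() above.
 */
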
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                        const struct dev_pm_ops *ops,
                        pm_message_t state)
{
        int error = 0;
        ktime_t calltime, delta, rettime;

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                                dev_name(dev), task_pid_nr(current),
                                dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                        dev_name(dev), error,
                        (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        s64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

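/*
 * Illustrative note (not part of this file): dpm_show_time() produces one
 * summary line per phase in the kernel log.  With info == "late" and a
 * suspend transition that took about 52 ms, the output would be roughly:
 *
 *      PM: late suspend of devices complete after 52.431 msecs
 *
 * (the timing value here is made up for the example).
 */
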
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        }

End:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        list_for_each_entry(dev, &dpm_list, power.entry)
                if (dev->power.status > DPM_OFF) {
                        int error;

                        dev->power.status = DPM_OFF;
                        error = device_resume_noirq(dev, state);
                        if (error)
                                pm_dev_err(dev, state, " early", error);
                }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        dev->power.status = DPM_RESUMING;

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                }
        }
 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

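/*
 * For illustration only (not part of this file): device_resume() prefers
 * a bus's dev_pm_ops over its legacy ->resume() method, and consults
 * callbacks in bus -> type -> class order, the mirror image of the
 * class -> type -> bus order used by __device_suspend() below.  A legacy
 * callback, shown here with a hypothetical foo_legacy_resume(), has the
 * older signature without a dev_pm_ops table:
 *
 *      static int foo_legacy_resume(struct device *dev)
 *      {
 *              // reverse whatever the legacy ->suspend() did
 *              return 0;
 *      }
 *
 * Only if dev->bus->pm is NULL does the code above fall back to such a
 * method via legacy_resume().
 */
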
static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
        struct list_head list;
        struct device *dev;
        ktime_t starttime = ktime_get();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        list_for_each_entry(dev, &dpm_list, power.entry) {
                if (dev->power.status < DPM_OFF)
                        continue;

                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_list)) {
                dev = to_device(dpm_list.next);
                get_device(dev);
                if (dev->power.status >= DPM_OFF && !is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);

                        mutex_lock(&dpm_list_mtx);
                        if (error)
                                pm_dev_err(dev, state, "", error);
                } else if (dev->power.status == DPM_SUSPENDING) {
                        /* Allow new children of the device to be registered */
                        dev->power.status = DPM_RESUMING;
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->class && dev->class->pm && dev->class->pm->complete) {
                pm_dev_dbg(dev, state, "completing class ");
                dev->class->pm->complete(dev);
        }

        if (dev->type && dev->type->pm && dev->type->pm->complete) {
                pm_dev_dbg(dev, state, "completing type ");
                dev->type->pm->complete(dev);
        }

        if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
                pm_dev_dbg(dev, state, "completing ");
                dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                if (dev->power.status > DPM_ON) {
                        dev->power.status = DPM_ON;
                        mutex_unlock(&dpm_list_mtx);

                        device_complete(dev, state);
                        pm_runtime_put_sync(dev);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        might_sleep();
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

End:
        return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
                error = device_suspend_noirq(dev, state);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        break;
                }
                dev->power.status = DPM_OFF_IRQ;
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait_for_children(dev, async);
        device_lock(dev);

        if (async_error)
                goto End;

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

        if (!error)
                dev->power.status = DPM_OFF;

 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                pm_dev_err(dev, pm_transition, " async", error);
                async_error = error;
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

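/*
 * Illustrative note (not part of this file): the reference counting
 * around async_schedule() above follows the usual pattern, since the
 * async callback may run after the caller has moved on:
 *
 *      get_device(dev);                        // pin dev for the callback
 *      async_schedule(async_suspend, dev);     // may run later
 *      ...                                     // async_suspend() ends with
 *                                              // put_device(dev)
 *
 * dpm_resume() uses the same pattern for async_resume().
 */
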
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
        struct list_head list;
        ktime_t starttime = ktime_get();
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
                if (async_error)
                        break;
        }
        list_splice(&list, dpm_list.prev);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing ");
                error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm && dev->type->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing type ");
                error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm && dev->class->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing class ");
                error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        }
 End:
        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        struct list_head list;
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = true;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                dev->power.status = DPM_PREPARING;
                mutex_unlock(&dpm_list_mtx);

                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
                        /* Wake-up requested during system sleep transition. */
                        pm_runtime_put_sync(dev);
                        error = -EBUSY;
                } else {
                        error = device_prepare(dev, state);
                }

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        dev->power.status = DPM_ON;
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_ERR "PM: Failed to prepare device %s "
                                "for power transition: error %d\n",
                                kobject_name(&dev->kobj), error);
                        put_device(dev);
                        break;
                }
                dev->power.status = DPM_SUSPENDING;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        might_sleep();
        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

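/*
 * For illustration only (not part of this file): a system sleep path
 * (such as kernel/power/suspend.c) drives the exported entry points in
 * matched pairs, roughly:
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);
 *      if (!error) {
 *              error = dpm_suspend_noirq(PMSG_SUSPEND);
 *              if (!error) {
 *                      // platform enters the sleep state here
 *                      dpm_resume_noirq(PMSG_RESUME);
 *              }
 *      }
 *      dpm_resume_end(PMSG_RESUME);
 *
 * This sketch omits the platform hooks and error reporting a real
 * caller would include.
 */
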
void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
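
/*
 * For illustration only (not part of this file): device_pm_wait_for_dev()
 * lets a driver express a PM dependency that is not captured by the
 * parent/child tree.  A hypothetical foo_resume() that must run after an
 * unrelated companion device has resumed might do:
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct foo *foo = dev_get_drvdata(dev);
 *
 *              device_pm_wait_for_dev(dev, foo->companion);
 *              return foo_hw_init(foo);
 *      }
 *
 * "struct foo", foo->companion and foo_hw_init() are hypothetical names
 * used only for the example.
 */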