/* drivers/gpu/drm/radeon/radeon_pm.c */
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
        "Default",
        "Powersave",
        "Battery",
        "Balanced",
        "Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

#define ACPI_AC_CLASS           "ac_adapter"

#ifdef CONFIG_ACPI
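/*
 * ACPI notifier callback: on an "ac_adapter" event, re-evaluate the active
 * profile (only relevant when the "auto" profile is selected under the
 * profile method) and reprogram the clocks accordingly.
 */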
static int radeon_acpi_event(struct notifier_block *nb,
                             unsigned long val,
                             void *data)
{
        struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
        struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

        if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
                if (power_supply_is_system_supplied() > 0)
                        DRM_DEBUG("pm: AC\n");
                else
                        DRM_DEBUG("pm: DC\n");

                if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                        if (rdev->pm.profile == PM_PROFILE_AUTO) {
                                mutex_lock(&rdev->pm.mutex);
                                radeon_pm_update_profile(rdev);
                                radeon_pm_set_clocks(rdev);
                                mutex_unlock(&rdev->pm.mutex);
                        }
                }
        }

        return NOTIFY_OK;
}
#endif

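/*
 * Map the user-visible profile (default/auto/low/mid/high) to a profile
 * table index, taking the AC/DC state and the number of active CRTCs into
 * account, then pick the requested power state and clock mode from that
 * profile entry (the dpms_off variants are used when no CRTC is active).
 */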
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
        switch (rdev->pm.profile) {
        case PM_PROFILE_DEFAULT:
                rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
                break;
        case PM_PROFILE_AUTO:
                if (power_supply_is_system_supplied() > 0) {
                        if (rdev->pm.active_crtc_count > 1)
                                rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
                        else
                                rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
                } else {
                        if (rdev->pm.active_crtc_count > 1)
                                rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
                        else
                                rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
                }
                break;
        case PM_PROFILE_LOW:
                if (rdev->pm.active_crtc_count > 1)
                        rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
                else
                        rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
                break;
        case PM_PROFILE_MID:
                if (rdev->pm.active_crtc_count > 1)
                        rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
                else
                        rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
                break;
        case PM_PROFILE_HIGH:
                if (rdev->pm.active_crtc_count > 1)
                        rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
                else
                        rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
                break;
        }

        if (rdev->pm.active_crtc_count == 0) {
                rdev->pm.requested_power_state_index =
                        rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
                rdev->pm.requested_clock_mode_index =
                        rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
        } else {
                rdev->pm.requested_power_state_index =
                        rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
                rdev->pm.requested_clock_mode_index =
                        rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
        }
}

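/* Unmap the CPU mappings of all GEM objects currently placed in VRAM. */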
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects))
                return;

        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        ttm_bo_unmap_virtual(&bo->tbo);
        }
}

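/* Wait (with a timeout) for the next vblank on any active CRTC. */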
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
        if (rdev->pm.active_crtcs) {
                rdev->pm.vblank_sync = false;
                wait_event_timeout(
                        rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                        msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
        }
}

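/*
 * Program the requested power state if it differs from the current one.
 * Engine/memory clocks are never raised above the defaults, voltage and
 * PCIe-lane changes are applied before upclocking or after downclocking,
 * and for dynpm the switch is abandoned unless we are inside vblank.
 */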
static void radeon_set_power_state(struct radeon_device *rdev)
{
        u32 sclk, mclk;
        bool misc_after = false;

        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
            (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
                return;

        if (radeon_gui_idle(rdev)) {
                sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
                        clock_info[rdev->pm.requested_clock_mode_index].sclk;
                if (sclk > rdev->clock.default_sclk)
                        sclk = rdev->clock.default_sclk;

                mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
                        clock_info[rdev->pm.requested_clock_mode_index].mclk;
                if (mclk > rdev->clock.default_mclk)
                        mclk = rdev->clock.default_mclk;

                /* upvolt before raising clocks, downvolt after lowering clocks */
                if (sclk < rdev->pm.current_sclk)
                        misc_after = true;

                radeon_sync_with_vblank(rdev);

                if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                        if (!radeon_pm_in_vbl(rdev))
                                return;
                }

                radeon_pm_prepare(rdev);

                if (!misc_after)
                        /* voltage, pcie lanes, etc. */
                        radeon_pm_misc(rdev);

                /* set engine clock */
                if (sclk != rdev->pm.current_sclk) {
                        radeon_pm_debug_check_in_vbl(rdev, false);
                        radeon_set_engine_clock(rdev, sclk);
                        radeon_pm_debug_check_in_vbl(rdev, true);
                        rdev->pm.current_sclk = sclk;
                        DRM_DEBUG("Setting: e: %d\n", sclk);
                }

                /* set memory clock */
                if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
                        radeon_pm_debug_check_in_vbl(rdev, false);
                        radeon_set_memory_clock(rdev, mclk);
                        radeon_pm_debug_check_in_vbl(rdev, true);
                        rdev->pm.current_mclk = mclk;
                        DRM_DEBUG("Setting: m: %d\n", mclk);
                }

                if (misc_after)
                        /* voltage, pcie lanes, etc. */
                        radeon_pm_misc(rdev);

                radeon_pm_finish(rdev);

                rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
                rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
        } else
                DRM_DEBUG("pm: GUI not idle!!!\n");
}

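/*
 * Perform a full reclock: take the relevant locks, wait for the GPU to go
 * idle (GUI-idle interrupt on R600 and newer, a fence on older chips),
 * unmap VRAM BOs, hold vblank references across the switch, and finally
 * update the display watermarks for the new power state.
 */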
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
        int i;

        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
        mutex_lock(&rdev->cp.mutex);

        /* gui idle int has issues on older chips it seems */
        if (rdev->family >= CHIP_R600) {
                if (rdev->irq.installed) {
                        /* wait for GPU idle */
                        rdev->pm.gui_idle = false;
                        rdev->irq.gui_idle = true;
                        radeon_irq_set(rdev);
                        wait_event_interruptible_timeout(
                                rdev->irq.idle_queue, rdev->pm.gui_idle,
                                msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
                        rdev->irq.gui_idle = false;
                        radeon_irq_set(rdev);
                }
        } else {
                if (rdev->cp.ready) {
                        struct radeon_fence *fence;
                        radeon_ring_alloc(rdev, 64);
                        radeon_fence_create(rdev, &fence);
                        radeon_fence_emit(rdev, fence);
                        radeon_ring_commit(rdev);
                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);
                }
        }
        radeon_unmap_vram_bos(rdev);

        if (rdev->irq.installed) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
                                rdev->pm.req_vblank |= (1 << i);
                                drm_vblank_get(rdev->ddev, i);
                        }
                }
        }

        radeon_set_power_state(rdev);

        if (rdev->irq.installed) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.req_vblank & (1 << i)) {
                                rdev->pm.req_vblank &= ~(1 << i);
                                drm_vblank_put(rdev->ddev, i);
                        }
                }
        }

        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
        if (rdev->pm.active_crtc_count)
                radeon_bandwidth_update(rdev);

        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

        mutex_unlock(&rdev->cp.mutex);
        mutex_unlock(&rdev->vram_mutex);
        mutex_unlock(&rdev->ddev->struct_mutex);
}

static void radeon_pm_print_states(struct radeon_device *rdev)
{
        int i, j;
        struct radeon_power_state *power_state;
        struct radeon_pm_clock_info *clock_info;

        DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
        for (i = 0; i < rdev->pm.num_power_states; i++) {
                power_state = &rdev->pm.power_state[i];
                DRM_DEBUG("State %d: %s\n", i,
                        radeon_pm_state_type_name[power_state->type]);
                if (i == rdev->pm.default_power_state_index)
                        DRM_DEBUG("\tDefault");
                if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
                        DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
                if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                        DRM_DEBUG("\tSingle display only\n");
                DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
                for (j = 0; j < power_state->num_clock_modes; j++) {
                        clock_info = &(power_state->clock_info[j]);
                        if (rdev->flags & RADEON_IS_IGP)
                                DRM_DEBUG("\t\t%d e: %d%s\n",
                                        j,
                                        clock_info->sclk * 10,
                                        clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
                        else
                                DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
                                        j,
                                        clock_info->sclk * 10,
                                        clock_info->mclk * 10,
                                        clock_info->voltage.voltage,
                                        clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
                }
        }
}

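/*
 * sysfs interface for the profile-based method.  Two attributes are created
 * on the device in radeon_pm_init(): "power_profile" selects
 * default/auto/low/mid/high, and "power_method" switches between "profile"
 * and "dynpm".  They typically show up under the drm device directory,
 * e.g. /sys/class/drm/card0/device/power_profile (exact path may vary).
 */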
static ssize_t radeon_get_pm_profile(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;
        int cp = rdev->pm.profile;

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (cp == PM_PROFILE_AUTO) ? "auto" :
                        (cp == PM_PROFILE_LOW) ? "low" :
                        (cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;

        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
                        rdev->pm.profile = PM_PROFILE_DEFAULT;
                else if (strncmp("auto", buf, strlen("auto")) == 0)
                        rdev->pm.profile = PM_PROFILE_AUTO;
                else if (strncmp("low", buf, strlen("low")) == 0)
                        rdev->pm.profile = PM_PROFILE_LOW;
                else if (strncmp("mid", buf, strlen("mid")) == 0)
                        rdev->pm.profile = PM_PROFILE_MID;
                else if (strncmp("high", buf, strlen("high")) == 0)
                        rdev->pm.profile = PM_PROFILE_HIGH;
                else {
                        DRM_ERROR("invalid power profile!\n");
                        goto fail;
                }
                radeon_pm_update_profile(rdev);
                radeon_pm_set_clocks(rdev);
        }
fail:
        mutex_unlock(&rdev->pm.mutex);

        return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;
        int pm = rdev->pm.pm_method;

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}

static ssize_t radeon_set_pm_method(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t count)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;

        if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
                mutex_lock(&rdev->pm.mutex);
                rdev->pm.pm_method = PM_METHOD_DYNPM;
                rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
                rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
                mutex_unlock(&rdev->pm.mutex);
        } else if (strncmp("profile", buf, strlen("profile")) == 0) {
                bool flush_wq = false;

                mutex_lock(&rdev->pm.mutex);
                if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);
                        flush_wq = true;
                }
                /* disable dynpm */
                rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
                rdev->pm.pm_method = PM_METHOD_PROFILE;
                mutex_unlock(&rdev->pm.mutex);
                if (flush_wq)
                        flush_workqueue(rdev->wq);
        } else {
                DRM_ERROR("invalid power method!\n");
                goto fail;
        }
        radeon_pm_compute_clocks(rdev);
fail:
        return count;
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);

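/*
 * Suspend hook: stop the dynpm idle worker (if it is running) and remember
 * that dynpm was active so radeon_pm_resume() can restart it.
 */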
void radeon_pm_suspend(struct radeon_device *rdev)
{
        bool flush_wq = false;

        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                cancel_delayed_work(&rdev->pm.dynpm_idle_work);
                if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
                        rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
                flush_wq = true;
        }
        mutex_unlock(&rdev->pm.mutex);
        if (flush_wq)
                flush_workqueue(rdev->wq);
}

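/*
 * Resume hook: ASIC init has put the hardware back into its default power
 * state, so resynchronize the bookkeeping (current state, clocks, voltage)
 * with those defaults and restart the dynpm idle worker if it was suspended.
 */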
void radeon_pm_resume(struct radeon_device *rdev)
{
        /* asic init will reset the default power state */
        mutex_lock(&rdev->pm.mutex);
        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.current_clock_mode_index = 0;
        rdev->pm.current_sclk = rdev->clock.default_sclk;
        rdev->pm.current_mclk = rdev->clock.default_mclk;
        rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
        if (rdev->pm.pm_method == PM_METHOD_DYNPM
            && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
                rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
                queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
                                        msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
        }
        mutex_unlock(&rdev->pm.mutex);
        radeon_pm_compute_clocks(rdev);
}

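/*
 * One-time PM setup: default to the profile method, parse the power states
 * from the BIOS tables (ATOM or COMBIOS), and, when more than one power
 * state is available, register the sysfs attributes, the ACPI notifier,
 * the dynpm idle work and a debugfs info file.
 */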
int radeon_pm_init(struct radeon_device *rdev)
{
        int ret;
        /* default to profile method */
        rdev->pm.pm_method = PM_METHOD_PROFILE;
        rdev->pm.profile = PM_PROFILE_DEFAULT;
        rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
        rdev->pm.dynpm_can_upclock = true;
        rdev->pm.dynpm_can_downclock = true;
        rdev->pm.current_sclk = rdev->clock.default_sclk;
        rdev->pm.current_mclk = rdev->clock.default_mclk;

        if (rdev->bios) {
                if (rdev->is_atom_bios)
                        radeon_atombios_get_power_modes(rdev);
                else
                        radeon_combios_get_power_modes(rdev);
                radeon_pm_print_states(rdev);
                radeon_pm_init_profile(rdev);
        }

        if (rdev->pm.num_power_states > 1) {
                /* where's the best place to put these? */
                ret = device_create_file(rdev->dev, &dev_attr_power_profile);
                if (ret)
                        DRM_ERROR("failed to create device file for power profile\n");
                ret = device_create_file(rdev->dev, &dev_attr_power_method);
                if (ret)
                        DRM_ERROR("failed to create device file for power method\n");

#ifdef CONFIG_ACPI
                rdev->acpi_nb.notifier_call = radeon_acpi_event;
                register_acpi_notifier(&rdev->acpi_nb);
#endif
                INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

                if (radeon_debugfs_pm_init(rdev)) {
                        DRM_ERROR("Failed to register debugfs file for PM!\n");
                }

                DRM_INFO("radeon: power management initialized\n");
        }

        return 0;
}

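/*
 * Teardown: restore the default clocks for whichever method is active, stop
 * the dynpm worker, and remove the sysfs/ACPI hooks registered in
 * radeon_pm_init().
 */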
void radeon_pm_fini(struct radeon_device *rdev)
{
        if (rdev->pm.num_power_states > 1) {
                bool flush_wq = false;

                mutex_lock(&rdev->pm.mutex);
                if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                        rdev->pm.profile = PM_PROFILE_DEFAULT;
                        radeon_pm_update_profile(rdev);
                        radeon_pm_set_clocks(rdev);
                } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                        /* cancel work */
                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);
                        flush_wq = true;
                        /* reset default clocks */
                        rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
                        radeon_pm_set_clocks(rdev);
                }
                mutex_unlock(&rdev->pm.mutex);
                if (flush_wq)
                        flush_workqueue(rdev->wq);

                device_remove_file(rdev->dev, &dev_attr_power_profile);
                device_remove_file(rdev->dev, &dev_attr_power_method);
#ifdef CONFIG_ACPI
                unregister_acpi_notifier(&rdev->acpi_nb);
#endif
        }

        if (rdev->pm.i2c_bus)
                radeon_i2c_destroy(rdev->pm.i2c_bus);
}

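/*
 * Re-evaluate the power state after a display configuration change.
 * Recount the active CRTCs, then either reapply the current profile or,
 * for dynpm, switch between the active, paused (multi-head) and minimum
 * (no heads) states.
 */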
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;

        if (rdev->pm.num_power_states < 2)
                return;

        mutex_lock(&rdev->pm.mutex);

        rdev->pm.active_crtcs = 0;
        rdev->pm.active_crtc_count = 0;
        list_for_each_entry(crtc,
                &ddev->mode_config.crtc_list, head) {
                radeon_crtc = to_radeon_crtc(crtc);
                if (radeon_crtc->enabled) {
                        rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
                        rdev->pm.active_crtc_count++;
                }
        }

        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                radeon_pm_update_profile(rdev);
                radeon_pm_set_clocks(rdev);
        } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
                        if (rdev->pm.active_crtc_count > 1) {
                                if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
                                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);

                                        rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
                                        radeon_pm_get_dynpm_state(rdev);
                                        radeon_pm_set_clocks(rdev);

                                        DRM_DEBUG("radeon: dynamic power management deactivated\n");
                                }
                        } else if (rdev->pm.active_crtc_count == 1) {
                                /* TODO: Increase clocks if needed for current mode */

                                if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
                                        rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
                                        radeon_pm_get_dynpm_state(rdev);
                                        radeon_pm_set_clocks(rdev);

                                        queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
                                                           msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                                } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
                                        rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
                                        queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
                                                           msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                                        DRM_DEBUG("radeon: dynamic power management activated\n");
                                }
                        } else { /* count == 0 */
                                if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
                                        cancel_delayed_work(&rdev->pm.dynpm_idle_work);

                                        rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
                                        rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
                                        radeon_pm_get_dynpm_state(rdev);
                                        radeon_pm_set_clocks(rdev);
                                }
                        }
                }
        }

        mutex_unlock(&rdev->pm.mutex);
}

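/*
 * Check whether the active CRTC(s) are currently inside the vertical
 * blanking period, by comparing the scanout position against the blanking
 * start (DCE4/AVIVO) or by reading the CRTC status bit (older ASICs).
 */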
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
        u32 stat_crtc = 0, vbl = 0, position = 0;
        bool in_vbl = true;

        if (ASIC_IS_DCE4(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 2)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 3)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 4)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 5)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
                }
        } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
                        position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
                        position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
                }
                if (position < vbl && position > 1)
                        in_vbl = false;
        } else {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        stat_crtc = RREG32(RADEON_CRTC_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        stat_crtc = RREG32(RADEON_CRTC2_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        }

        if (position < vbl && position > 1)
                in_vbl = false;

        return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
        u32 stat_crtc = 0;
        bool in_vbl = radeon_pm_in_vbl(rdev);

        if (in_vbl == false)
                DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
                         finish ? "exit" : "entry");
        return in_vbl;
}

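/*
 * Periodic dynpm worker: sample how many fences are still outstanding and
 * plan an upclock (3 or more pending) or a downclock (none pending).  The
 * planned action is applied once its timeout expires, and the work is
 * re-queued for as long as dynpm stays active.
 */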
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
        struct radeon_device *rdev;
        int resched;
        rdev = container_of(work, struct radeon_device,
                                pm.dynpm_idle_work.work);

        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
                unsigned long irq_flags;
                int not_processed = 0;

                read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
                if (!list_empty(&rdev->fence_drv.emited)) {
                        struct list_head *ptr;
                        list_for_each(ptr, &rdev->fence_drv.emited) {
                                /* count up to 3, that's enough info */
                                if (++not_processed >= 3)
                                        break;
                        }
                }
                read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

                if (not_processed >= 3) { /* should upclock */
                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
                                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
                        } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
                                   rdev->pm.dynpm_can_upclock) {
                                rdev->pm.dynpm_planned_action =
                                        DYNPM_ACTION_UPCLOCK;
                                rdev->pm.dynpm_action_timeout = jiffies +
                                        msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                } else if (not_processed == 0) { /* should downclock */
                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
                                rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
                        } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
                                   rdev->pm.dynpm_can_downclock) {
                                rdev->pm.dynpm_planned_action =
                                        DYNPM_ACTION_DOWNCLOCK;
                                rdev->pm.dynpm_action_timeout = jiffies +
                                        msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                }

                /* Note, radeon_pm_set_clocks is called with static_switch set
                 * to false since we want to wait for vbl to avoid flicker.
                 */
                if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
                    jiffies > rdev->pm.dynpm_action_timeout) {
                        radeon_pm_get_dynpm_state(rdev);
                        radeon_pm_set_clocks(rdev);
                }

                queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
                                        msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
        }
        mutex_unlock(&rdev->pm.mutex);
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
        seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
        seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
        if (rdev->asic->get_memory_clock)
                seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
        if (rdev->pm.current_vddc)
                seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
        if (rdev->asic->get_pcie_lanes)
                seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

        return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
        {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
        return 0;
#endif
}