/*
 * drivers/gpu/drm/radeon/radeon_pm.c
 *
 * [rfc] drm/radeon/kms: pm debugging check for vbl.
 */
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <linux/jiffies.h>
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
c913e23a
RM
27#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200
73a6d3fc 29#define RADEON_WAIT_VBLANK_TIMEOUT 200
c913e23a 30
c913e23a
RM
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev);
c913e23a
RM
33static void radeon_pm_idle_work_handler(struct work_struct *work);
34static int radeon_debugfs_pm_init(struct radeon_device *rdev);
35
36static const char *pm_state_names[4] = {
37 "PM_STATE_DISABLED",
38 "PM_STATE_MINIMUM",
39 "PM_STATE_PAUSED",
40 "PM_STATE_ACTIVE"
41};
7433874e 42
0ec0e74f
AD
43static const char *pm_state_types[5] = {
44 "Default",
45 "Powersave",
46 "Battery",
47 "Balanced",
48 "Performance",
49};
50
56278a8e
AD
51static void radeon_print_power_mode_info(struct radeon_device *rdev)
52{
53 int i, j;
54 bool is_default;
55
56 DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
59 is_default = true;
60 else
61 is_default = false;
0ec0e74f
AD
62 DRM_INFO("State %d %s %s\n", i,
63 pm_state_types[rdev->pm.power_state[i].type],
64 is_default ? "(default)" : "");
56278a8e
AD
65 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
66 DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
67 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
68 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
69 if (rdev->flags & RADEON_IS_IGP)
70 DRM_INFO("\t\t%d engine: %d\n",
71 j,
72 rdev->pm.power_state[i].clock_info[j].sclk * 10);
73 else
74 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
75 j,
76 rdev->pm.power_state[i].clock_info[j].sclk * 10,
77 rdev->pm.power_state[i].clock_info[j].mclk * 10);
78 }
79 }
80}
81
516d0e46
AD
82static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
83 enum radeon_pm_state_type type)
84{
bc4624ca
RM
85 int i, j;
86 enum radeon_pm_state_type wanted_types[2];
87 int wanted_count;
516d0e46
AD
88
89 switch (type) {
90 case POWER_STATE_TYPE_DEFAULT:
91 default:
92 return rdev->pm.default_power_state;
93 case POWER_STATE_TYPE_POWERSAVE:
bc4624ca
RM
94 wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
95 wanted_types[1] = POWER_STATE_TYPE_BATTERY;
96 wanted_count = 2;
516d0e46
AD
97 break;
98 case POWER_STATE_TYPE_BATTERY:
bc4624ca
RM
99 wanted_types[0] = POWER_STATE_TYPE_BATTERY;
100 wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
101 wanted_count = 2;
516d0e46
AD
102 break;
103 case POWER_STATE_TYPE_BALANCED:
104 case POWER_STATE_TYPE_PERFORMANCE:
bc4624ca
RM
105 wanted_types[0] = type;
106 wanted_count = 1;
516d0e46
AD
107 break;
108 }
109
bc4624ca
RM
110 for (i = 0; i < wanted_count; i++) {
111 for (j = 0; j < rdev->pm.num_power_states; j++) {
112 if (rdev->pm.power_state[j].type == wanted_types[i])
113 return &rdev->pm.power_state[j];
114 }
115 }
516d0e46 116
bc4624ca 117 return rdev->pm.default_power_state;
516d0e46
AD
118}
119
120static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
121 struct radeon_power_state *power_state,
122 enum radeon_pm_clock_mode_type type)
123{
124 switch (type) {
125 case POWER_MODE_TYPE_DEFAULT:
126 default:
127 return power_state->default_clock_mode;
128 case POWER_MODE_TYPE_LOW:
129 return &power_state->clock_info[0];
130 case POWER_MODE_TYPE_MID:
131 if (power_state->num_clock_modes > 2)
132 return &power_state->clock_info[1];
133 else
134 return &power_state->clock_info[0];
135 break;
136 case POWER_MODE_TYPE_HIGH:
137 return &power_state->clock_info[power_state->num_clock_modes - 1];
138 }
139
140}
141
142static void radeon_get_power_state(struct radeon_device *rdev,
143 enum radeon_pm_action action)
144{
145 switch (action) {
146 case PM_ACTION_NONE:
147 default:
148 rdev->pm.requested_power_state = rdev->pm.current_power_state;
149 rdev->pm.requested_power_state->requested_clock_mode =
150 rdev->pm.requested_power_state->current_clock_mode;
151 break;
152 case PM_ACTION_MINIMUM:
153 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
154 rdev->pm.requested_power_state->requested_clock_mode =
155 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
156 break;
157 case PM_ACTION_DOWNCLOCK:
158 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
159 rdev->pm.requested_power_state->requested_clock_mode =
160 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
161 break;
162 case PM_ACTION_UPCLOCK:
163 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
164 rdev->pm.requested_power_state->requested_clock_mode =
165 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
166 break;
167 }
530079a8
AD
168 DRM_INFO("Requested: e: %d m: %d p: %d\n",
169 rdev->pm.requested_power_state->requested_clock_mode->sclk,
170 rdev->pm.requested_power_state->requested_clock_mode->mclk,
171 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
516d0e46
AD
172}
173
174static void radeon_set_power_state(struct radeon_device *rdev)
175{
176 if (rdev->pm.requested_power_state == rdev->pm.current_power_state)
177 return;
530079a8
AD
178
179 DRM_INFO("Setting: e: %d m: %d p: %d\n",
180 rdev->pm.requested_power_state->requested_clock_mode->sclk,
181 rdev->pm.requested_power_state->requested_clock_mode->mclk,
182 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
516d0e46
AD
183 /* set pcie lanes */
184 /* set voltage */
185 /* set engine clock */
186 radeon_set_engine_clock(rdev, rdev->pm.requested_power_state->requested_clock_mode->sclk);
187 /* set memory clock */
188
189 rdev->pm.current_power_state = rdev->pm.requested_power_state;
190}
191
7433874e
RM
192int radeon_pm_init(struct radeon_device *rdev)
193{
c913e23a
RM
194 rdev->pm.state = PM_STATE_DISABLED;
195 rdev->pm.planned_action = PM_ACTION_NONE;
196 rdev->pm.downclocked = false;
c913e23a 197
56278a8e
AD
198 if (rdev->bios) {
199 if (rdev->is_atom_bios)
200 radeon_atombios_get_power_modes(rdev);
201 else
202 radeon_combios_get_power_modes(rdev);
203 radeon_print_power_mode_info(rdev);
204 }
205
7433874e 206 if (radeon_debugfs_pm_init(rdev)) {
c142c3e5 207 DRM_ERROR("Failed to register debugfs file for PM!\n");
7433874e
RM
208 }
209
c913e23a
RM
210 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
211
212 if (radeon_dynpm != -1 && radeon_dynpm) {
213 rdev->pm.state = PM_STATE_PAUSED;
214 DRM_INFO("radeon: dynamic power management enabled\n");
215 }
216
217 DRM_INFO("radeon: power management initialized\n");
218
7433874e
RM
219 return 0;
220}
221
c913e23a
RM
222void radeon_pm_compute_clocks(struct radeon_device *rdev)
223{
224 struct drm_device *ddev = rdev->ddev;
225 struct drm_connector *connector;
226 struct radeon_crtc *radeon_crtc;
227 int count = 0;
228
229 if (rdev->pm.state == PM_STATE_DISABLED)
230 return;
231
232 mutex_lock(&rdev->pm.mutex);
233
234 rdev->pm.active_crtcs = 0;
235 list_for_each_entry(connector,
236 &ddev->mode_config.connector_list, head) {
237 if (connector->encoder &&
238 connector->dpms != DRM_MODE_DPMS_OFF) {
239 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
240 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
241 ++count;
242 }
243 }
244
245 if (count > 1) {
246 if (rdev->pm.state == PM_STATE_ACTIVE) {
c913e23a
RM
247 cancel_delayed_work(&rdev->pm.idle_work);
248
249 rdev->pm.state = PM_STATE_PAUSED;
250 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
73a6d3fc 251 if (rdev->pm.downclocked)
c913e23a
RM
252 radeon_pm_set_clocks(rdev);
253
254 DRM_DEBUG("radeon: dynamic power management deactivated\n");
c913e23a
RM
255 }
256 } else if (count == 1) {
c913e23a
RM
257 /* TODO: Increase clocks if needed for current mode */
258
259 if (rdev->pm.state == PM_STATE_MINIMUM) {
260 rdev->pm.state = PM_STATE_ACTIVE;
261 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
73a6d3fc 262 radeon_pm_set_clocks(rdev);
c913e23a
RM
263
264 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
265 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
266 }
267 else if (rdev->pm.state == PM_STATE_PAUSED) {
268 rdev->pm.state = PM_STATE_ACTIVE;
269 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
270 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
271 DRM_DEBUG("radeon: dynamic power management activated\n");
272 }
c913e23a
RM
273 }
274 else { /* count == 0 */
275 if (rdev->pm.state != PM_STATE_MINIMUM) {
276 cancel_delayed_work(&rdev->pm.idle_work);
277
278 rdev->pm.state = PM_STATE_MINIMUM;
279 rdev->pm.planned_action = PM_ACTION_MINIMUM;
73a6d3fc 280 radeon_pm_set_clocks(rdev);
c913e23a 281 }
c913e23a 282 }
73a6d3fc
RM
283
284 mutex_unlock(&rdev->pm.mutex);
c913e23a
RM
285}
286
f735261b
DA
287static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
288{
289 u32 stat_crtc1 = 0, stat_crtc2 = 0;
290 bool in_vbl = true;
291
292 if (ASIC_IS_AVIVO(rdev)) {
293 if (rdev->pm.active_crtcs & (1 << 0)) {
294 stat_crtc1 = RREG32(D1CRTC_STATUS);
295 if (!(stat_crtc1 & 1))
296 in_vbl = false;
297 }
298 if (rdev->pm.active_crtcs & (1 << 1)) {
299 stat_crtc2 = RREG32(D2CRTC_STATUS);
300 if (!(stat_crtc2 & 1))
301 in_vbl = false;
302 }
303 }
304 if (in_vbl == false)
305 DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
306 stat_crtc2, finish ? "exit" : "entry");
307 return in_vbl;
308}
c913e23a
RM
309static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
310{
311 /*radeon_fence_wait_last(rdev);*/
312 switch (rdev->pm.planned_action) {
313 case PM_ACTION_UPCLOCK:
c913e23a
RM
314 rdev->pm.downclocked = false;
315 break;
316 case PM_ACTION_DOWNCLOCK:
c913e23a
RM
317 rdev->pm.downclocked = true;
318 break;
319 case PM_ACTION_MINIMUM:
c913e23a
RM
320 break;
321 case PM_ACTION_NONE:
322 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
323 break;
324 }
f735261b
DA
325
326 /* check if we are in vblank */
327 radeon_pm_debug_check_in_vbl(rdev, false);
530079a8 328 radeon_set_power_state(rdev);
f735261b 329 radeon_pm_debug_check_in_vbl(rdev, true);
c913e23a
RM
330 rdev->pm.planned_action = PM_ACTION_NONE;
331}
332
333static void radeon_pm_set_clocks(struct radeon_device *rdev)
334{
73a6d3fc
RM
335 radeon_get_power_state(rdev, rdev->pm.planned_action);
336 mutex_lock(&rdev->cp.mutex);
337
338 if (rdev->pm.active_crtcs & (1 << 0)) {
339 rdev->pm.req_vblank |= (1 << 0);
340 drm_vblank_get(rdev->ddev, 0);
341 }
342 if (rdev->pm.active_crtcs & (1 << 1)) {
343 rdev->pm.req_vblank |= (1 << 1);
344 drm_vblank_get(rdev->ddev, 1);
345 }
346 if (rdev->pm.active_crtcs)
347 wait_event_interruptible_timeout(
348 rdev->irq.vblank_queue, 0,
349 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
350 if (rdev->pm.req_vblank & (1 << 0)) {
351 rdev->pm.req_vblank &= ~(1 << 0);
352 drm_vblank_put(rdev->ddev, 0);
353 }
354 if (rdev->pm.req_vblank & (1 << 1)) {
355 rdev->pm.req_vblank &= ~(1 << 1);
356 drm_vblank_put(rdev->ddev, 1);
c913e23a 357 }
c913e23a 358
73a6d3fc
RM
359 radeon_pm_set_clocks_locked(rdev);
360 mutex_unlock(&rdev->cp.mutex);
c913e23a
RM
361}
362
363static void radeon_pm_idle_work_handler(struct work_struct *work)
364{
365 struct radeon_device *rdev;
366 rdev = container_of(work, struct radeon_device,
367 pm.idle_work.work);
368
369 mutex_lock(&rdev->pm.mutex);
73a6d3fc 370 if (rdev->pm.state == PM_STATE_ACTIVE) {
c913e23a
RM
371 unsigned long irq_flags;
372 int not_processed = 0;
373
374 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
375 if (!list_empty(&rdev->fence_drv.emited)) {
376 struct list_head *ptr;
377 list_for_each(ptr, &rdev->fence_drv.emited) {
378 /* count up to 3, that's enought info */
379 if (++not_processed >= 3)
380 break;
381 }
382 }
383 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
384
385 if (not_processed >= 3) { /* should upclock */
386 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
387 rdev->pm.planned_action = PM_ACTION_NONE;
388 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
389 rdev->pm.downclocked) {
390 rdev->pm.planned_action =
391 PM_ACTION_UPCLOCK;
392 rdev->pm.action_timeout = jiffies +
393 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
394 }
395 } else if (not_processed == 0) { /* should downclock */
396 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
397 rdev->pm.planned_action = PM_ACTION_NONE;
398 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
399 !rdev->pm.downclocked) {
400 rdev->pm.planned_action =
401 PM_ACTION_DOWNCLOCK;
402 rdev->pm.action_timeout = jiffies +
403 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
404 }
405 }
406
407 if (rdev->pm.planned_action != PM_ACTION_NONE &&
73a6d3fc
RM
408 jiffies > rdev->pm.action_timeout) {
409 radeon_pm_set_clocks(rdev);
c913e23a
RM
410 }
411 }
412 mutex_unlock(&rdev->pm.mutex);
413
414 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
415 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
416}
417
/*
 * Debugfs info
 */
421#if defined(CONFIG_DEBUG_FS)
422
423static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
424{
425 struct drm_info_node *node = (struct drm_info_node *) m->private;
426 struct drm_device *dev = node->minor->dev;
427 struct radeon_device *rdev = dev->dev_private;
428
c913e23a 429 seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
6234077d
RM
430 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
431 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
432 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
433 if (rdev->asic->get_memory_clock)
434 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
7433874e
RM
435
436 return 0;
437}
438
439static struct drm_info_list radeon_pm_info_list[] = {
440 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
441};
442#endif
443
c913e23a 444static int radeon_debugfs_pm_init(struct radeon_device *rdev)
7433874e
RM
445{
446#if defined(CONFIG_DEBUG_FS)
447 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
448#else
449 return 0;
450#endif
451}