/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_dp_helper.h"

#include "drm_crtc_helper.h"

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *);
};

#define I8XX_DOT_MIN 25000
#define I8XX_DOT_MAX 350000
#define I8XX_VCO_MIN 930000
#define I8XX_VCO_MAX 1400000
#define I8XX_N_MIN 3
#define I8XX_N_MAX 16
#define I8XX_M_MIN 96
#define I8XX_M_MAX 140
#define I8XX_M1_MIN 18
#define I8XX_M1_MAX 26
#define I8XX_M2_MIN 6
#define I8XX_M2_MAX 16
#define I8XX_P_MIN 4
#define I8XX_P_MAX 128
#define I8XX_P1_MIN 2
#define I8XX_P1_MAX 33
#define I8XX_P1_LVDS_MIN 1
#define I8XX_P1_LVDS_MAX 6
#define I8XX_P2_SLOW 4
#define I8XX_P2_FAST 2
#define I8XX_P2_LVDS_SLOW 14
#define I8XX_P2_LVDS_FAST 7
#define I8XX_P2_SLOW_LIMIT 165000

#define I9XX_DOT_MIN 20000
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
#define PINEVIEW_VCO_MIN 1700000
#define PINEVIEW_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
/* Pineview's Ncounter is a ring counter */
#define PINEVIEW_N_MIN 3
#define PINEVIEW_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
#define PINEVIEW_M_MIN 2
#define PINEVIEW_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
/* Pineview M1 is reserved, and must be 0 */
#define PINEVIEW_M1_MIN 0
#define PINEVIEW_M1_MAX 0
#define PINEVIEW_M2_MIN 0
#define PINEVIEW_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
#define PINEVIEW_P_LVDS_MIN 7
#define PINEVIEW_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
#define I9XX_P2_SDVO_DAC_FAST 5
#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
#define I9XX_P2_LVDS_SLOW 14
#define I9XX_P2_LVDS_FAST 7
#define I9XX_P2_LVDS_SLOW_LIMIT 112000

/* The parameters below are for SDVO on the G4x platform */
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
#define G4X_VCO_MIN 1750000
#define G4X_VCO_MAX 3500000
#define G4X_N_SDVO_MIN 1
#define G4X_N_SDVO_MAX 4
#define G4X_M_SDVO_MIN 104
#define G4X_M_SDVO_MAX 138
#define G4X_M1_SDVO_MIN 17
#define G4X_M1_SDVO_MAX 23
#define G4X_M2_SDVO_MIN 5
#define G4X_M2_SDVO_MAX 11
#define G4X_P_SDVO_MIN 10
#define G4X_P_SDVO_MAX 30
#define G4X_P1_SDVO_MIN 1
#define G4X_P1_SDVO_MAX 3
#define G4X_P2_SDVO_SLOW 10
#define G4X_P2_SDVO_FAST 10
#define G4X_P2_SDVO_LIMIT 270000

/* The parameters below are for HDMI/DAC on the G4x platform */
#define G4X_DOT_HDMI_DAC_MIN 22000
#define G4X_DOT_HDMI_DAC_MAX 400000
#define G4X_N_HDMI_DAC_MIN 1
#define G4X_N_HDMI_DAC_MAX 4
#define G4X_M_HDMI_DAC_MIN 104
#define G4X_M_HDMI_DAC_MAX 138
#define G4X_M1_HDMI_DAC_MIN 16
#define G4X_M1_HDMI_DAC_MAX 23
#define G4X_M2_HDMI_DAC_MIN 5
#define G4X_M2_HDMI_DAC_MAX 11
#define G4X_P_HDMI_DAC_MIN 5
#define G4X_P_HDMI_DAC_MAX 80
#define G4X_P1_HDMI_DAC_MIN 1
#define G4X_P1_HDMI_DAC_MAX 8
#define G4X_P2_HDMI_DAC_SLOW 10
#define G4X_P2_HDMI_DAC_FAST 5
#define G4X_P2_HDMI_DAC_LIMIT 165000

/* The parameters below are for single-channel LVDS on the G4x platform */
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0

/* The parameters below are for dual-channel LVDS on the G4x platform */
#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0

/* The parameters below are for DisplayPort on the G4x platform */
#define G4X_DOT_DISPLAY_PORT_MIN 161670
#define G4X_DOT_DISPLAY_PORT_MAX 227000
#define G4X_N_DISPLAY_PORT_MIN 1
#define G4X_N_DISPLAY_PORT_MAX 2
#define G4X_M_DISPLAY_PORT_MIN 97
#define G4X_M_DISPLAY_PORT_MAX 108
#define G4X_M1_DISPLAY_PORT_MIN 0x10
#define G4X_M1_DISPLAY_PORT_MAX 0x12
#define G4X_M2_DISPLAY_PORT_MIN 0x05
#define G4X_M2_DISPLAY_PORT_MAX 0x06
#define G4X_P_DISPLAY_PORT_MIN 10
#define G4X_P_DISPLAY_PORT_MAX 20
#define G4X_P1_DISPLAY_PORT_MIN 1
#define G4X_P1_DISPLAY_PORT_MAX 2
#define G4X_P2_DISPLAY_PORT_SLOW 10
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0

/* Ironlake / Sandybridge */
/* The clock is calculated using (register_value + 2) for N/M1/M2, so the
 * range values given here are (actual_value - 2).
 */
#define IRONLAKE_DOT_MIN 25000
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
#define IRONLAKE_M1_MIN 12
#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */

/* We have parameter ranges for different types of outputs. */

/* DAC & HDMI Refclk 120Mhz */
#define IRONLAKE_DAC_N_MIN 1
#define IRONLAKE_DAC_N_MAX 5
#define IRONLAKE_DAC_M_MIN 79
#define IRONLAKE_DAC_M_MAX 127
#define IRONLAKE_DAC_P_MIN 5
#define IRONLAKE_DAC_P_MAX 80
#define IRONLAKE_DAC_P1_MIN 1
#define IRONLAKE_DAC_P1_MAX 8
#define IRONLAKE_DAC_P2_SLOW 10
#define IRONLAKE_DAC_P2_FAST 5

/* LVDS single-channel 120Mhz refclk */
#define IRONLAKE_LVDS_S_N_MIN 1
#define IRONLAKE_LVDS_S_N_MAX 3
#define IRONLAKE_LVDS_S_M_MIN 79
#define IRONLAKE_LVDS_S_M_MAX 118
#define IRONLAKE_LVDS_S_P_MIN 28
#define IRONLAKE_LVDS_S_P_MAX 112
#define IRONLAKE_LVDS_S_P1_MIN 2
#define IRONLAKE_LVDS_S_P1_MAX 8
#define IRONLAKE_LVDS_S_P2_SLOW 14
#define IRONLAKE_LVDS_S_P2_FAST 14

/* LVDS dual-channel 120Mhz refclk */
#define IRONLAKE_LVDS_D_N_MIN 1
#define IRONLAKE_LVDS_D_N_MAX 3
#define IRONLAKE_LVDS_D_M_MIN 79
#define IRONLAKE_LVDS_D_M_MAX 127
#define IRONLAKE_LVDS_D_P_MIN 14
#define IRONLAKE_LVDS_D_P_MAX 56
#define IRONLAKE_LVDS_D_P1_MIN 2
#define IRONLAKE_LVDS_D_P1_MAX 8
#define IRONLAKE_LVDS_D_P2_SLOW 7
#define IRONLAKE_LVDS_D_P2_FAST 7

/* LVDS single-channel 100Mhz refclk */
#define IRONLAKE_LVDS_S_SSC_N_MIN 1
#define IRONLAKE_LVDS_S_SSC_N_MAX 2
#define IRONLAKE_LVDS_S_SSC_M_MIN 79
#define IRONLAKE_LVDS_S_SSC_M_MAX 126
#define IRONLAKE_LVDS_S_SSC_P_MIN 28
#define IRONLAKE_LVDS_S_SSC_P_MAX 112
#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
#define IRONLAKE_LVDS_S_SSC_P2_FAST 14

/* LVDS dual-channel 100Mhz refclk */
#define IRONLAKE_LVDS_D_SSC_N_MIN 1
#define IRONLAKE_LVDS_D_SSC_N_MAX 3
#define IRONLAKE_LVDS_D_SSC_M_MIN 79
#define IRONLAKE_LVDS_D_SSC_M_MAX 126
#define IRONLAKE_LVDS_D_SSC_P_MIN 14
#define IRONLAKE_LVDS_D_SSC_P_MAX 42
#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
#define IRONLAKE_LVDS_D_SSC_P2_FAST 7

/* DisplayPort */
#define IRONLAKE_DP_N_MIN 1
#define IRONLAKE_DP_N_MAX 2
#define IRONLAKE_DP_M_MIN 81
#define IRONLAKE_DP_M_MAX 90
#define IRONLAKE_DP_P_MIN 10
#define IRONLAKE_DP_P_MAX 20
#define IRONLAKE_DP_P2_FAST 10
#define IRONLAKE_DP_P2_SLOW 10
#define IRONLAKE_DP_P2_LIMIT 0
#define IRONLAKE_DP_P1_MIN 1
#define IRONLAKE_DP_P1_MAX 2

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock);

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
	.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
	.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
	.m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
	.m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
	.m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
	.p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
	.p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
	.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
	.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
	.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
	.m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
	.m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
	.m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
	.p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
	.p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
	.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
	.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
	.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
	.m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
	.m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
	.m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
	.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
	.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
	.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
	.m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
	.m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
	.m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
	.p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
	/* The single-channel range is 25-112Mhz, and dual-channel
	 * is 80-224Mhz.  Prefer single channel as much as possible.
	 */
	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
	.find_pll = intel_find_best_PLL,
};

/* The parameters and functions below are for the G4x chipset family */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
	.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
	.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
	.m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
	.m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
	.m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
	.p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
	.p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
	.p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
		.p2_slow = G4X_P2_SDVO_SLOW,
		.p2_fast = G4X_P2_SDVO_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
	.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
	.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
	.m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
	.m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
	.m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
	.p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
	.p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
	.p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
		.p2_slow = G4X_P2_HDMI_DAC_SLOW,
		.p2_fast = G4X_P2_HDMI_DAC_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX },
	.n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
	       .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
	.m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
	       .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
	.m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
		.max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
	.m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
		.max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
	.p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
	       .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
	.p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
		.max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
	.p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
		.p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
		.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX },
	.n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
	       .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
	.m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
	       .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
	.m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
		.max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
	.m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
		.max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
	.p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
	       .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
	.p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
		.max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
	.p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
		.p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
		.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
		 .max = G4X_DOT_DISPLAY_PORT_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX},
	.n = { .min = G4X_N_DISPLAY_PORT_MIN,
	       .max = G4X_N_DISPLAY_PORT_MAX },
	.m = { .min = G4X_M_DISPLAY_PORT_MIN,
	       .max = G4X_M_DISPLAY_PORT_MAX },
	.m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
		.max = G4X_M1_DISPLAY_PORT_MAX },
	.m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
		.max = G4X_M2_DISPLAY_PORT_MAX },
	.p = { .min = G4X_P_DISPLAY_PORT_MIN,
	       .max = G4X_P_DISPLAY_PORT_MAX },
	.p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
		.max = G4X_P1_DISPLAY_PORT_MAX},
	.p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
		.p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
		.p2_fast = G4X_P2_DISPLAY_PORT_FAST },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
	.vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
	.n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
	.m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
	.m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
	.m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
	.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
	.vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
	.n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
	.m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
	.m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
	.m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
	.p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
	/* Pineview only supports single-channel mode. */
	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
	.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
	.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
	.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		.p2_slow = IRONLAKE_DAC_P2_SLOW,
		.p2_fast = IRONLAKE_DAC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
	.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
	.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
	.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
		.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
	.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
	.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
	.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
		.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
	.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
	.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
	.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
		.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
	.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
	.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
	.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
		.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = IRONLAKE_DOT_MIN,
		 .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,
		 .max = IRONLAKE_VCO_MAX},
	.n = { .min = IRONLAKE_DP_N_MIN,
	       .max = IRONLAKE_DP_N_MAX },
	.m = { .min = IRONLAKE_DP_M_MIN,
	       .max = IRONLAKE_DP_M_MAX },
	.m1 = { .min = IRONLAKE_M1_MIN,
		.max = IRONLAKE_M1_MAX },
	.m2 = { .min = IRONLAKE_M2_MIN,
		.max = IRONLAKE_M2_MAX },
	.p = { .min = IRONLAKE_DP_P_MIN,
	       .max = IRONLAKE_DP_P_MAX },
	.p1 = { .min = IRONLAKE_DP_P1_MIN,
		.max = IRONLAKE_DP_P1_MAX},
	.p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
		.p2_slow = IRONLAKE_DP_P2_SLOW,
		.p2_fast = IRONLAKE_DP_P2_FAST },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;
	int refclk = 120;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
			refclk = 100;

		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
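/*
 * Illustrative arithmetic (an added note, not from the original source):
 * with a 96 MHz refclk and divisors m1 = 12, m2 = 9, n = 1, p1 = 2, p2 = 10,
 * the non-Pineview formulas above give m = 5 * (12 + 2) + (9 + 2) = 81,
 * vco = 96000 * 81 / (1 + 2) = 2592000 kHz, and dot = 2592000 / (2 * 10) =
 * 129600 kHz, i.e. a pixel clock of roughly 129.6 MHz.
 */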

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *l_entry;

	list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
		if (l_entry && l_entry->crtc == crtc) {
			struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
			if (intel_encoder->type == type)
				return true;
		}
	}
	return false;
}

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
{
	const intel_limit_t *limit = intel_limit (crtc);
	struct drm_device *dev = crtc->dev;

	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid ("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid ("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid ("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid ("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid ("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid ("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid ("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid ("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid ("dot out of range\n");

	return true;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);

					if (!intel_PLL_is_valid(crtc, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
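/*
 * Note (added for clarity): err starts out equal to the target clock, so the
 * function returns true only if at least one divisor combination passed
 * intel_PLL_is_valid() and brought the error below that initial value.
 */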

static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00488 */
	int err_most = (target >> 8) + (target >> 10);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(crtc, &clock))
						continue;
					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	/* return directly when it is eDP */
	if (HAS_eDP)
		return true;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

void
intel_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	msleep(20);
}

/* Parameters have changed, update FBC info */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;

	if (fb->pitch < dev_priv->cfb_pitch)
		dev_priv->cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj_priv->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
	if (obj_priv->tiling_mode != I915_TILING_NONE)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	if (obj_priv->tiling_mode != I915_TILING_NONE)
		fbc_ctl |= dev_priv->cfb_fence;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}

void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	if (!I915_HAS_FBC(dev))
		return;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
		; /* nothing */

	intel_wait_for_vblank(dev);

	DRM_DEBUG_KMS("disabled FBC\n");
}

static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
		     DPFC_CTL_PLANEB);
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj_priv->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	dpfc_ctl &= ~DPFC_CTL_EN;
	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	intel_wait_for_vblank(dev);

	DRM_DEBUG_KMS("disabled FBC\n");
}

static bool g4x_fbc_enabled(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @crtc: CRTC to point the compressor at
 * @mode: mode in use
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_crtc *crtc,
			     struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	if (!i915_powersave)
		return;

	if (!dev_priv->display.fbc_enabled ||
	    !dev_priv->display.enable_fbc ||
	    !dev_priv->display.disable_fbc)
		return;

	if (!crtc->fb)
		return;

	intel_fb = to_intel_framebuffer(fb);
	obj_priv = to_intel_bo(intel_fb->obj);

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	if (intel_fb->obj->size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((mode->hdisplay > 2048) ||
	    (mode->vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	if (obj_priv->tiling_mode != I915_TILING_X) {
		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	if (dev_priv->display.fbc_enabled(crtc)) {
		/* We can re-enable it in this case, but need to update pitch */
		if (fb->pitch > dev_priv->cfb_pitch)
			dev_priv->display.disable_fbc(dev);
		if (obj_priv->fence_reg != dev_priv->cfb_fence)
			dev_priv->display.disable_fbc(dev);
		if (plane != dev_priv->cfb_plane)
			dev_priv->display.disable_fbc(dev);
	}

	if (!dev_priv->display.fbc_enabled(crtc)) {
		/* Now try to turn it back on if possible */
		dev_priv->display.enable_fbc(crtc, 500);
	}

	return;

out_disable:
	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
	/* Multiple disables should be harmless */
	if (dev_priv->display.fbc_enabled(crtc))
		dev_priv->display.disable_fbc(dev);
}

static int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	u32 alignment;
	int ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	ret = i915_gem_object_pin(obj, alignment);
	if (ret != 0)
		return ret;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}
	}

	return 0;
}

static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
	u32 dspcntr;
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(crtc->fb);
	obj = intel_fb->obj;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	ret = i915_gem_object_set_to_display_plane(obj);
	if (ret != 0) {
		i915_gem_object_unpin(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	dspcntr = I915_READ(dspcntr_reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (crtc->fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth\n");
		i915_gem_object_unpin(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	if (IS_I965G(dev)) {
		if (obj_priv->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (HAS_PCH_SPLIT(dev))
		/* must disable */
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(dspcntr_reg, dspcntr);

	Start = obj_priv->gtt_offset;
	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
	I915_WRITE(dspstride, crtc->fb->pitch);
	if (IS_I965G(dev)) {
		I915_WRITE(dspbase, Offset);
		I915_READ(dspbase);
		I915_WRITE(dspsurf, Start);
		I915_READ(dspsurf);
		I915_WRITE(dsptileoff, (y << 16) | x);
	} else {
		I915_WRITE(dspbase, Start + Offset);
		I915_READ(dspbase);
	}

	if ((IS_I965G(dev) || plane == 0))
		intel_update_fbc(crtc, &crtc->mode);

	intel_wait_for_vblank(dev);

	if (old_fb) {
		intel_fb = to_intel_framebuffer(old_fb);
		obj_priv = to_intel_bo(intel_fb->obj);
		i915_gem_object_unpin(intel_fb->obj);
	}
	intel_increase_pllclock(crtc, true);

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
		return;

	I915_WRITE8(VGA_SR_INDEX, 1);
	sr1 = I915_READ8(VGA_SR_DATA);
	I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
	udelay(100);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}

static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
}

static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	dpa_ctl = I915_READ(DP_A);
	dpa_ctl |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	udelay(200);
}

static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		 */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	udelay(500);
}

/* The FDI link training functions for ILK/Ibexpeak. */
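/*
 * Added summary (paraphrasing the code below): enable the CPU FDI transmitter
 * and PCH FDI receiver with training pattern 1, unmask the bit-lock and
 * symbol-lock bits in the RX IIR register, poll for bit lock, then switch
 * both sides to training pattern 2 and poll for symbol lock.
 */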
1495static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1496{
1497 struct drm_device *dev = crtc->dev;
1498 struct drm_i915_private *dev_priv = dev->dev_private;
1499 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1500 int pipe = intel_crtc->pipe;
1501 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1502 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1503 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1504 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1505 u32 temp, tries = 0;
1506
1507 /* enable CPU FDI TX and PCH FDI RX */
1508 temp = I915_READ(fdi_tx_reg);
1509 temp |= FDI_TX_ENABLE;
1510 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1511 temp &= ~FDI_LINK_TRAIN_NONE;
1512 temp |= FDI_LINK_TRAIN_PATTERN_1;
1513 I915_WRITE(fdi_tx_reg, temp);
1514 I915_READ(fdi_tx_reg);
1515
1516 temp = I915_READ(fdi_rx_reg);
1517 temp &= ~FDI_LINK_TRAIN_NONE;
1518 temp |= FDI_LINK_TRAIN_PATTERN_1;
1519 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1520 I915_READ(fdi_rx_reg);
1521 udelay(150);
1522
1523 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
1524 for train result */
1525 temp = I915_READ(fdi_rx_imr_reg);
1526 temp &= ~FDI_RX_SYMBOL_LOCK;
1527 temp &= ~FDI_RX_BIT_LOCK;
1528 I915_WRITE(fdi_rx_imr_reg, temp);
1529 I915_READ(fdi_rx_imr_reg);
1530 udelay(150);
1531
1532 for (;;) {
1533 temp = I915_READ(fdi_rx_iir_reg);
1534 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1535
1536 if ((temp & FDI_RX_BIT_LOCK)) {
1537 DRM_DEBUG_KMS("FDI train 1 done.\n");
1538 I915_WRITE(fdi_rx_iir_reg,
1539 temp | FDI_RX_BIT_LOCK);
1540 break;
1541 }
1542
1543 tries++;
1544
1545 if (tries > 5) {
1546 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1547 break;
1548 }
1549 }
1550
1551 /* Train 2 */
1552 temp = I915_READ(fdi_tx_reg);
1553 temp &= ~FDI_LINK_TRAIN_NONE;
1554 temp |= FDI_LINK_TRAIN_PATTERN_2;
1555 I915_WRITE(fdi_tx_reg, temp);
1556
1557 temp = I915_READ(fdi_rx_reg);
1558 temp &= ~FDI_LINK_TRAIN_NONE;
1559 temp |= FDI_LINK_TRAIN_PATTERN_2;
1560 I915_WRITE(fdi_rx_reg, temp);
1561 udelay(150);
1562
1563 tries = 0;
1564
1565 for (;;) {
1566 temp = I915_READ(fdi_rx_iir_reg);
1567 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1568
1569 if (temp & FDI_RX_SYMBOL_LOCK) {
1570 I915_WRITE(fdi_rx_iir_reg,
1571 temp | FDI_RX_SYMBOL_LOCK);
1572 DRM_DEBUG_KMS("FDI train 2 done.\n");
1573 break;
1574 }
1575
1576 tries++;
1577
1578 if (tries > 5) {
1579 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1580 break;
1581 }
1582 }
1583
1584 DRM_DEBUG_KMS("FDI train done\n");
1585}
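/*
 * The routine above follows the two-stage FDI handshake: transmit training
 * pattern 1 and poll FDI_RX_IIR for bit lock, then switch both TX and RX to
 * pattern 2 and poll for symbol lock, giving up after five reads per stage.
 */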
1586
1587 static int snb_b_fdi_train_param[] = {
1588 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
1589 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
1590 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
1591 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
1592};
1593
1594/* The FDI link training functions for SNB/Cougarpoint. */
1595static void gen6_fdi_link_train(struct drm_crtc *crtc)
1596{
1597 struct drm_device *dev = crtc->dev;
1598 struct drm_i915_private *dev_priv = dev->dev_private;
1599 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1600 int pipe = intel_crtc->pipe;
1601 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1602 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1603 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1604 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1605 u32 temp, i;
1606
1607 /* enable CPU FDI TX and PCH FDI RX */
1608 temp = I915_READ(fdi_tx_reg);
1609 temp |= FDI_TX_ENABLE;
1610 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1611 temp &= ~FDI_LINK_TRAIN_NONE;
1612 temp |= FDI_LINK_TRAIN_PATTERN_1;
1613 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1614 /* SNB-B */
1615 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1616 I915_WRITE(fdi_tx_reg, temp);
1617 I915_READ(fdi_tx_reg);
1618
1619 temp = I915_READ(fdi_rx_reg);
1620 if (HAS_PCH_CPT(dev)) {
1621 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1622 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1623 } else {
1624 temp &= ~FDI_LINK_TRAIN_NONE;
1625 temp |= FDI_LINK_TRAIN_PATTERN_1;
1626 }
1627 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1628 I915_READ(fdi_rx_reg);
1629 udelay(150);
1630
1631 /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
1632 for the training result */
1633 temp = I915_READ(fdi_rx_imr_reg);
1634 temp &= ~FDI_RX_SYMBOL_LOCK;
1635 temp &= ~FDI_RX_BIT_LOCK;
1636 I915_WRITE(fdi_rx_imr_reg, temp);
1637 I915_READ(fdi_rx_imr_reg);
1638 udelay(150);
1639
1640 for (i = 0; i < 4; i++ ) {
1641 temp = I915_READ(fdi_tx_reg);
1642 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1643 temp |= snb_b_fdi_train_param[i];
1644 I915_WRITE(fdi_tx_reg, temp);
1645 udelay(500);
1646
1647 temp = I915_READ(fdi_rx_iir_reg);
1648 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1649
1650 if (temp & FDI_RX_BIT_LOCK) {
1651 I915_WRITE(fdi_rx_iir_reg,
1652 temp | FDI_RX_BIT_LOCK);
1653 DRM_DEBUG_KMS("FDI train 1 done.\n");
1654 break;
1655 }
1656 }
1657 if (i == 4)
1658 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1659
1660 /* Train 2 */
1661 temp = I915_READ(fdi_tx_reg);
1662 temp &= ~FDI_LINK_TRAIN_NONE;
1663 temp |= FDI_LINK_TRAIN_PATTERN_2;
1664 if (IS_GEN6(dev)) {
1665 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1666 /* SNB-B */
1667 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1668 }
1669 I915_WRITE(fdi_tx_reg, temp);
1670
1671 temp = I915_READ(fdi_rx_reg);
1672 if (HAS_PCH_CPT(dev)) {
1673 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1674 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
1675 } else {
1676 temp &= ~FDI_LINK_TRAIN_NONE;
1677 temp |= FDI_LINK_TRAIN_PATTERN_2;
1678 }
1679 I915_WRITE(fdi_rx_reg, temp);
1680 udelay(150);
1681
1682 for (i = 0; i < 4; i++ ) {
1683 temp = I915_READ(fdi_tx_reg);
1684 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1685 temp |= snb_b_fdi_train_param[i];
1686 I915_WRITE(fdi_tx_reg, temp);
1687 udelay(500);
1688
1689 temp = I915_READ(fdi_rx_iir_reg);
1690 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1691
1692 if (temp & FDI_RX_SYMBOL_LOCK) {
1693 I915_WRITE(fdi_rx_iir_reg,
1694 temp | FDI_RX_SYMBOL_LOCK);
1695 DRM_DEBUG_KMS("FDI train 2 done.\n");
1696 break;
1697 }
1698 }
1699 if (i == 4)
1700 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1701
1702 DRM_DEBUG_KMS("FDI train done.\n");
1703}
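/*
 * The SNB variant above retries each training stage with up to four voltage
 * swing / pre-emphasis combinations from snb_b_fdi_train_param[], and uses
 * the CPT-specific pattern fields on the RX side when a CougarPoint PCH is
 * present.
 */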
1704
1705 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1706{
1707 struct drm_device *dev = crtc->dev;
1708 struct drm_i915_private *dev_priv = dev->dev_private;
1709 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1710 int pipe = intel_crtc->pipe;
1711 int plane = intel_crtc->plane;
1712 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
1713 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1714 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1715 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1716 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1717 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1718 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1719 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1720 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1721 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1722 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1723 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1724 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
1725 int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
1726 int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
1727 int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
1728 int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
1729 int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
1730 int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
1731 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
1732 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
1733 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1734 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
1735 u32 temp;
1736 int n;
1737 u32 pipe_bpc;
1738
1739 temp = I915_READ(pipeconf_reg);
1740 pipe_bpc = temp & PIPE_BPC_MASK;
1741
1742 /* XXX: When our outputs are all unaware of DPMS modes other than off
1743 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
1744 */
1745 switch (mode) {
1746 case DRM_MODE_DPMS_ON:
1747 case DRM_MODE_DPMS_STANDBY:
1748 case DRM_MODE_DPMS_SUSPEND:
1749 DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
1750
1751 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1752 temp = I915_READ(PCH_LVDS);
1753 if ((temp & LVDS_PORT_EN) == 0) {
1754 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
1755 POSTING_READ(PCH_LVDS);
1756 }
1757 }
1758
1759 if (HAS_eDP) {
1760 /* enable eDP PLL */
1761 ironlake_enable_pll_edp(crtc);
1762 } else {
1763
1764 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1765 temp = I915_READ(fdi_rx_reg);
1766 /*
1767 * make the BPC in FDI Rx be consistent with that in
1768 * pipeconf reg.
1769 */
1770 temp &= ~(0x7 << 16);
1771 temp |= (pipe_bpc << 11);
1772 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
1773 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
1774 I915_READ(fdi_rx_reg);
1775 udelay(200);
1776
1777 /* Switch from Rawclk to PCDclk */
1778 temp = I915_READ(fdi_rx_reg);
1779 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
1780 I915_READ(fdi_rx_reg);
1781 udelay(200);
1782
1783 /* Enable CPU FDI TX PLL, always on for Ironlake */
1784 temp = I915_READ(fdi_tx_reg);
1785 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1786 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
1787 I915_READ(fdi_tx_reg);
1788 udelay(100);
1789 }
1790 }
1791
1792 /* Enable panel fitting for LVDS */
1793 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1794 temp = I915_READ(pf_ctl_reg);
1795 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
1796
1797 /* currently full aspect */
1798 I915_WRITE(pf_win_pos, 0);
1799
1800 I915_WRITE(pf_win_size,
1801 (dev_priv->panel_fixed_mode->hdisplay << 16) |
1802 (dev_priv->panel_fixed_mode->vdisplay));
1803 }
1804
1805 /* Enable CPU pipe */
1806 temp = I915_READ(pipeconf_reg);
1807 if ((temp & PIPEACONF_ENABLE) == 0) {
1808 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
1809 I915_READ(pipeconf_reg);
1810 udelay(100);
1811 }
1812
1813 /* configure and enable CPU plane */
1814 temp = I915_READ(dspcntr_reg);
1815 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
1816 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
1817 /* Flush the plane changes */
1818 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1819 }
1820
1821 if (!HAS_eDP) {
1822 /* For PCH output, training FDI link */
1823 if (IS_GEN6(dev))
1824 gen6_fdi_link_train(crtc);
1825 else
1826 ironlake_fdi_link_train(crtc);
1827
1828 /* enable PCH DPLL */
1829 temp = I915_READ(pch_dpll_reg);
1830 if ((temp & DPLL_VCO_ENABLE) == 0) {
1831 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
1832 I915_READ(pch_dpll_reg);
1833 }
1834 udelay(200);
1835
1836 if (HAS_PCH_CPT(dev)) {
1837 /* Be sure PCH DPLL SEL is set */
1838 temp = I915_READ(PCH_DPLL_SEL);
1839 if (trans_dpll_sel == 0 &&
1840 (temp & TRANSA_DPLL_ENABLE) == 0)
1841 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
1842 else if (trans_dpll_sel == 1 &&
1843 (temp & TRANSB_DPLL_ENABLE) == 0)
1844 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
1845 I915_WRITE(PCH_DPLL_SEL, temp);
1846 I915_READ(PCH_DPLL_SEL);
1847 }
1848
1849 /* set transcoder timing */
1850 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
1851 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1852 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1853
1854 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
1855 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1856 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1857
1858 /* enable normal train */
1859 temp = I915_READ(fdi_tx_reg);
1860 temp &= ~FDI_LINK_TRAIN_NONE;
1861 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1862 FDI_TX_ENHANCE_FRAME_ENABLE);
1863 I915_READ(fdi_tx_reg);
1864
1865 temp = I915_READ(fdi_rx_reg);
1866 if (HAS_PCH_CPT(dev)) {
1867 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1868 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1869 } else {
1870 temp &= ~FDI_LINK_TRAIN_NONE;
1871 temp |= FDI_LINK_TRAIN_NONE;
1872 }
1873 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1874 I915_READ(fdi_rx_reg);
1875
1876 /* wait one idle pattern time */
1877 udelay(100);
1878
1879 /* For PCH DP, enable TRANS_DP_CTL */
1880 if (HAS_PCH_CPT(dev) &&
1881 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
1882 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
1883 int reg;
1884
1885 reg = I915_READ(trans_dp_ctl);
1886 reg &= ~TRANS_DP_PORT_SEL_MASK;
1887 reg = TRANS_DP_OUTPUT_ENABLE |
1888 TRANS_DP_ENH_FRAMING |
1889 TRANS_DP_VSYNC_ACTIVE_HIGH |
1890 TRANS_DP_HSYNC_ACTIVE_HIGH;
1891
1892 switch (intel_trans_dp_port_sel(crtc)) {
1893 case PCH_DP_B:
1894 reg |= TRANS_DP_PORT_SEL_B;
1895 break;
1896 case PCH_DP_C:
1897 reg |= TRANS_DP_PORT_SEL_C;
1898 break;
1899 case PCH_DP_D:
1900 reg |= TRANS_DP_PORT_SEL_D;
1901 break;
1902 default:
1903 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
1904 reg |= TRANS_DP_PORT_SEL_B;
1905 break;
1906 }
1907
1908 I915_WRITE(trans_dp_ctl, reg);
1909 POSTING_READ(trans_dp_ctl);
1910 }
1911
1912 /* enable PCH transcoder */
1913 temp = I915_READ(transconf_reg);
1914 /*
1915 * make the BPC in transcoder be consistent with
1916 * that in pipeconf reg.
1917 */
1918 temp &= ~PIPE_BPC_MASK;
1919 temp |= pipe_bpc;
1920 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1921 I915_READ(transconf_reg);
1922
1923 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1924 ;
1925
1926 }
1927
1928 intel_crtc_load_lut(crtc);
1929
1930 break;
1931 case DRM_MODE_DPMS_OFF:
1932 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1933
1934 drm_vblank_off(dev, pipe);
1935 /* Disable display plane */
1936 temp = I915_READ(dspcntr_reg);
1937 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
1938 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
1939 /* Flush the plane changes */
1940 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1941 I915_READ(dspbase_reg);
1942 }
1943
1944 i915_disable_vga(dev);
1945
1946 /* disable cpu pipe, disable after all planes disabled */
1947 temp = I915_READ(pipeconf_reg);
1948 if ((temp & PIPEACONF_ENABLE) != 0) {
1949 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
1950 I915_READ(pipeconf_reg);
1951 n = 0;
1952 /* wait for cpu pipe off, pipe state */
1953 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
1954 n++;
1955 if (n < 60) {
1956 udelay(500);
1957 continue;
1958 } else {
1959 DRM_DEBUG_KMS("pipe %d off delay\n",
1960 pipe);
1961 break;
1962 }
1963 }
1964 } else
1965 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1966
1967 udelay(100);
1968
1969 /* Disable PF */
1970 temp = I915_READ(pf_ctl_reg);
1971 if ((temp & PF_ENABLE) != 0) {
1972 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1973 I915_READ(pf_ctl_reg);
1974 }
1975 I915_WRITE(pf_win_size, 0);
1976 POSTING_READ(pf_win_size);
1977
1978
1979 /* disable CPU FDI tx and PCH FDI rx */
1980 temp = I915_READ(fdi_tx_reg);
1981 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
1982 I915_READ(fdi_tx_reg);
1983
1984 temp = I915_READ(fdi_rx_reg);
1985 /* BPC in FDI rx is consistent with that in pipeconf */
1986 temp &= ~(0x07 << 16);
1987 temp |= (pipe_bpc << 11);
1988 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1989 I915_READ(fdi_rx_reg);
1990
1991 udelay(100);
1992
1993 /* still set train pattern 1 */
1994 temp = I915_READ(fdi_tx_reg);
1995 temp &= ~FDI_LINK_TRAIN_NONE;
1996 temp |= FDI_LINK_TRAIN_PATTERN_1;
1997 I915_WRITE(fdi_tx_reg, temp);
1998 POSTING_READ(fdi_tx_reg);
1999
2000 temp = I915_READ(fdi_rx_reg);
2001 if (HAS_PCH_CPT(dev)) {
2002 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2003 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2004 } else {
2005 temp &= ~FDI_LINK_TRAIN_NONE;
2006 temp |= FDI_LINK_TRAIN_PATTERN_1;
2007 }
2008 I915_WRITE(fdi_rx_reg, temp);
2009 POSTING_READ(fdi_rx_reg);
2010
2011 udelay(100);
2012
2013 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2014 temp = I915_READ(PCH_LVDS);
2015 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
2016 I915_READ(PCH_LVDS);
2017 udelay(100);
2018 }
2019
2020 /* disable PCH transcoder */
2021 temp = I915_READ(transconf_reg);
2022 if ((temp & TRANS_ENABLE) != 0) {
2023 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
2024 I915_READ(transconf_reg);
2025 n = 0;
2026 /* wait for PCH transcoder off, transcoder state */
2027 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
2028 n++;
2029 if (n < 60) {
2030 udelay(500);
2031 continue;
2032 } else {
2033 DRM_DEBUG_KMS("transcoder %d off "
2034 "delay\n", pipe);
2035 break;
2036 }
2037 }
2038 }
2039
2040 temp = I915_READ(transconf_reg);
2041 /* BPC in transcoder is consistent with that in pipeconf */
2042 temp &= ~PIPE_BPC_MASK;
2043 temp |= pipe_bpc;
2044 I915_WRITE(transconf_reg, temp);
2045 I915_READ(transconf_reg);
2046 udelay(100);
2047
2048 if (HAS_PCH_CPT(dev)) {
2049 /* disable TRANS_DP_CTL */
2050 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
2051 int reg;
2052
2053 reg = I915_READ(trans_dp_ctl);
2054 reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2055 I915_WRITE(trans_dp_ctl, reg);
2056 POSTING_READ(trans_dp_ctl);
2057
2058 /* disable DPLL_SEL */
2059 temp = I915_READ(PCH_DPLL_SEL);
2060 if (trans_dpll_sel == 0)
2061 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2062 else
2063 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2064 I915_WRITE(PCH_DPLL_SEL, temp);
2065 I915_READ(PCH_DPLL_SEL);
2066
2067 }
2068
2069 /* disable PCH DPLL */
2070 temp = I915_READ(pch_dpll_reg);
2071 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
2072 I915_READ(pch_dpll_reg);
2073
2074 if (HAS_eDP) {
2075 ironlake_disable_pll_edp(crtc);
2076 }
2077
2078 /* Switch from PCDclk to Rawclk */
2079 temp = I915_READ(fdi_rx_reg);
2080 temp &= ~FDI_SEL_PCDCLK;
2081 I915_WRITE(fdi_rx_reg, temp);
2082 I915_READ(fdi_rx_reg);
2083
2084 /* Disable CPU FDI TX PLL */
2085 temp = I915_READ(fdi_tx_reg);
2086 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
2087 I915_READ(fdi_tx_reg);
2088 udelay(100);
2089
2090 temp = I915_READ(fdi_rx_reg);
2091 temp &= ~FDI_RX_PLL_ENABLE;
2092 I915_WRITE(fdi_rx_reg, temp);
2093 I915_READ(fdi_rx_reg);
2094
2095 /* Wait for the clocks to turn off. */
2096 udelay(100);
2097 break;
2098 }
2099}
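/*
 * DPMS-on ordering used above for a PCH-driven output: enable the eDP PLL
 * or the FDI RX PLL, switch FDI RX to PCDclk, enable the FDI TX PLL, set up
 * the panel fitter for LVDS, enable the CPU pipe and plane, train the FDI
 * link, enable the PCH DPLL, copy the CPU pipe timings to the transcoder,
 * and finally enable the PCH transcoder. The off path tears this down in
 * roughly the reverse order.
 */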
2100
2101static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2102{
2103 struct intel_overlay *overlay;
03f77ea5 2104 int ret;
2105
2106 if (!enable && intel_crtc->overlay) {
2107 overlay = intel_crtc->overlay;
2108 mutex_lock(&overlay->dev->struct_mutex);
2109 for (;;) {
2110 ret = intel_overlay_switch_off(overlay);
2111 if (ret == 0)
2112 break;
2113
2114 ret = intel_overlay_recover_from_interrupt(overlay, 0);
2115 if (ret != 0) {
2116 /* overlay doesn't react anymore. Usually
2117 * results in a black screen and an unkillable
2118 * X server. */
2119 BUG();
2120 overlay->hw_wedged = HW_WEDGED;
2121 break;
2122 }
2123 }
2124 mutex_unlock(&overlay->dev->struct_mutex);
2125 }
2126 /* Let userspace switch the overlay on again. In most cases userspace
2127 * has to recompute where to put it anyway. */
2128
2129 return;
2130}
2131
2132 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2133{
2134 struct drm_device *dev = crtc->dev;
2135 struct drm_i915_private *dev_priv = dev->dev_private;
2136 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2137 int pipe = intel_crtc->pipe;
2138 int plane = intel_crtc->plane;
2139 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
2140 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
2141 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
2142 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
2143 u32 temp;
2144
2145 /* XXX: When our outputs are all unaware of DPMS modes other than off
2146 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2147 */
2148 switch (mode) {
2149 case DRM_MODE_DPMS_ON:
2150 case DRM_MODE_DPMS_STANDBY:
2151 case DRM_MODE_DPMS_SUSPEND:
2152 intel_update_watermarks(dev);
2153
2154 /* Enable the DPLL */
2155 temp = I915_READ(dpll_reg);
2156 if ((temp & DPLL_VCO_ENABLE) == 0) {
2157 I915_WRITE(dpll_reg, temp);
2158 I915_READ(dpll_reg);
2159 /* Wait for the clocks to stabilize. */
2160 udelay(150);
2161 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
2162 I915_READ(dpll_reg);
2163 /* Wait for the clocks to stabilize. */
2164 udelay(150);
2165 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
2166 I915_READ(dpll_reg);
2167 /* Wait for the clocks to stabilize. */
2168 udelay(150);
2169 }
2170
2171 /* Enable the pipe */
2172 temp = I915_READ(pipeconf_reg);
2173 if ((temp & PIPEACONF_ENABLE) == 0)
2174 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
2175
2176 /* Enable the plane */
2177 temp = I915_READ(dspcntr_reg);
2178 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2179 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
2180 /* Flush the plane changes */
2181 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
2182 }
2183
2184 intel_crtc_load_lut(crtc);
2185
2186 if ((IS_I965G(dev) || plane == 0))
2187 intel_update_fbc(crtc, &crtc->mode);
2188
2189 /* Give the overlay scaler a chance to enable if it's on this pipe */
2190 intel_crtc_dpms_overlay(intel_crtc, true);
2191 break;
2192 case DRM_MODE_DPMS_OFF:
2193 intel_update_watermarks(dev);
2194
2195 /* Give the overlay scaler a chance to disable if it's on this pipe */
2196 intel_crtc_dpms_overlay(intel_crtc, false);
2197 drm_vblank_off(dev, pipe);
2198
2199 if (dev_priv->cfb_plane == plane &&
2200 dev_priv->display.disable_fbc)
2201 dev_priv->display.disable_fbc(dev);
2202
2203 /* Disable the VGA plane that we never use */
2204 i915_disable_vga(dev);
2205
2206 /* Disable display plane */
2207 temp = I915_READ(dspcntr_reg);
2208 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
2209 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
2210 /* Flush the plane changes */
2211 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
2212 I915_READ(dspbase_reg);
2213 }
2214
2215 if (!IS_I9XX(dev)) {
2216 /* Wait for vblank for the disable to take effect */
2217 intel_wait_for_vblank(dev);
2218 }
2219
2220 /* Next, disable display pipes */
2221 temp = I915_READ(pipeconf_reg);
2222 if ((temp & PIPEACONF_ENABLE) != 0) {
2223 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
2224 I915_READ(pipeconf_reg);
2225 }
2226
2227 /* Wait for vblank for the disable to take effect. */
2228 intel_wait_for_vblank(dev);
2229
2230 temp = I915_READ(dpll_reg);
2231 if ((temp & DPLL_VCO_ENABLE) != 0) {
2232 I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
2233 I915_READ(dpll_reg);
2234 }
2235
2236 /* Wait for the clocks to turn off. */
2237 udelay(150);
2238 break;
2239 }
2240}
2241
2242/**
2243 * Sets the power management mode of the pipe and plane.
2244 *
2245 * This code should probably grow support for turning the cursor off and back
2246 * on appropriately at the same time as we're turning the pipe off/on.
2247 */
2248static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2249{
2250 struct drm_device *dev = crtc->dev;
2251 struct drm_i915_private *dev_priv = dev->dev_private;
2252 struct drm_i915_master_private *master_priv;
2253 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2254 int pipe = intel_crtc->pipe;
2255 bool enabled;
2256
2257 dev_priv->display.dpms(crtc, mode);
2258
2259 intel_crtc->dpms_mode = mode;
2260
2261 if (!dev->primary->master)
2262 return;
2263
2264 master_priv = dev->primary->master->driver_priv;
2265 if (!master_priv->sarea_priv)
2266 return;
2267
2268 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
2269
2270 switch (pipe) {
2271 case 0:
2272 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
2273 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
2274 break;
2275 case 1:
2276 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
2277 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
2278 break;
2279 default:
2280 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
2281 break;
2282 }
2283}
2284
2285static void intel_crtc_prepare (struct drm_crtc *crtc)
2286{
2287 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2288 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2289}
2290
2291static void intel_crtc_commit (struct drm_crtc *crtc)
2292{
2293 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2294 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
2295}
2296
2297void intel_encoder_prepare (struct drm_encoder *encoder)
2298{
2299 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
2300 /* lvds has its own version of prepare see intel_lvds_prepare */
2301 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
2302}
2303
2304void intel_encoder_commit (struct drm_encoder *encoder)
2305{
2306 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
2307 /* lvds has its own version of commit see intel_lvds_commit */
2308 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
2309}
2310
2311static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2312 struct drm_display_mode *mode,
2313 struct drm_display_mode *adjusted_mode)
2314{
2315 struct drm_device *dev = crtc->dev;
2316 if (HAS_PCH_SPLIT(dev)) {
2317 /* FDI link clock is fixed at 2.7G */
2318 if (mode->clock * 3 > 27000 * 4)
2319 return MODE_CLOCK_HIGH;
2320 }
2321 return true;
2322}
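/*
 * On PCH-split hardware the check above rejects modes whose dotclock would
 * exceed what the fixed 2.7 GHz FDI link can carry; all other platforms
 * accept any mode at this stage.
 */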
2323
2324static int i945_get_display_clock_speed(struct drm_device *dev)
2325{
2326 return 400000;
2327}
2328
2329 static int i915_get_display_clock_speed(struct drm_device *dev)
2330 {
2331 return 333000;
2332}
2333
2334static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
2335{
2336 return 200000;
2337}
2338
2339static int i915gm_get_display_clock_speed(struct drm_device *dev)
2340{
2341 u16 gcfgc = 0;
2342
2343 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
2344
2345 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
2346 return 133000;
2347 else {
2348 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
2349 case GC_DISPLAY_CLOCK_333_MHZ:
2350 return 333000;
2351 default:
2352 case GC_DISPLAY_CLOCK_190_200_MHZ:
2353 return 190000;
2354 }
2355 }
2356}
2357
2358static int i865_get_display_clock_speed(struct drm_device *dev)
2359{
2360 return 266000;
2361}
2362
2363static int i855_get_display_clock_speed(struct drm_device *dev)
2364{
2365 u16 hpllcc = 0;
2366 /* Assume that the hardware is in the high speed state. This
2367 * should be the default.
2368 */
2369 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
2370 case GC_CLOCK_133_200:
2371 case GC_CLOCK_100_200:
2372 return 200000;
2373 case GC_CLOCK_166_250:
2374 return 250000;
2375 case GC_CLOCK_100_133:
2376 return 133000;
2377 }
2378
2379 /* Shouldn't happen */
2380 return 0;
2381}
2382
2383static int i830_get_display_clock_speed(struct drm_device *dev)
2384{
2385 return 133000;
2386}
2387
2388/**
2389 * Return the pipe currently connected to the panel fitter,
2390 * or -1 if the panel fitter is not present or not in use
2391 */
2392 int intel_panel_fitter_pipe (struct drm_device *dev)
2393{
2394 struct drm_i915_private *dev_priv = dev->dev_private;
2395 u32 pfit_control;
2396
2397 /* i830 doesn't have a panel fitter */
2398 if (IS_I830(dev))
2399 return -1;
2400
2401 pfit_control = I915_READ(PFIT_CONTROL);
2402
2403 /* See if the panel fitter is in use */
2404 if ((pfit_control & PFIT_ENABLE) == 0)
2405 return -1;
2406
2407 /* 965 can place panel fitter on either pipe */
2408 if (IS_I965G(dev))
2409 return (pfit_control >> 29) & 0x3;
2410
2411 /* older chips can only use pipe 1 */
2412 return 1;
2413}
2414
2415struct fdi_m_n {
2416 u32 tu;
2417 u32 gmch_m;
2418 u32 gmch_n;
2419 u32 link_m;
2420 u32 link_n;
2421};
2422
2423static void
2424fdi_reduce_ratio(u32 *num, u32 *den)
2425{
2426 while (*num > 0xffffff || *den > 0xffffff) {
2427 *num >>= 1;
2428 *den >>= 1;
2429 }
2430}
2431
2432#define DATA_N 0x800000
2433#define LINK_N 0x80000
2434
2435 static void
2436 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2437 int link_clock, struct fdi_m_n *m_n)
2438{
2439 u64 temp;
2440
2441 m_n->tu = 64; /* default size */
2442
2443 temp = (u64) DATA_N * pixel_clock;
2444 temp = div_u64(temp, link_clock);
2445 m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
2446 m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
2447 m_n->gmch_n = DATA_N;
2448 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
2449
2450 temp = (u64) LINK_N * pixel_clock;
2451 m_n->link_m = div_u64(temp, link_clock);
2452 m_n->link_n = LINK_N;
2453 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
2454}
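/*
 * Algebraically the ratios programmed above reduce to
 *   gmch_m / gmch_n = (pixel_clock * bytes_per_pixel) / (nlanes * link_clock)
 *   link_m / link_n = pixel_clock / link_clock
 * For example (illustrative numbers only), a 108000 kHz dotclock at 24 bpp
 * over four lanes of a 270000 kHz link gives a data ratio of
 * 108000 * 3 / (4 * 270000) = 0.3.
 */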
2455
2456
2457struct intel_watermark_params {
2458 unsigned long fifo_size;
2459 unsigned long max_wm;
2460 unsigned long default_wm;
2461 unsigned long guard_size;
2462 unsigned long cacheline_size;
2463};
2464
2465/* Pineview has different values for various configs */
2466static struct intel_watermark_params pineview_display_wm = {
2467 PINEVIEW_DISPLAY_FIFO,
2468 PINEVIEW_MAX_WM,
2469 PINEVIEW_DFT_WM,
2470 PINEVIEW_GUARD_WM,
2471 PINEVIEW_FIFO_LINE_SIZE
2472 };
2473static struct intel_watermark_params pineview_display_hplloff_wm = {
2474 PINEVIEW_DISPLAY_FIFO,
2475 PINEVIEW_MAX_WM,
2476 PINEVIEW_DFT_HPLLOFF_WM,
2477 PINEVIEW_GUARD_WM,
2478 PINEVIEW_FIFO_LINE_SIZE
7662c8bd 2479};
f2b115e6
AJ
2480static struct intel_watermark_params pineview_cursor_wm = {
2481 PINEVIEW_CURSOR_FIFO,
2482 PINEVIEW_CURSOR_MAX_WM,
2483 PINEVIEW_CURSOR_DFT_WM,
2484 PINEVIEW_CURSOR_GUARD_WM,
2485 PINEVIEW_FIFO_LINE_SIZE,
7662c8bd 2486};
f2b115e6
AJ
2487static struct intel_watermark_params pineview_cursor_hplloff_wm = {
2488 PINEVIEW_CURSOR_FIFO,
2489 PINEVIEW_CURSOR_MAX_WM,
2490 PINEVIEW_CURSOR_DFT_WM,
2491 PINEVIEW_CURSOR_GUARD_WM,
2492 PINEVIEW_FIFO_LINE_SIZE
7662c8bd 2493};
0e442c60
JB
2494static struct intel_watermark_params g4x_wm_info = {
2495 G4X_FIFO_SIZE,
2496 G4X_MAX_WM,
2497 G4X_MAX_WM,
2498 2,
2499 G4X_FIFO_LINE_SIZE,
2500};
2501 static struct intel_watermark_params i945_wm_info = {
2502 I945_FIFO_SIZE,
2503 I915_MAX_WM,
2504 1,
2505 2,
2506 I915_FIFO_LINE_SIZE
2507 };
2508 static struct intel_watermark_params i915_wm_info = {
2509 I915_FIFO_SIZE,
2510 I915_MAX_WM,
2511 1,
2512 2,
2513 I915_FIFO_LINE_SIZE
2514 };
2515 static struct intel_watermark_params i855_wm_info = {
2516 I855GM_FIFO_SIZE,
2517 I915_MAX_WM,
2518 1,
2519 2,
2520 I830_FIFO_LINE_SIZE
2521 };
2522 static struct intel_watermark_params i830_wm_info = {
2523 I830_FIFO_SIZE,
2524 I915_MAX_WM,
2525 1,
2526 2,
2527 I830_FIFO_LINE_SIZE
2528 };
2529
2530static struct intel_watermark_params ironlake_display_wm_info = {
2531 ILK_DISPLAY_FIFO,
2532 ILK_DISPLAY_MAXWM,
2533 ILK_DISPLAY_DFTWM,
2534 2,
2535 ILK_FIFO_LINE_SIZE
2536};
2537
2538static struct intel_watermark_params ironlake_display_srwm_info = {
2539 ILK_DISPLAY_SR_FIFO,
2540 ILK_DISPLAY_MAX_SRWM,
2541 ILK_DISPLAY_DFT_SRWM,
2542 2,
2543 ILK_FIFO_LINE_SIZE
2544};
2545
2546static struct intel_watermark_params ironlake_cursor_srwm_info = {
2547 ILK_CURSOR_SR_FIFO,
2548 ILK_CURSOR_MAX_SRWM,
2549 ILK_CURSOR_DFT_SRWM,
2550 2,
2551 ILK_FIFO_LINE_SIZE
2552};
2553
2554/**
2555 * intel_calculate_wm - calculate watermark level
2556 * @clock_in_khz: pixel clock
2557 * @wm: chip FIFO params
2558 * @pixel_size: display pixel size
2559 * @latency_ns: memory latency for the platform
2560 *
2561 * Calculate the watermark level (the level at which the display plane will
2562 * start fetching from memory again). Each chip has a different display
2563 * FIFO size and allocation, so the caller needs to figure that out and pass
2564 * in the correct intel_watermark_params structure.
2565 *
2566 * As the pixel clock runs, the FIFO will be drained at a rate that depends
2567 * on the pixel size. When it reaches the watermark level, it'll start
2568 * fetching FIFO line sized based chunks from memory until the FIFO fills
2569 * past the watermark point. If the FIFO drains completely, a FIFO underrun
2570 * will occur, and a display engine hang could result.
2571 */
2572 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2573 struct intel_watermark_params *wm,
2574 int pixel_size,
2575 unsigned long latency_ns)
2576 {
2577 long entries_required, wm_size;
2578
2579 /*
2580 * Note: we need to make sure we don't overflow for various clock &
2581 * latency values.
2582 * clocks go from a few thousand to several hundred thousand.
2583 * latency is usually a few thousand
2584 */
2585 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
2586 1000;
2587 entries_required /= wm->cacheline_size;
2588
2589 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2590
2591 wm_size = wm->fifo_size - (entries_required + wm->guard_size);
2592
2593 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
2594
2595 /* Don't promote wm_size to unsigned... */
2596 if (wm_size > (long)wm->max_wm)
2597 wm_size = wm->max_wm;
2598 if (wm_size <= 0)
2599 wm_size = wm->default_wm;
2600 return wm_size;
2601}
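/*
 * Illustrative arithmetic only, assuming a 64-byte FIFO line: a 100000 kHz
 * dotclock at 4 bytes per pixel with latency_ns = 5000 gives
 * entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes, i.e.
 * 2000 / 64 = 31 FIFO lines, so the returned level is
 * wm->fifo_size - (31 + wm->guard_size), clamped by max_wm/default_wm.
 */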
2602
2603struct cxsr_latency {
2604 int is_desktop;
2605 unsigned long fsb_freq;
2606 unsigned long mem_freq;
2607 unsigned long display_sr;
2608 unsigned long display_hpll_disable;
2609 unsigned long cursor_sr;
2610 unsigned long cursor_hpll_disable;
2611};
2612
2613static struct cxsr_latency cxsr_latency_table[] = {
2614 {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
2615 {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
2616 {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
2617
2618 {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
2619 {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
2620 {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
2621
2622 {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
2623 {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
2624 {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
2625
2626 {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
2627 {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
2628 {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
2629
2630 {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
2631 {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
2632 {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
2633
2634 {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
2635 {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
2636 {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
2637};
2638
2639static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2640 int mem)
2641{
2642 int i;
2643 struct cxsr_latency *latency;
2644
2645 if (fsb == 0 || mem == 0)
2646 return NULL;
2647
2648 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
2649 latency = &cxsr_latency_table[i];
2650 if (is_desktop == latency->is_desktop &&
2651 fsb == latency->fsb_freq && mem == latency->mem_freq)
2652 return latency;
2653 }
2654
2655 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2656
2657 return NULL;
2658}
2659
2660 static void pineview_disable_cxsr(struct drm_device *dev)
2661{
2662 struct drm_i915_private *dev_priv = dev->dev_private;
2663 u32 reg;
2664
2665 /* deactivate cxsr */
2666 reg = I915_READ(DSPFW3);
2667 reg &= ~(PINEVIEW_SELF_REFRESH_EN);
2668 I915_WRITE(DSPFW3, reg);
2669 DRM_INFO("Big FIFO is disabled\n");
2670}
2671
2672/*
2673 * Latency for FIFO fetches is dependent on several factors:
2674 * - memory configuration (speed, channels)
2675 * - chipset
2676 * - current MCH state
2677 * It can be fairly high in some situations, so here we assume a fairly
2678 * pessimal value. It's a tradeoff between extra memory fetches (if we
2679 * set this value too high, the FIFO will fetch frequently to stay full)
2680 * and power consumption (set it too low to save power and we might see
2681 * FIFO underruns and display "flicker").
2682 *
2683 * A value of 5us seems to be a good balance; safe for very low end
2684 * platforms but not overly aggressive on lower latency configs.
2685 */
2686 static const int latency_ns = 5000;
2687
2688 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2689{
2690 struct drm_i915_private *dev_priv = dev->dev_private;
2691 uint32_t dsparb = I915_READ(DSPARB);
2692 int size;
2693
2694 if (plane == 0)
2695 size = dsparb & 0x7f;
2696 else
2697 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2698 (dsparb & 0x7f);
2699
2700 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2701 plane ? "B" : "A", size);
2702
2703 return size;
2704}
2705
2706static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2707{
2708 struct drm_i915_private *dev_priv = dev->dev_private;
2709 uint32_t dsparb = I915_READ(DSPARB);
2710 int size;
2711
2712 if (plane == 0)
2713 size = dsparb & 0x1ff;
2714 else
2715 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2716 (dsparb & 0x1ff);
2717 size >>= 1; /* Convert to cachelines */
2718
2719 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2720 plane ? "B" : "A", size);
2721
2722 return size;
2723}
2724
2725static int i845_get_fifo_size(struct drm_device *dev, int plane)
2726{
2727 struct drm_i915_private *dev_priv = dev->dev_private;
2728 uint32_t dsparb = I915_READ(DSPARB);
2729 int size;
2730
2731 size = dsparb & 0x7f;
2732 size >>= 2; /* Convert to cachelines */
2733
2734 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2735 plane ? "B" : "A",
2736 size);
2737
2738 return size;
2739}
2740
2741static int i830_get_fifo_size(struct drm_device *dev, int plane)
2742{
2743 struct drm_i915_private *dev_priv = dev->dev_private;
2744 uint32_t dsparb = I915_READ(DSPARB);
2745 int size;
2746
2747 size = dsparb & 0x7f;
2748 size >>= 1; /* Convert to cachelines */
2749
2750 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2751 plane ? "B" : "A", size);
2752
2753 return size;
2754}
2755
2756static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2757 int planeb_clock, int sr_hdisplay, int pixel_size)
2758{
2759 struct drm_i915_private *dev_priv = dev->dev_private;
2760 u32 reg;
2761 unsigned long wm;
2762 struct cxsr_latency *latency;
2763 int sr_clock;
2764
2765 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2766 dev_priv->mem_freq);
2767 if (!latency) {
2768 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2769 pineview_disable_cxsr(dev);
2770 return;
2771 }
2772
2773 if (!planea_clock || !planeb_clock) {
2774 sr_clock = planea_clock ? planea_clock : planeb_clock;
2775
2776 /* Display SR */
2777 wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
2778 pixel_size, latency->display_sr);
2779 reg = I915_READ(DSPFW1);
2780 reg &= ~DSPFW_SR_MASK;
2781 reg |= wm << DSPFW_SR_SHIFT;
2782 I915_WRITE(DSPFW1, reg);
2783 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2784
2785 /* cursor SR */
2786 wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
2787 pixel_size, latency->cursor_sr);
2788 reg = I915_READ(DSPFW3);
2789 reg &= ~DSPFW_CURSOR_SR_MASK;
2790 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
2791 I915_WRITE(DSPFW3, reg);
2792
2793 /* Display HPLL off SR */
2794 wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
2795 pixel_size, latency->display_hpll_disable);
2796 reg = I915_READ(DSPFW3);
2797 reg &= ~DSPFW_HPLL_SR_MASK;
2798 reg |= wm & DSPFW_HPLL_SR_MASK;
2799 I915_WRITE(DSPFW3, reg);
2800
2801 /* cursor HPLL off SR */
2802 wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
2803 pixel_size, latency->cursor_hpll_disable);
2804 reg = I915_READ(DSPFW3);
2805 reg &= ~DSPFW_HPLL_CURSOR_MASK;
2806 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
2807 I915_WRITE(DSPFW3, reg);
2808 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2809
2810 /* activate cxsr */
2811 reg = I915_READ(DSPFW3);
2812 reg |= PINEVIEW_SELF_REFRESH_EN;
2813 I915_WRITE(DSPFW3, reg);
2814 DRM_DEBUG_KMS("Self-refresh is enabled\n");
2815 } else {
2816 pineview_disable_cxsr(dev);
2817 DRM_DEBUG_KMS("Self-refresh is disabled\n");
2818 }
2819}
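/*
 * Pineview only enters CxSR (memory self-refresh) when a single plane is
 * active and a latency entry matching the current FSB/memory speed exists;
 * with both planes running, or with an unknown configuration, self-refresh
 * stays disabled.
 */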
2820
2821static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2822 int planeb_clock, int sr_hdisplay, int pixel_size)
2823 {
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2825 int total_size, cacheline_size;
2826 int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
2827 struct intel_watermark_params planea_params, planeb_params;
2828 unsigned long line_time_us;
2829 int sr_clock, sr_entries = 0, entries_required;
2830
2831 /* Create copies of the base settings for each pipe */
2832 planea_params = planeb_params = g4x_wm_info;
2833
2834 /* Grab a couple of global values before we overwrite them */
2835 total_size = planea_params.fifo_size;
2836 cacheline_size = planea_params.cacheline_size;
2837
2838 /*
2839 * Note: we need to make sure we don't overflow for various clock &
2840 * latency values.
2841 * clocks go from a few thousand to several hundred thousand.
2842 * latency is usually a few thousand
2843 */
2844 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
2845 1000;
2846 entries_required /= G4X_FIFO_LINE_SIZE;
2847 planea_wm = entries_required + planea_params.guard_size;
2848
2849 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
2850 1000;
2851 entries_required /= G4X_FIFO_LINE_SIZE;
2852 planeb_wm = entries_required + planeb_params.guard_size;
2853
2854 cursora_wm = cursorb_wm = 16;
2855 cursor_sr = 32;
2856
2857 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2858
2859 /* Calc sr entries for one plane configs */
2860 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2861 /* self-refresh has much higher latency */
2862 static const int sr_latency_ns = 12000;
2863
2864 sr_clock = planea_clock ? planea_clock : planeb_clock;
2865 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2866
2867 /* Use ns/us then divide to preserve precision */
2868 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2869 pixel_size * sr_hdisplay) / 1000;
2870 sr_entries = roundup(sr_entries / cacheline_size, 1);
2871 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2872 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2873 } else {
2874 /* Turn off self refresh if both pipes are enabled */
2875 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2876 & ~FW_BLC_SELF_EN);
2877 }
2878
2879 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
2880 planea_wm, planeb_wm, sr_entries);
2881
2882 planea_wm &= 0x3f;
2883 planeb_wm &= 0x3f;
2884
2885 I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
2886 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
2887 (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
2888 I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
2889 (cursora_wm << DSPFW_CURSORA_SHIFT));
2890 /* HPLL off in SR has some issues on G4x... disable it */
2891 I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
2892 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2893}
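/*
 * The self-refresh sizing above works in FIFO cachelines: line_time_us is
 * the scanout time of one line (hdisplay * 1000 / dotclock-in-kHz), and
 * sr_entries approximates how much data the display drains during the
 * assumed 12 us self-refresh latency, converted to cachelines.
 */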
2894
2895static void i965_update_wm(struct drm_device *dev, int planea_clock,
2896 int planeb_clock, int sr_hdisplay, int pixel_size)
2897{
2898 struct drm_i915_private *dev_priv = dev->dev_private;
2899 unsigned long line_time_us;
2900 int sr_clock, sr_entries, srwm = 1;
2901
2902 /* Calc sr entries for one plane configs */
2903 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2904 /* self-refresh has much higher latency */
2905 static const int sr_latency_ns = 12000;
2906
2907 sr_clock = planea_clock ? planea_clock : planeb_clock;
2908 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2909
2910 /* Use ns/us then divide to preserve precision */
2911 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2912 pixel_size * sr_hdisplay) / 1000;
2913 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
2914 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2915 srwm = I945_FIFO_SIZE - sr_entries;
2916 if (srwm < 0)
2917 srwm = 1;
2918 srwm &= 0x3f;
2919 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2920 } else {
2921 /* Turn off self refresh if both pipes are enabled */
2922 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2923 & ~FW_BLC_SELF_EN);
2924 }
2925
2926 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2927 srwm);
2928
2929 /* 965 has limitations... */
2930 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
2931 (8 << 0));
2932 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
2933}
2934
2935static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2936 int planeb_clock, int sr_hdisplay, int pixel_size)
2937{
2938 struct drm_i915_private *dev_priv = dev->dev_private;
2939 uint32_t fwater_lo;
2940 uint32_t fwater_hi;
2941 int total_size, cacheline_size, cwm, srwm = 1;
2942 int planea_wm, planeb_wm;
2943 struct intel_watermark_params planea_params, planeb_params;
2944 unsigned long line_time_us;
2945 int sr_clock, sr_entries = 0;
2946
dff33cfc 2947 /* Create copies of the base settings for each pipe */
7662c8bd 2948 if (IS_I965GM(dev) || IS_I945GM(dev))
dff33cfc 2949 planea_params = planeb_params = i945_wm_info;
7662c8bd 2950 else if (IS_I9XX(dev))
dff33cfc 2951 planea_params = planeb_params = i915_wm_info;
7662c8bd 2952 else
dff33cfc 2953 planea_params = planeb_params = i855_wm_info;
7662c8bd 2954
2955 /* Grab a couple of global values before we overwrite them */
2956 total_size = planea_params.fifo_size;
2957 cacheline_size = planea_params.cacheline_size;
7662c8bd 2958
dff33cfc 2959 /* Update per-plane FIFO sizes */
2960 planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
2961 planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
7662c8bd 2962
2963 planea_wm = intel_calculate_wm(planea_clock, &planea_params,
2964 pixel_size, latency_ns);
2965 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
2966 pixel_size, latency_ns);
28c97730 2967 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2968
2969 /*
2970 * Overlay gets an aggressive default since video jitter is bad.
2971 */
2972 cwm = 2;
2973
dff33cfc 2974 /* Calc sr entries for one plane configs */
2975 if (HAS_FW_BLC(dev) && sr_hdisplay &&
2976 (!planea_clock || !planeb_clock)) {
dff33cfc 2977 /* self-refresh has much higher latency */
69e302a9 2978 static const int sr_latency_ns = 6000;
dff33cfc 2979
7662c8bd 2980 sr_clock = planea_clock ? planea_clock : planeb_clock;
dff33cfc
JB
2981 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2982
2983 /* Use ns/us then divide to preserve precision */
2984 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2985 pixel_size * sr_hdisplay) / 1000;
2986 sr_entries = roundup(sr_entries / cacheline_size, 1);
28c97730 2987 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
2988 srwm = total_size - sr_entries;
2989 if (srwm < 0)
2990 srwm = 1;
2991
2992 if (IS_I945G(dev) || IS_I945GM(dev))
2993 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2994 else if (IS_I915GM(dev)) {
2995 /* 915M has a smaller SRWM field */
2996 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2997 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2998 }
2999 } else {
3000 /* Turn off self refresh if both pipes are enabled */
3001 if (IS_I945G(dev) || IS_I945GM(dev)) {
3002 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
3003 & ~FW_BLC_SELF_EN);
3004 } else if (IS_I915GM(dev)) {
3005 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
3006 }
3007 }
3008
28c97730 3009 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
dff33cfc 3010 planea_wm, planeb_wm, cwm, srwm);
7662c8bd 3011
3012 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
3013 fwater_hi = (cwm & 0x1f);
3014
3015 /* Set request length to 8 cachelines per fetch */
3016 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
3017 fwater_hi = fwater_hi | (1 << 8);
3018
3019 I915_WRITE(FW_BLC, fwater_lo);
3020 I915_WRITE(FW_BLC2, fwater_hi);
3021}
3022
3023static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3024 int unused2, int pixel_size)
3025{
3026 struct drm_i915_private *dev_priv = dev->dev_private;
f3601326 3027 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
dff33cfc 3028 int planea_wm;
7662c8bd 3029
e70236a8 3030 i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
7662c8bd 3031
3032 planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
3033 pixel_size, latency_ns);
3034 fwater_lo |= (3<<8) | planea_wm;
3035
28c97730 3036 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
3037
3038 I915_WRITE(FW_BLC, fwater_lo);
3039}
3040
3041#define ILK_LP0_PLANE_LATENCY 700
3042
3043static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3044 int planeb_clock, int sr_hdisplay, int pixel_size)
3045{
3046 struct drm_i915_private *dev_priv = dev->dev_private;
3047 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
3048 int sr_wm, cursor_wm;
3049 unsigned long line_time_us;
3050 int sr_clock, entries_required;
3051 u32 reg_value;
3052
3053 /* Calculate and update the watermark for plane A */
3054 if (planea_clock) {
3055 entries_required = ((planea_clock / 1000) * pixel_size *
3056 ILK_LP0_PLANE_LATENCY) / 1000;
3057 entries_required = DIV_ROUND_UP(entries_required,
3058 ironlake_display_wm_info.cacheline_size);
3059 planea_wm = entries_required +
3060 ironlake_display_wm_info.guard_size;
3061
3062 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3063 planea_wm = ironlake_display_wm_info.max_wm;
3064
3065 cursora_wm = 16;
3066 reg_value = I915_READ(WM0_PIPEA_ILK);
3067 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3068 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
3069 (cursora_wm & WM0_PIPE_CURSOR_MASK);
3070 I915_WRITE(WM0_PIPEA_ILK, reg_value);
3071 DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
3072 "cursor: %d\n", planea_wm, cursora_wm);
3073 }
3074 /* Calculate and update the watermark for plane B */
3075 if (planeb_clock) {
3076 entries_required = ((planeb_clock / 1000) * pixel_size *
3077 ILK_LP0_PLANE_LATENCY) / 1000;
3078 entries_required = DIV_ROUND_UP(entries_required,
3079 ironlake_display_wm_info.cacheline_size);
3080 planeb_wm = entries_required +
3081 ironlake_display_wm_info.guard_size;
3082
3083 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3084 planeb_wm = ironlake_display_wm_info.max_wm;
3085
3086 cursorb_wm = 16;
3087 reg_value = I915_READ(WM0_PIPEB_ILK);
3088 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3089 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
3090 (cursorb_wm & WM0_PIPE_CURSOR_MASK);
3091 I915_WRITE(WM0_PIPEB_ILK, reg_value);
3092 DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
3093 "cursor: %d\n", planeb_wm, cursorb_wm);
3094 }
3095
3096 /*
3097 * Calculate and update the self-refresh watermark only when one
3098 * display plane is used.
3099 */
3100 if (!planea_clock || !planeb_clock) {
3101 int line_count;
3102 /* Read the self-refresh latency. The unit is 0.5us */
3103 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3104
3105 sr_clock = planea_clock ? planea_clock : planeb_clock;
3106 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
3107
3108 /* Use ns/us then divide to preserve precision */
3109 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
3110 / 1000;
3111
3112 /* calculate the self-refresh watermark for display plane */
3113 entries_required = line_count * sr_hdisplay * pixel_size;
3114 entries_required = DIV_ROUND_UP(entries_required,
3115 ironlake_display_srwm_info.cacheline_size);
3116 sr_wm = entries_required +
3117 ironlake_display_srwm_info.guard_size;
3118
3119 /* calculate the self-refresh watermark for display cursor */
3120 entries_required = line_count * pixel_size * 64;
3121 entries_required = DIV_ROUND_UP(entries_required,
3122 ironlake_cursor_srwm_info.cacheline_size);
3123 cursor_wm = entries_required +
3124 ironlake_cursor_srwm_info.guard_size;
3125
3126 /* configure watermark and enable self-refresh */
3127 reg_value = I915_READ(WM1_LP_ILK);
3128 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3129 WM1_LP_CURSOR_MASK);
3130 reg_value |= WM1_LP_SR_EN |
3131 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3132 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3133
3134 I915_WRITE(WM1_LP_ILK, reg_value);
3135 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3136 "cursor %d\n", sr_wm, cursor_wm);
3137
3138 } else {
3139 /* Turn off self refresh if both pipes are enabled */
3140 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
3141 }
3142}
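/*
 * Summary of the Ironlake path above: the per-pipe WM0 registers are sized
 * from ILK_LP0_PLANE_LATENCY (used the same way as the nanosecond latencies
 * elsewhere in this file), while the WM1 self-refresh watermark is derived
 * from the MLTR_ILK latency field and is only enabled while a single plane
 * is active.
 */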
3143/**
3144 * intel_update_watermarks - update FIFO watermark values based on current modes
3145 *
3146 * Calculate watermark values for the various WM regs based on current mode
3147 * and plane configuration.
3148 *
3149 * There are several cases to deal with here:
3150 * - normal (i.e. non-self-refresh)
3151 * - self-refresh (SR) mode
3152 * - lines are large relative to FIFO size (buffer can hold up to 2)
3153 * - lines are small relative to FIFO size (buffer can hold more than 2
3154 * lines), so need to account for TLB latency
3155 *
3156 * The normal calculation is:
3157 * watermark = dotclock * bytes per pixel * latency
3158 * where latency is platform & configuration dependent (we assume pessimal
3159 * values here).
3160 *
3161 * The SR calculation is:
3162 * watermark = (trunc(latency/line time)+1) * surface width *
3163 * bytes per pixel
3164 * where
3165 * line time = htotal / dotclock
3166 * and latency is assumed to be high, as above.
3167 *
3168 * The final value programmed to the register should always be rounded up,
3169 * and include an extra 2 entries to account for clock crossings.
3170 *
3171 * We don't use the sprite, so we can ignore that. And on Crestline we have
3172 * to set the non-SR watermarks to 8.
3173 */
3174static void intel_update_watermarks(struct drm_device *dev)
3175{
e70236a8 3176 struct drm_i915_private *dev_priv = dev->dev_private;
3177 struct drm_crtc *crtc;
3178 struct intel_crtc *intel_crtc;
3179 int sr_hdisplay = 0;
3180 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
3181 int enabled = 0, pixel_size = 0;
3182
3183 if (!dev_priv->display.update_wm)
3184 return;
3185
3186 /* Get the clock config from both planes */
3187 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3188 intel_crtc = to_intel_crtc(crtc);
3189 if (crtc->enabled) {
3190 enabled++;
3191 if (intel_crtc->plane == 0) {
28c97730 3192 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
3193 intel_crtc->pipe, crtc->mode.clock);
3194 planea_clock = crtc->mode.clock;
3195 } else {
28c97730 3196 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
3197 intel_crtc->pipe, crtc->mode.clock);
3198 planeb_clock = crtc->mode.clock;
3199 }
3200 sr_hdisplay = crtc->mode.hdisplay;
3201 sr_clock = crtc->mode.clock;
3202 if (crtc->fb)
3203 pixel_size = crtc->fb->bits_per_pixel / 8;
3204 else
3205 pixel_size = 4; /* by default */
3206 }
3207 }
3208
3209 if (enabled <= 0)
3210 return;
3211
3212 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
3213 sr_hdisplay, pixel_size);
3214}
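/*
 * intel_update_watermarks() itself is platform independent: it walks the
 * CRTC list, records the clock of each enabled plane plus the hdisplay and
 * pixel size of the last enabled CRTC, and hands those values to the
 * per-platform dev_priv->display.update_wm hook, returning early when the
 * platform does not provide one.
 */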
3215
3216static int intel_crtc_mode_set(struct drm_crtc *crtc,
3217 struct drm_display_mode *mode,
3218 struct drm_display_mode *adjusted_mode,
3219 int x, int y,
3220 struct drm_framebuffer *old_fb)
3221{
3222 struct drm_device *dev = crtc->dev;
3223 struct drm_i915_private *dev_priv = dev->dev_private;
3224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3225 int pipe = intel_crtc->pipe;
80824003 3226 int plane = intel_crtc->plane;
3227 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
3228 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3229 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
80824003 3230 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
79e53945
JB
3231 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
3232 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
3233 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
3234 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
3235 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
3236 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
3237 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
80824003
JB
3238 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
3239 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
79e53945 3240 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
c751ce4f 3241 int refclk, num_connectors = 0;
652c393a
JB
3242 intel_clock_t clock, reduced_clock;
3243 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
3244 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
a4fc5ed6 3245 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
32f9d658 3246 bool is_edp = false;
79e53945 3247 struct drm_mode_config *mode_config = &dev->mode_config;
c5e4df33 3248 struct drm_encoder *encoder;
55f78c43 3249 struct intel_encoder *intel_encoder = NULL;
d4906093 3250 const intel_limit_t *limit;
5c3b82e2 3251 int ret;
2c07245f
ZW
3252 struct fdi_m_n m_n = {0};
3253 int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
3254 int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
3255 int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
3256 int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
3257 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
3258 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
3259 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
8db9d77b
ZW
3260 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
3261 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
541998a1 3262 int lvds_reg = LVDS;
2c07245f
ZW
3263 u32 temp;
3264 int sdvo_pixel_multiply;
5eb08b69 3265 int target_clock;
79e53945
JB
3266
3267 drm_vblank_pre_modeset(dev, pipe);
3268
c5e4df33 3269 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
79e53945 3270
c5e4df33 3271 if (!encoder || encoder->crtc != crtc)
79e53945
JB
3272 continue;
3273
c5e4df33
ZW
3274 intel_encoder = enc_to_intel_encoder(encoder);
3275
21d40d37 3276 switch (intel_encoder->type) {
79e53945
JB
3277 case INTEL_OUTPUT_LVDS:
3278 is_lvds = true;
3279 break;
3280 case INTEL_OUTPUT_SDVO:
7d57382e 3281 case INTEL_OUTPUT_HDMI:
79e53945 3282 is_sdvo = true;
21d40d37 3283 if (intel_encoder->needs_tv_clock)
e2f0ba97 3284 is_tv = true;
79e53945
JB
3285 break;
3286 case INTEL_OUTPUT_DVO:
3287 is_dvo = true;
3288 break;
3289 case INTEL_OUTPUT_TVOUT:
3290 is_tv = true;
3291 break;
3292 case INTEL_OUTPUT_ANALOG:
3293 is_crt = true;
3294 break;
a4fc5ed6
KP
3295 case INTEL_OUTPUT_DISPLAYPORT:
3296 is_dp = true;
3297 break;
32f9d658
ZW
3298 case INTEL_OUTPUT_EDP:
3299 is_edp = true;
3300 break;
79e53945 3301 }
43565a06 3302
c751ce4f 3303 num_connectors++;
79e53945
JB
3304 }
3305
c751ce4f 3306 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
43565a06 3307 refclk = dev_priv->lvds_ssc_freq * 1000;
28c97730
ZY
3308 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3309 refclk / 1000);
43565a06 3310 } else if (IS_I9XX(dev)) {
79e53945 3311 refclk = 96000;
bad720ff 3312 if (HAS_PCH_SPLIT(dev))
2c07245f 3313 refclk = 120000; /* 120MHz refclk */
79e53945
JB
3314 } else {
3315 refclk = 48000;
3316 }
a4fc5ed6 3317
79e53945 3318
d4906093
ML
3319 /*
3320 * Returns a set of divisors for the desired target clock with the given
3321 * refclk, or FALSE. The returned values represent the clock equation:
3322 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
3323 */
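/*
 * Editor's note (illustrative, divisor values assumed): plugging sample
 * divisors into the equation above with refclk = 96000 kHz,
 * n = 2, m1 = 14, m2 = 8, p1 = 2, p2 = 10 gives
 *   m   = 5 * (14 + 2) + (8 + 2)      = 90
 *   vco = 96000 * 90 / (2 + 2)        = 2,160,000 kHz
 *   dot = vco / (p1 * p2) = 2160000/20 = 108,000 kHz
 * which lands inside the i9xx VCO and dot-clock limits.
 */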
3324 limit = intel_limit(crtc);
3325 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
79e53945
JB
3326 if (!ok) {
3327 DRM_ERROR("Couldn't find PLL settings for mode!\n");
1f803ee5 3328 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 3329 return -EINVAL;
79e53945
JB
3330 }
3331
ddc9003c
ZY
3332 if (is_lvds && dev_priv->lvds_downclock_avail) {
3333 has_reduced_clock = limit->find_pll(limit, crtc,
18f9ed12 3334 dev_priv->lvds_downclock,
652c393a
JB
3335 refclk,
3336 &reduced_clock);
18f9ed12
ZY
3337 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
3338 /*
3339 * If a different P value is found, it means we cannot
3340 * switch the display clock using only FP0/FP1.
3341 * In that case, disable the LVDS downclock
3342 * feature.
3343 */
3344 DRM_DEBUG_KMS("Different P is found for "
3345 "LVDS clock/downclock\n");
3346 has_reduced_clock = 0;
3347 }
652c393a 3348 }
7026d4ac
ZW
3349 /* SDVO TV has fixed PLL values that depend on its clock range;
3350 this mirrors the VBIOS setting. */
3351 if (is_sdvo && is_tv) {
3352 if (adjusted_mode->clock >= 100000
3353 && adjusted_mode->clock < 140500) {
3354 clock.p1 = 2;
3355 clock.p2 = 10;
3356 clock.n = 3;
3357 clock.m1 = 16;
3358 clock.m2 = 8;
3359 } else if (adjusted_mode->clock >= 140500
3360 && adjusted_mode->clock <= 200000) {
3361 clock.p1 = 1;
3362 clock.p2 = 10;
3363 clock.n = 6;
3364 clock.m1 = 12;
3365 clock.m2 = 8;
3366 }
3367 }
3368
2c07245f 3369 /* FDI link */
bad720ff 3370 if (HAS_PCH_SPLIT(dev)) {
58a27471 3371 int lane, link_bw, bpp;
32f9d658
ZW
3372 /* eDP doesn't require an FDI link, so just set DP M/N
3373 according to the current link config */
3374 if (is_edp) {
5eb08b69 3375 target_clock = mode->clock;
55f78c43 3376 intel_edp_link_config(intel_encoder,
32f9d658
ZW
3377 &lane, &link_bw);
3378 } else {
3379 /* DP over FDI requires target mode clock
3380 instead of link clock */
3381 if (is_dp)
3382 target_clock = mode->clock;
3383 else
3384 target_clock = adjusted_mode->clock;
3385 lane = 4;
3386 link_bw = 270000;
3387 }
58a27471
ZW
3388
3389 /* determine panel color depth */
3390 temp = I915_READ(pipeconf_reg);
e5a95eb7
ZY
3391 temp &= ~PIPE_BPC_MASK;
3392 if (is_lvds) {
3393 int lvds_reg = I915_READ(PCH_LVDS);
3394 /* the BPC will be 6 if it is an 18-bit LVDS panel */
3395 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3396 temp |= PIPE_8BPC;
3397 else
3398 temp |= PIPE_6BPC;
885a5fb5
ZW
3399 } else if (is_edp) {
3400 switch (dev_priv->edp_bpp/3) {
3401 case 8:
3402 temp |= PIPE_8BPC;
3403 break;
3404 case 10:
3405 temp |= PIPE_10BPC;
3406 break;
3407 case 6:
3408 temp |= PIPE_6BPC;
3409 break;
3410 case 12:
3411 temp |= PIPE_12BPC;
3412 break;
3413 }
e5a95eb7
ZY
3414 } else
3415 temp |= PIPE_8BPC;
3416 I915_WRITE(pipeconf_reg, temp);
3417 I915_READ(pipeconf_reg);
58a27471
ZW
3418
3419 switch (temp & PIPE_BPC_MASK) {
3420 case PIPE_8BPC:
3421 bpp = 24;
3422 break;
3423 case PIPE_10BPC:
3424 bpp = 30;
3425 break;
3426 case PIPE_6BPC:
3427 bpp = 18;
3428 break;
3429 case PIPE_12BPC:
3430 bpp = 36;
3431 break;
3432 default:
3433 DRM_ERROR("unknown pipe bpc value\n");
3434 bpp = 24;
3435 }
3436
f2b115e6 3437 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
5eb08b69 3438 }
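/*
 * Editor's note: a rough sketch of what the M/N computation above
 * encodes (the helper's internals are not shown here, so treat this as
 * an approximation): the data M/N ratio is about
 *   (pixel clock * bpp) / (link clock * lanes * 8)
 * e.g. a 148,500 kHz mode at 24 bpp over 4 lanes at 270,000 kHz is
 *   148500 * 24 / (270000 * 4 * 8) ~= 0.41 of the link capacity.
 */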
2c07245f 3439
c038e51e
ZW
3440 /* Ironlake: try to set up the display reference clock before
3441 * enabling the DPLL. This is only under the driver's control
3442 * after the PCH B stepping; earlier chipset steppings
3443 * ignore this setting.
3444 */
bad720ff 3445 if (HAS_PCH_SPLIT(dev)) {
c038e51e
ZW
3446 temp = I915_READ(PCH_DREF_CONTROL);
3447 /* Always enable nonspread source */
3448 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
3449 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
3450 I915_WRITE(PCH_DREF_CONTROL, temp);
3451 POSTING_READ(PCH_DREF_CONTROL);
3452
3453 temp &= ~DREF_SSC_SOURCE_MASK;
3454 temp |= DREF_SSC_SOURCE_ENABLE;
3455 I915_WRITE(PCH_DREF_CONTROL, temp);
3456 POSTING_READ(PCH_DREF_CONTROL);
3457
3458 udelay(200);
3459
3460 if (is_edp) {
3461 if (dev_priv->lvds_use_ssc) {
3462 temp |= DREF_SSC1_ENABLE;
3463 I915_WRITE(PCH_DREF_CONTROL, temp);
3464 POSTING_READ(PCH_DREF_CONTROL);
3465
3466 udelay(200);
3467
3468 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
3469 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
3470 I915_WRITE(PCH_DREF_CONTROL, temp);
3471 POSTING_READ(PCH_DREF_CONTROL);
3472 } else {
3473 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
3474 I915_WRITE(PCH_DREF_CONTROL, temp);
3475 POSTING_READ(PCH_DREF_CONTROL);
3476 }
3477 }
3478 }
3479
f2b115e6 3480 if (IS_PINEVIEW(dev)) {
2177832f 3481 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
652c393a
JB
3482 if (has_reduced_clock)
3483 fp2 = (1 << reduced_clock.n) << 16 |
3484 reduced_clock.m1 << 8 | reduced_clock.m2;
3485 } else {
2177832f 3486 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
652c393a
JB
3487 if (has_reduced_clock)
3488 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
3489 reduced_clock.m2;
3490 }
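/*
 * Editor's note (illustrative bit layout, values assumed): for the
 * non-Pineview case above, n = 2, m1 = 14 (0x0e), m2 = 8 gives
 *   fp = 2 << 16 | 14 << 8 | 8 = 0x00020e08
 * Pineview stores (1 << n) in the N field instead of n itself.
 */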
79e53945 3491
bad720ff 3492 if (!HAS_PCH_SPLIT(dev))
2c07245f
ZW
3493 dpll = DPLL_VGA_MODE_DIS;
3494
79e53945
JB
3495 if (IS_I9XX(dev)) {
3496 if (is_lvds)
3497 dpll |= DPLLB_MODE_LVDS;
3498 else
3499 dpll |= DPLLB_MODE_DAC_SERIAL;
3500 if (is_sdvo) {
3501 dpll |= DPLL_DVO_HIGH_SPEED;
2c07245f 3502 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
942642a4 3503 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
79e53945 3504 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
bad720ff 3505 else if (HAS_PCH_SPLIT(dev))
2c07245f 3506 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
79e53945 3507 }
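/*
 * Editor's note (illustrative): sdvo_pixel_multiply above is simply the
 * ratio of the adjusted (port) clock to the mode clock; e.g. a
 * 25,175 kHz mode carried at 50,350 kHz gives a multiplier of 2,
 * programmed as (2 - 1) in the multiplier field.
 */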
a4fc5ed6
KP
3508 if (is_dp)
3509 dpll |= DPLL_DVO_HIGH_SPEED;
79e53945
JB
3510
3511 /* compute bitmask from p1 value */
f2b115e6
AJ
3512 if (IS_PINEVIEW(dev))
3513 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
2c07245f 3514 else {
2177832f 3515 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2c07245f 3516 /* also FPA1 */
bad720ff 3517 if (HAS_PCH_SPLIT(dev))
2c07245f 3518 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
652c393a
JB
3519 if (IS_G4X(dev) && has_reduced_clock)
3520 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2c07245f 3521 }
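/*
 * Editor's note (illustrative): the P1 field is one-hot encoded, e.g.
 * p1 = 2 sets bit (2 - 1) = bit 1 within the field, i.e. 0x2 before the
 * post-divider shift is applied.
 */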
79e53945
JB
3522 switch (clock.p2) {
3523 case 5:
3524 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
3525 break;
3526 case 7:
3527 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
3528 break;
3529 case 10:
3530 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
3531 break;
3532 case 14:
3533 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3534 break;
3535 }
bad720ff 3536 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
79e53945
JB
3537 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3538 } else {
3539 if (is_lvds) {
3540 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3541 } else {
3542 if (clock.p1 == 2)
3543 dpll |= PLL_P1_DIVIDE_BY_TWO;
3544 else
3545 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3546 if (clock.p2 == 4)
3547 dpll |= PLL_P2_DIVIDE_BY_4;
3548 }
3549 }
3550
43565a06
KH
3551 if (is_sdvo && is_tv)
3552 dpll |= PLL_REF_INPUT_TVCLKINBC;
3553 else if (is_tv)
79e53945 3554 /* XXX: just matching BIOS for now */
43565a06 3555 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
79e53945 3556 dpll |= 3;
c751ce4f 3557 else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
43565a06 3558 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
3559 else
3560 dpll |= PLL_REF_INPUT_DREFCLK;
3561
3562 /* setup pipeconf */
3563 pipeconf = I915_READ(pipeconf_reg);
3564
3565 /* Set up the display plane register */
3566 dspcntr = DISPPLANE_GAMMA_ENABLE;
3567
f2b115e6 3568 /* Ironlake's plane is fixed to its pipe; bit 24 enables
2c07245f 3569 color space conversion */
bad720ff 3570 if (!HAS_PCH_SPLIT(dev)) {
2c07245f 3571 if (pipe == 0)
80824003 3572 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
2c07245f
ZW
3573 else
3574 dspcntr |= DISPPLANE_SEL_PIPE_B;
3575 }
79e53945
JB
3576
3577 if (pipe == 0 && !IS_I965G(dev)) {
3578 /* Enable pixel doubling when the dot clock is > 90% of the (display)
3579 * core speed.
3580 *
3581 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
3582 * pipe == 0 check?
3583 */
e70236a8
JB
3584 if (mode->clock >
3585 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
79e53945
JB
3586 pipeconf |= PIPEACONF_DOUBLE_WIDE;
3587 else
3588 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
3589 }
3590
79e53945 3591 /* Disable the panel fitter if it was on our pipe */
bad720ff 3592 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
79e53945
JB
3593 I915_WRITE(PFIT_CONTROL, 0);
3594
28c97730 3595 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
79e53945
JB
3596 drm_mode_debug_printmodeline(mode);
3597
f2b115e6 3598 /* assign to Ironlake registers */
bad720ff 3599 if (HAS_PCH_SPLIT(dev)) {
2c07245f
ZW
3600 fp_reg = pch_fp_reg;
3601 dpll_reg = pch_dpll_reg;
3602 }
79e53945 3603
32f9d658 3604 if (is_edp) {
f2b115e6 3605 ironlake_disable_pll_edp(crtc);
32f9d658 3606 } else if ((dpll & DPLL_VCO_ENABLE)) {
79e53945
JB
3607 I915_WRITE(fp_reg, fp);
3608 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
3609 I915_READ(dpll_reg);
3610 udelay(150);
3611 }
3612
8db9d77b
ZW
3613 /* enable transcoder DPLL */
3614 if (HAS_PCH_CPT(dev)) {
3615 temp = I915_READ(PCH_DPLL_SEL);
3616 if (trans_dpll_sel == 0)
3617 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3618 else
3619 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3620 I915_WRITE(PCH_DPLL_SEL, temp);
3621 I915_READ(PCH_DPLL_SEL);
3622 udelay(150);
3623 }
3624
79e53945
JB
3625 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3626 * This is an exception to the general rule that mode_set doesn't turn
3627 * things on.
3628 */
3629 if (is_lvds) {
541998a1 3630 u32 lvds;
79e53945 3631
bad720ff 3632 if (HAS_PCH_SPLIT(dev))
541998a1
ZW
3633 lvds_reg = PCH_LVDS;
3634
3635 lvds = I915_READ(lvds_reg);
0f3ee801 3636 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
b3b095b3
ZW
3637 if (pipe == 1) {
3638 if (HAS_PCH_CPT(dev))
3639 lvds |= PORT_TRANS_B_SEL_CPT;
3640 else
3641 lvds |= LVDS_PIPEB_SELECT;
3642 } else {
3643 if (HAS_PCH_CPT(dev))
3644 lvds &= ~PORT_TRANS_SEL_MASK;
3645 else
3646 lvds &= ~LVDS_PIPEB_SELECT;
3647 }
a3e17eb8
ZY
3648 /* set the corresponding LVDS_BORDER bit */
3649 lvds |= dev_priv->lvds_border_bits;
79e53945
JB
3650 /* Set the B0-B3 data pairs corresponding to whether we're going to
3651 * set the DPLLs for dual-channel mode or not.
3652 */
3653 if (clock.p2 == 7)
3654 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3655 else
3656 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3657
3658 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3659 * appropriately here, but we need to look more thoroughly into how
3660 * panels behave in the two modes.
3661 */
898822ce
ZY
3662 /* set the dithering flag */
3663 if (IS_I965G(dev)) {
3664 if (dev_priv->lvds_dither) {
c619eed4 3665 if (HAS_PCH_SPLIT(dev))
898822ce
ZY
3666 pipeconf |= PIPE_ENABLE_DITHER;
3667 else
3668 lvds |= LVDS_ENABLE_DITHER;
3669 } else {
c619eed4 3670 if (HAS_PCH_SPLIT(dev))
898822ce
ZY
3671 pipeconf &= ~PIPE_ENABLE_DITHER;
3672 else
3673 lvds &= ~LVDS_ENABLE_DITHER;
3674 }
3675 }
541998a1
ZW
3676 I915_WRITE(lvds_reg, lvds);
3677 I915_READ(lvds_reg);
79e53945 3678 }
a4fc5ed6
KP
3679 if (is_dp)
3680 intel_dp_set_m_n(crtc, mode, adjusted_mode);
8db9d77b
ZW
3681 else if (HAS_PCH_SPLIT(dev)) {
3682 /* For non-DP output, clear any trans DP clock recovery setting. */
3683 if (pipe == 0) {
3684 I915_WRITE(TRANSA_DATA_M1, 0);
3685 I915_WRITE(TRANSA_DATA_N1, 0);
3686 I915_WRITE(TRANSA_DP_LINK_M1, 0);
3687 I915_WRITE(TRANSA_DP_LINK_N1, 0);
3688 } else {
3689 I915_WRITE(TRANSB_DATA_M1, 0);
3690 I915_WRITE(TRANSB_DATA_N1, 0);
3691 I915_WRITE(TRANSB_DP_LINK_M1, 0);
3692 I915_WRITE(TRANSB_DP_LINK_N1, 0);
3693 }
3694 }
79e53945 3695
32f9d658
ZW
3696 if (!is_edp) {
3697 I915_WRITE(fp_reg, fp);
79e53945 3698 I915_WRITE(dpll_reg, dpll);
32f9d658
ZW
3699 I915_READ(dpll_reg);
3700 /* Wait for the clocks to stabilize. */
3701 udelay(150);
3702
bad720ff 3703 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
bb66c512
ZY
3704 if (is_sdvo) {
3705 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3706 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
32f9d658 3707 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
bb66c512
ZY
3708 } else
3709 I915_WRITE(dpll_md_reg, 0);
32f9d658
ZW
3710 } else {
3711 /* write it again -- the BIOS does, after all */
3712 I915_WRITE(dpll_reg, dpll);
3713 }
3714 I915_READ(dpll_reg);
3715 /* Wait for the clocks to stabilize. */
3716 udelay(150);
79e53945 3717 }
79e53945 3718
652c393a
JB
3719 if (is_lvds && has_reduced_clock && i915_powersave) {
3720 I915_WRITE(fp_reg + 4, fp2);
3721 intel_crtc->lowfreq_avail = true;
3722 if (HAS_PIPE_CXSR(dev)) {
28c97730 3723 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
652c393a
JB
3724 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
3725 }
3726 } else {
3727 I915_WRITE(fp_reg + 4, fp);
3728 intel_crtc->lowfreq_avail = false;
3729 if (HAS_PIPE_CXSR(dev)) {
28c97730 3730 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
652c393a
JB
3731 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
3732 }
3733 }
3734
79e53945
JB
3735 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
3736 ((adjusted_mode->crtc_htotal - 1) << 16));
3737 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
3738 ((adjusted_mode->crtc_hblank_end - 1) << 16));
3739 I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
3740 ((adjusted_mode->crtc_hsync_end - 1) << 16));
3741 I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
3742 ((adjusted_mode->crtc_vtotal - 1) << 16));
3743 I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
3744 ((adjusted_mode->crtc_vblank_end - 1) << 16));
3745 I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
3746 ((adjusted_mode->crtc_vsync_end - 1) << 16));
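/*
 * Editor's note (illustrative, standard VESA 1024x768@60 timings
 * assumed): each timing register above packs one value minus 1 in the
 * low 16 bits and the other minus 1 in the high 16 bits, e.g.
 * hdisplay = 1024, htotal = 1344 gives
 *   HTOTAL = ((1344 - 1) << 16) | (1024 - 1) = 0x053f03ff
 */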
3747 /* pipesrc and dspsize control the size that is scaled from, which should
3748 * always be the user's requested size.
3749 */
bad720ff 3750 if (!HAS_PCH_SPLIT(dev)) {
2c07245f
ZW
3751 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3752 (mode->hdisplay - 1));
3753 I915_WRITE(dsppos_reg, 0);
3754 }
79e53945 3755 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
2c07245f 3756
bad720ff 3757 if (HAS_PCH_SPLIT(dev)) {
2c07245f
ZW
3758 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3759 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3760 I915_WRITE(link_m1_reg, m_n.link_m);
3761 I915_WRITE(link_n1_reg, m_n.link_n);
3762
32f9d658 3763 if (is_edp) {
f2b115e6 3764 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
32f9d658
ZW
3765 } else {
3766 /* enable FDI RX PLL too */
3767 temp = I915_READ(fdi_rx_reg);
3768 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
8db9d77b
ZW
3769 I915_READ(fdi_rx_reg);
3770 udelay(200);
3771
3772 /* enable FDI TX PLL too */
3773 temp = I915_READ(fdi_tx_reg);
3774 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
3775 I915_READ(fdi_tx_reg);
3776
3777 /* enable FDI RX PCDCLK */
3778 temp = I915_READ(fdi_rx_reg);
3779 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
3780 I915_READ(fdi_rx_reg);
32f9d658
ZW
3781 udelay(200);
3782 }
2c07245f
ZW
3783 }
3784
79e53945
JB
3785 I915_WRITE(pipeconf_reg, pipeconf);
3786 I915_READ(pipeconf_reg);
3787
3788 intel_wait_for_vblank(dev);
3789
c2416fc6 3790 if (IS_IRONLAKE(dev)) {
553bd149
ZW
3791 /* enable address swizzling for tiled buffers */
3792 temp = I915_READ(DISP_ARB_CTL);
3793 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
3794 }
3795
79e53945
JB
3796 I915_WRITE(dspcntr_reg, dspcntr);
3797
3798 /* Flush the plane changes */
5c3b82e2 3799 ret = intel_pipe_set_base(crtc, x, y, old_fb);
7662c8bd 3800
74dff282
JB
3801 if ((IS_I965G(dev) || plane == 0))
3802 intel_update_fbc(crtc, &crtc->mode);
e70236a8 3803
7662c8bd
SL
3804 intel_update_watermarks(dev);
3805
79e53945 3806 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 3807
1f803ee5 3808 return ret;
79e53945
JB
3809}
3810
3811/** Loads the palette/gamma unit for the CRTC with the prepared values */
3812void intel_crtc_load_lut(struct drm_crtc *crtc)
3813{
3814 struct drm_device *dev = crtc->dev;
3815 struct drm_i915_private *dev_priv = dev->dev_private;
3816 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3817 int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
3818 int i;
3819
3820 /* The clocks have to be on to load the palette. */
3821 if (!crtc->enabled)
3822 return;
3823
f2b115e6 3824 /* use legacy palette for Ironlake */
bad720ff 3825 if (HAS_PCH_SPLIT(dev))
2c07245f
ZW
3826 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3827 LGC_PALETTE_B;
3828
79e53945
JB
3829 for (i = 0; i < 256; i++) {
3830 I915_WRITE(palreg + 4 * i,
3831 (intel_crtc->lut_r[i] << 16) |
3832 (intel_crtc->lut_g[i] << 8) |
3833 intel_crtc->lut_b[i]);
3834 }
3835}
3836
3837static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3838 struct drm_file *file_priv,
3839 uint32_t handle,
3840 uint32_t width, uint32_t height)
3841{
3842 struct drm_device *dev = crtc->dev;
3843 struct drm_i915_private *dev_priv = dev->dev_private;
3844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3845 struct drm_gem_object *bo;
3846 struct drm_i915_gem_object *obj_priv;
3847 int pipe = intel_crtc->pipe;
3848 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
3849 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
14b60391 3850 uint32_t temp = I915_READ(control);
79e53945 3851 size_t addr;
3f8bc370 3852 int ret;
79e53945 3853
28c97730 3854 DRM_DEBUG_KMS("\n");
79e53945
JB
3855
3856 /* if we want to turn off the cursor ignore width and height */
3857 if (!handle) {
28c97730 3858 DRM_DEBUG_KMS("cursor off\n");
14b60391
JB
3859 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3860 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
3861 temp |= CURSOR_MODE_DISABLE;
3862 } else {
3863 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
3864 }
3f8bc370
KH
3865 addr = 0;
3866 bo = NULL;
5004417d 3867 mutex_lock(&dev->struct_mutex);
3f8bc370 3868 goto finish;
79e53945
JB
3869 }
3870
3871 /* Currently we only support 64x64 cursors */
3872 if (width != 64 || height != 64) {
3873 DRM_ERROR("we currently only support 64x64 cursors\n");
3874 return -EINVAL;
3875 }
3876
3877 bo = drm_gem_object_lookup(dev, file_priv, handle);
3878 if (!bo)
3879 return -ENOENT;
3880
23010e43 3881 obj_priv = to_intel_bo(bo);
79e53945
JB
3882
3883 if (bo->size < width * height * 4) {
3884 DRM_ERROR("buffer is to small\n");
34b8686e
DA
3885 ret = -ENOMEM;
3886 goto fail;
79e53945
JB
3887 }
3888
71acb5eb 3889 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 3890 mutex_lock(&dev->struct_mutex);
b295d1b6 3891 if (!dev_priv->info->cursor_needs_physical) {
71acb5eb
DA
3892 ret = i915_gem_object_pin(bo, PAGE_SIZE);
3893 if (ret) {
3894 DRM_ERROR("failed to pin cursor bo\n");
7f9872e0 3895 goto fail_locked;
71acb5eb 3896 }
79e53945 3897 addr = obj_priv->gtt_offset;
71acb5eb
DA
3898 } else {
3899 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
3900 if (ret) {
3901 DRM_ERROR("failed to attach phys object\n");
7f9872e0 3902 goto fail_locked;
71acb5eb
DA
3903 }
3904 addr = obj_priv->phys_obj->handle->busaddr;
3f8bc370
KH
3905 }
3906
14b60391
JB
3907 if (!IS_I9XX(dev))
3908 I915_WRITE(CURSIZE, (height << 12) | width);
3909
3910 /* Hooray for CUR*CNTR differences */
3911 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3912 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
3913 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
3914 temp |= (pipe << 28); /* Connect to correct pipe */
3915 } else {
3916 temp &= ~(CURSOR_FORMAT_MASK);
3917 temp |= CURSOR_ENABLE;
3918 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
3919 }
79e53945 3920
3f8bc370 3921 finish:
79e53945
JB
3922 I915_WRITE(control, temp);
3923 I915_WRITE(base, addr);
3924
3f8bc370 3925 if (intel_crtc->cursor_bo) {
b295d1b6 3926 if (dev_priv->info->cursor_needs_physical) {
71acb5eb
DA
3927 if (intel_crtc->cursor_bo != bo)
3928 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
3929 } else
3930 i915_gem_object_unpin(intel_crtc->cursor_bo);
3f8bc370
KH
3931 drm_gem_object_unreference(intel_crtc->cursor_bo);
3932 }
80824003 3933
7f9872e0 3934 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
3935
3936 intel_crtc->cursor_addr = addr;
3937 intel_crtc->cursor_bo = bo;
3938
79e53945 3939 return 0;
7f9872e0 3940fail_locked:
34b8686e 3941 mutex_unlock(&dev->struct_mutex);
bc9025bd
LB
3942fail:
3943 drm_gem_object_unreference_unlocked(bo);
34b8686e 3944 return ret;
79e53945
JB
3945}
3946
3947static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
3948{
3949 struct drm_device *dev = crtc->dev;
3950 struct drm_i915_private *dev_priv = dev->dev_private;
3951 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 3952 struct intel_framebuffer *intel_fb;
79e53945
JB
3953 int pipe = intel_crtc->pipe;
3954 uint32_t temp = 0;
3955 uint32_t adder;
3956
652c393a
JB
3957 if (crtc->fb) {
3958 intel_fb = to_intel_framebuffer(crtc->fb);
3959 intel_mark_busy(dev, intel_fb->obj);
3960 }
3961
79e53945 3962 if (x < 0) {
2245fda8 3963 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
79e53945
JB
3964 x = -x;
3965 }
3966 if (y < 0) {
2245fda8 3967 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
79e53945
JB
3968 y = -y;
3969 }
3970
2245fda8
KP
3971 temp |= x << CURSOR_X_SHIFT;
3972 temp |= y << CURSOR_Y_SHIFT;
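/*
 * Editor's note (illustrative): the cursor position is sign/magnitude
 * encoded, e.g. (x, y) = (-10, 20) sets CURSOR_POS_SIGN in the X field
 * and packs the magnitudes 10 and 20 into the X/Y position fields.
 */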
79e53945
JB
3973
3974 adder = intel_crtc->cursor_addr;
3975 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
3976 I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
3977
3978 return 0;
3979}
3980
3981/** Sets the color ramps on behalf of RandR */
3982void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
3983 u16 blue, int regno)
3984{
3985 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3986
3987 intel_crtc->lut_r[regno] = red >> 8;
3988 intel_crtc->lut_g[regno] = green >> 8;
3989 intel_crtc->lut_b[regno] = blue >> 8;
3990}
3991
b8c00ac5
DA
3992void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
3993 u16 *blue, int regno)
3994{
3995 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3996
3997 *red = intel_crtc->lut_r[regno] << 8;
3998 *green = intel_crtc->lut_g[regno] << 8;
3999 *blue = intel_crtc->lut_b[regno] << 8;
4000}
4001
79e53945
JB
4002static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
4003 u16 *blue, uint32_t size)
4004{
4005 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4006 int i;
4007
4008 if (size != 256)
4009 return;
4010
4011 for (i = 0; i < 256; i++) {
4012 intel_crtc->lut_r[i] = red[i] >> 8;
4013 intel_crtc->lut_g[i] = green[i] >> 8;
4014 intel_crtc->lut_b[i] = blue[i] >> 8;
4015 }
4016
4017 intel_crtc_load_lut(crtc);
4018}
4019
4020/**
4021 * Get a pipe with a simple mode set on it for doing load-based monitor
4022 * detection.
4023 *
4024 * It will be up to the load-detect code to adjust the pipe as appropriate for
c751ce4f 4025 * its requirements. The pipe will be connected to no other encoders.
79e53945 4026 *
c751ce4f 4027 * Currently this code will only succeed if there is a pipe with no encoders
79e53945
JB
4028 * configured for it. In the future, it could choose to temporarily disable
4029 * some outputs to free up a pipe for its use.
4030 *
4031 * \return crtc, or NULL if no pipes are available.
4032 */
4033
4034/* VESA 640x480x72Hz mode to set on the pipe */
4035static struct drm_display_mode load_detect_mode = {
4036 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
4037 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
4038};
4039
21d40d37 4040struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
c1c43977 4041 struct drm_connector *connector,
79e53945
JB
4042 struct drm_display_mode *mode,
4043 int *dpms_mode)
4044{
4045 struct intel_crtc *intel_crtc;
4046 struct drm_crtc *possible_crtc;
4047 struct drm_crtc *supported_crtc = NULL;
21d40d37 4048 struct drm_encoder *encoder = &intel_encoder->enc;
79e53945
JB
4049 struct drm_crtc *crtc = NULL;
4050 struct drm_device *dev = encoder->dev;
4051 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
4052 struct drm_crtc_helper_funcs *crtc_funcs;
4053 int i = -1;
4054
4055 /*
4056 * Algorithm gets a little messy:
4057 * - if the connector already has an assigned crtc, use it (but make
4058 * sure it's on first)
4059 * - try to find the first unused crtc that can drive this connector,
4060 * and use that if we find one
4061 * - if there are no unused crtcs available, try to use the first
4062 * one we found that supports the connector
4063 */
4064
4065 /* See if we already have a CRTC for this connector */
4066 if (encoder->crtc) {
4067 crtc = encoder->crtc;
4068 /* Make sure the crtc and connector are running */
4069 intel_crtc = to_intel_crtc(crtc);
4070 *dpms_mode = intel_crtc->dpms_mode;
4071 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
4072 crtc_funcs = crtc->helper_private;
4073 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
4074 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
4075 }
4076 return crtc;
4077 }
4078
4079 /* Find an unused one (if possible) */
4080 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
4081 i++;
4082 if (!(encoder->possible_crtcs & (1 << i)))
4083 continue;
4084 if (!possible_crtc->enabled) {
4085 crtc = possible_crtc;
4086 break;
4087 }
4088 if (!supported_crtc)
4089 supported_crtc = possible_crtc;
4090 }
4091
4092 /*
4093 * If we didn't find an unused CRTC, don't use any.
4094 */
4095 if (!crtc) {
4096 return NULL;
4097 }
4098
4099 encoder->crtc = crtc;
c1c43977 4100 connector->encoder = encoder;
21d40d37 4101 intel_encoder->load_detect_temp = true;
79e53945
JB
4102
4103 intel_crtc = to_intel_crtc(crtc);
4104 *dpms_mode = intel_crtc->dpms_mode;
4105
4106 if (!crtc->enabled) {
4107 if (!mode)
4108 mode = &load_detect_mode;
3c4fdcfb 4109 drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
79e53945
JB
4110 } else {
4111 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
4112 crtc_funcs = crtc->helper_private;
4113 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
4114 }
4115
4116 /* Add this connector to the crtc */
4117 encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
4118 encoder_funcs->commit(encoder);
4119 }
4120 /* let the connector get through one full cycle before testing */
4121 intel_wait_for_vblank(dev);
4122
4123 return crtc;
4124}
4125
c1c43977
ZW
4126void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
4127 struct drm_connector *connector, int dpms_mode)
79e53945 4128{
21d40d37 4129 struct drm_encoder *encoder = &intel_encoder->enc;
79e53945
JB
4130 struct drm_device *dev = encoder->dev;
4131 struct drm_crtc *crtc = encoder->crtc;
4132 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
4133 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
4134
21d40d37 4135 if (intel_encoder->load_detect_temp) {
79e53945 4136 encoder->crtc = NULL;
c1c43977 4137 connector->encoder = NULL;
21d40d37 4138 intel_encoder->load_detect_temp = false;
79e53945
JB
4139 crtc->enabled = drm_helper_crtc_in_use(crtc);
4140 drm_helper_disable_unused_functions(dev);
4141 }
4142
c751ce4f 4143 /* Switch crtc and encoder back off if necessary */
79e53945
JB
4144 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
4145 if (encoder->crtc == crtc)
4146 encoder_funcs->dpms(encoder, dpms_mode);
4147 crtc_funcs->dpms(crtc, dpms_mode);
4148 }
4149}
4150
4151/* Returns the clock of the currently programmed mode of the given pipe. */
4152static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
4153{
4154 struct drm_i915_private *dev_priv = dev->dev_private;
4155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4156 int pipe = intel_crtc->pipe;
4157 u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
4158 u32 fp;
4159 intel_clock_t clock;
4160
4161 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4162 fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
4163 else
4164 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
4165
4166 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
4167 if (IS_PINEVIEW(dev)) {
4168 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
4169 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
4170 } else {
4171 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
4172 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
4173 }
4174
79e53945 4175 if (IS_I9XX(dev)) {
f2b115e6
AJ
4176 if (IS_PINEVIEW(dev))
4177 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4178 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
4179 else
4180 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
4181 DPLL_FPA01_P1_POST_DIV_SHIFT);
4182
4183 switch (dpll & DPLL_MODE_MASK) {
4184 case DPLLB_MODE_DAC_SERIAL:
4185 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
4186 5 : 10;
4187 break;
4188 case DPLLB_MODE_LVDS:
4189 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
4190 7 : 14;
4191 break;
4192 default:
28c97730 4193 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945
JB
4194 "mode\n", (int)(dpll & DPLL_MODE_MASK));
4195 return 0;
4196 }
4197
4198 /* XXX: Handle the 100MHz refclk */
2177832f 4199 intel_clock(dev, 96000, &clock);
79e53945
JB
4200 } else {
4201 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
4202
4203 if (is_lvds) {
4204 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
4205 DPLL_FPA01_P1_POST_DIV_SHIFT);
4206 clock.p2 = 14;
4207
4208 if ((dpll & PLL_REF_INPUT_MASK) ==
4209 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
4210 /* XXX: might not be 66MHz */
2177832f 4211 intel_clock(dev, 66000, &clock);
79e53945 4212 } else
2177832f 4213 intel_clock(dev, 48000, &clock);
79e53945
JB
4214 } else {
4215 if (dpll & PLL_P1_DIVIDE_BY_TWO)
4216 clock.p1 = 2;
4217 else {
4218 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
4219 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
4220 }
4221 if (dpll & PLL_P2_DIVIDE_BY_4)
4222 clock.p2 = 4;
4223 else
4224 clock.p2 = 2;
4225
2177832f 4226 intel_clock(dev, 48000, &clock);
79e53945
JB
4227 }
4228 }
4229
4230 /* XXX: It would be nice to validate the clocks, but we can't reuse
4231 * i830PllIsValid() because it relies on the xf86_config connector
4232 * configuration being accurate, which it isn't necessarily.
4233 */
4234
4235 return clock.dot;
4236}
4237
4238/** Returns the currently programmed mode of the given pipe. */
4239struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
4240 struct drm_crtc *crtc)
4241{
4242 struct drm_i915_private *dev_priv = dev->dev_private;
4243 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4244 int pipe = intel_crtc->pipe;
4245 struct drm_display_mode *mode;
4246 int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
4247 int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
4248 int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
4249 int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
4250
4251 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4252 if (!mode)
4253 return NULL;
4254
4255 mode->clock = intel_crtc_clock_get(dev, crtc);
4256 mode->hdisplay = (htot & 0xffff) + 1;
4257 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
4258 mode->hsync_start = (hsync & 0xffff) + 1;
4259 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
4260 mode->vdisplay = (vtot & 0xffff) + 1;
4261 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
4262 mode->vsync_start = (vsync & 0xffff) + 1;
4263 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
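/*
 * Editor's note (illustrative): this is the inverse of the packing done
 * at mode set time, e.g. HTOTAL = 0x053f03ff decodes back to
 * hdisplay = 0x3ff + 1 = 1024 and htotal = 0x53f + 1 = 1344.
 */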
4264
4265 drm_mode_set_name(mode);
4266 drm_mode_set_crtcinfo(mode, 0);
4267
4268 return mode;
4269}
4270
652c393a
JB
4271#define GPU_IDLE_TIMEOUT 500 /* ms */
4272
4273 /* When this timer fires, we've been idle for a while */
4274static void intel_gpu_idle_timer(unsigned long arg)
4275{
4276 struct drm_device *dev = (struct drm_device *)arg;
4277 drm_i915_private_t *dev_priv = dev->dev_private;
4278
44d98a61 4279 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
652c393a
JB
4280
4281 dev_priv->busy = false;
4282
01dfba93 4283 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
4284}
4285
652c393a
JB
4286#define CRTC_IDLE_TIMEOUT 1000 /* ms */
4287
4288static void intel_crtc_idle_timer(unsigned long arg)
4289{
4290 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
4291 struct drm_crtc *crtc = &intel_crtc->base;
4292 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
4293
44d98a61 4294 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
652c393a
JB
4295
4296 intel_crtc->busy = false;
4297
01dfba93 4298 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
4299}
4300
4301static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
4302{
4303 struct drm_device *dev = crtc->dev;
4304 drm_i915_private_t *dev_priv = dev->dev_private;
4305 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4306 int pipe = intel_crtc->pipe;
4307 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
4308 int dpll = I915_READ(dpll_reg);
4309
bad720ff 4310 if (HAS_PCH_SPLIT(dev))
652c393a
JB
4311 return;
4312
4313 if (!dev_priv->lvds_downclock_avail)
4314 return;
4315
4316 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 4317 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a
JB
4318
4319 /* Unlock panel regs */
4320 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
4321
4322 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
4323 I915_WRITE(dpll_reg, dpll);
4324 dpll = I915_READ(dpll_reg);
4325 intel_wait_for_vblank(dev);
4326 dpll = I915_READ(dpll_reg);
4327 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 4328 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a
JB
4329
4330 /* ...and lock them again */
4331 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
4332 }
4333
4334 /* Schedule downclock */
4335 if (schedule)
4336 mod_timer(&intel_crtc->idle_timer, jiffies +
4337 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
4338}
4339
4340static void intel_decrease_pllclock(struct drm_crtc *crtc)
4341{
4342 struct drm_device *dev = crtc->dev;
4343 drm_i915_private_t *dev_priv = dev->dev_private;
4344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4345 int pipe = intel_crtc->pipe;
4346 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
4347 int dpll = I915_READ(dpll_reg);
4348
bad720ff 4349 if (HAS_PCH_SPLIT(dev))
652c393a
JB
4350 return;
4351
4352 if (!dev_priv->lvds_downclock_avail)
4353 return;
4354
4355 /*
4356 * Since this is called by a timer, we should never get here in
4357 * the manual case.
4358 */
4359 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
44d98a61 4360 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a
JB
4361
4362 /* Unlock panel regs */
4363 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
4364
4365 dpll |= DISPLAY_RATE_SELECT_FPA1;
4366 I915_WRITE(dpll_reg, dpll);
4367 dpll = I915_READ(dpll_reg);
4368 intel_wait_for_vblank(dev);
4369 dpll = I915_READ(dpll_reg);
4370 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 4371 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
4372
4373 /* ...and lock them again */
4374 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
4375 }
4376
4377}
4378
4379/**
4380 * intel_idle_update - adjust clocks for idleness
4381 * @work: work struct
4382 *
4383 * Either the GPU or display (or both) went idle. Check the busy status
4384 * here and adjust the CRTC and GPU clocks as necessary.
4385 */
4386static void intel_idle_update(struct work_struct *work)
4387{
4388 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
4389 idle_work);
4390 struct drm_device *dev = dev_priv->dev;
4391 struct drm_crtc *crtc;
4392 struct intel_crtc *intel_crtc;
4393
4394 if (!i915_powersave)
4395 return;
4396
4397 mutex_lock(&dev->struct_mutex);
4398
ee980b80
LP
4399 if (IS_I945G(dev) || IS_I945GM(dev)) {
4400 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4401 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4402 }
4403
652c393a
JB
4404 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4405 /* Skip inactive CRTCs */
4406 if (!crtc->fb)
4407 continue;
4408
4409 intel_crtc = to_intel_crtc(crtc);
4410 if (!intel_crtc->busy)
4411 intel_decrease_pllclock(crtc);
4412 }
4413
4414 mutex_unlock(&dev->struct_mutex);
4415}
4416
4417/**
4418 * intel_mark_busy - mark the GPU and possibly the display busy
4419 * @dev: drm device
4420 * @obj: object we're operating on
4421 *
4422 * Callers can use this function to indicate that the GPU is busy processing
4423 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
4424 * buffer), we'll also mark the display as busy, so we know to increase its
4425 * clock frequency.
4426 */
4427void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4428{
4429 drm_i915_private_t *dev_priv = dev->dev_private;
4430 struct drm_crtc *crtc = NULL;
4431 struct intel_framebuffer *intel_fb;
4432 struct intel_crtc *intel_crtc;
4433
5e17ee74
ZW
4434 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4435 return;
4436
060e645a
LP
4437 if (!dev_priv->busy) {
4438 if (IS_I945G(dev) || IS_I945GM(dev)) {
4439 u32 fw_blc_self;
ee980b80 4440
060e645a
LP
4441 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4442 fw_blc_self = I915_READ(FW_BLC_SELF);
4443 fw_blc_self &= ~FW_BLC_SELF_EN;
4444 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4445 }
28cf798f 4446 dev_priv->busy = true;
060e645a 4447 } else
28cf798f
CW
4448 mod_timer(&dev_priv->idle_timer, jiffies +
4449 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
652c393a
JB
4450
4451 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4452 if (!crtc->fb)
4453 continue;
4454
4455 intel_crtc = to_intel_crtc(crtc);
4456 intel_fb = to_intel_framebuffer(crtc->fb);
4457 if (intel_fb->obj == obj) {
4458 if (!intel_crtc->busy) {
060e645a
LP
4459 if (IS_I945G(dev) || IS_I945GM(dev)) {
4460 u32 fw_blc_self;
4461
4462 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4463 fw_blc_self = I915_READ(FW_BLC_SELF);
4464 fw_blc_self &= ~FW_BLC_SELF_EN;
4465 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4466 }
652c393a
JB
4467 /* Non-busy -> busy, upclock */
4468 intel_increase_pllclock(crtc, true);
4469 intel_crtc->busy = true;
4470 } else {
4471 /* Busy -> busy, put off timer */
4472 mod_timer(&intel_crtc->idle_timer, jiffies +
4473 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
4474 }
4475 }
4476 }
4477}
4478
79e53945
JB
4479static void intel_crtc_destroy(struct drm_crtc *crtc)
4480{
4481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4482
4483 drm_crtc_cleanup(crtc);
4484 kfree(intel_crtc);
4485}
4486
6b95a207
KH
4487struct intel_unpin_work {
4488 struct work_struct work;
4489 struct drm_device *dev;
b1b87f6b
JB
4490 struct drm_gem_object *old_fb_obj;
4491 struct drm_gem_object *pending_flip_obj;
6b95a207
KH
4492 struct drm_pending_vblank_event *event;
4493 int pending;
4494};
4495
4496static void intel_unpin_work_fn(struct work_struct *__work)
4497{
4498 struct intel_unpin_work *work =
4499 container_of(__work, struct intel_unpin_work, work);
4500
4501 mutex_lock(&work->dev->struct_mutex);
b1b87f6b 4502 i915_gem_object_unpin(work->old_fb_obj);
75dfca80 4503 drm_gem_object_unreference(work->pending_flip_obj);
b1b87f6b 4504 drm_gem_object_unreference(work->old_fb_obj);
6b95a207
KH
4505 mutex_unlock(&work->dev->struct_mutex);
4506 kfree(work);
4507}
4508
4509void intel_finish_page_flip(struct drm_device *dev, int pipe)
4510{
4511 drm_i915_private_t *dev_priv = dev->dev_private;
4512 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4514 struct intel_unpin_work *work;
4515 struct drm_i915_gem_object *obj_priv;
4516 struct drm_pending_vblank_event *e;
4517 struct timeval now;
4518 unsigned long flags;
4519
4520 /* Ignore early vblank irqs */
4521 if (intel_crtc == NULL)
4522 return;
4523
4524 spin_lock_irqsave(&dev->event_lock, flags);
4525 work = intel_crtc->unpin_work;
4526 if (work == NULL || !work->pending) {
de3f440f 4527 if (work && !work->pending) {
23010e43 4528 obj_priv = to_intel_bo(work->pending_flip_obj);
de3f440f
JB
4529 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4530 obj_priv,
4531 atomic_read(&obj_priv->pending_flip));
4532 }
6b95a207
KH
4533 spin_unlock_irqrestore(&dev->event_lock, flags);
4534 return;
4535 }
4536
4537 intel_crtc->unpin_work = NULL;
4538 drm_vblank_put(dev, intel_crtc->pipe);
4539
4540 if (work->event) {
4541 e = work->event;
4542 do_gettimeofday(&now);
4543 e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
4544 e->event.tv_sec = now.tv_sec;
4545 e->event.tv_usec = now.tv_usec;
4546 list_add_tail(&e->base.link,
4547 &e->base.file_priv->event_list);
4548 wake_up_interruptible(&e->base.file_priv->event_wait);
4549 }
4550
4551 spin_unlock_irqrestore(&dev->event_lock, flags);
4552
23010e43 4553 obj_priv = to_intel_bo(work->pending_flip_obj);
de3f440f
JB
4554
4555 /* Initial scanout buffer will have a 0 pending flip count */
4556 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4557 atomic_dec_and_test(&obj_priv->pending_flip))
6b95a207
KH
4558 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4559 schedule_work(&work->work);
4560}
4561
4562void intel_prepare_page_flip(struct drm_device *dev, int plane)
4563{
4564 drm_i915_private_t *dev_priv = dev->dev_private;
4565 struct intel_crtc *intel_crtc =
4566 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
4567 unsigned long flags;
4568
4569 spin_lock_irqsave(&dev->event_lock, flags);
de3f440f 4570 if (intel_crtc->unpin_work) {
6b95a207 4571 intel_crtc->unpin_work->pending = 1;
de3f440f
JB
4572 } else {
4573 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4574 }
6b95a207
KH
4575 spin_unlock_irqrestore(&dev->event_lock, flags);
4576}
4577
4578static int intel_crtc_page_flip(struct drm_crtc *crtc,
4579 struct drm_framebuffer *fb,
4580 struct drm_pending_vblank_event *event)
4581{
4582 struct drm_device *dev = crtc->dev;
4583 struct drm_i915_private *dev_priv = dev->dev_private;
4584 struct intel_framebuffer *intel_fb;
4585 struct drm_i915_gem_object *obj_priv;
4586 struct drm_gem_object *obj;
4587 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4588 struct intel_unpin_work *work;
4589 unsigned long flags;
aacef09b
ZW
4590 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4591 int ret, pipesrc;
6b95a207
KH
4592 RING_LOCALS;
4593
4594 work = kzalloc(sizeof *work, GFP_KERNEL);
4595 if (work == NULL)
4596 return -ENOMEM;
4597
4598 mutex_lock(&dev->struct_mutex);
4599
4600 work->event = event;
4601 work->dev = crtc->dev;
4602 intel_fb = to_intel_framebuffer(crtc->fb);
b1b87f6b 4603 work->old_fb_obj = intel_fb->obj;
6b95a207
KH
4604 INIT_WORK(&work->work, intel_unpin_work_fn);
4605
4606 /* We borrow the event spin lock for protecting unpin_work */
4607 spin_lock_irqsave(&dev->event_lock, flags);
4608 if (intel_crtc->unpin_work) {
de3f440f 4609 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
4610 spin_unlock_irqrestore(&dev->event_lock, flags);
4611 kfree(work);
4612 mutex_unlock(&dev->struct_mutex);
4613 return -EBUSY;
4614 }
4615 intel_crtc->unpin_work = work;
4616 spin_unlock_irqrestore(&dev->event_lock, flags);
4617
4618 intel_fb = to_intel_framebuffer(fb);
4619 obj = intel_fb->obj;
4620
4621 ret = intel_pin_and_fence_fb_obj(dev, obj);
4622 if (ret != 0) {
de3f440f 4623 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
23010e43 4624 to_intel_bo(obj));
6b95a207 4625 kfree(work);
de3f440f 4626 intel_crtc->unpin_work = NULL;
6b95a207
KH
4627 mutex_unlock(&dev->struct_mutex);
4628 return ret;
4629 }
4630
75dfca80 4631 /* Reference the objects for the scheduled work. */
b1b87f6b 4632 drm_gem_object_reference(work->old_fb_obj);
75dfca80 4633 drm_gem_object_reference(obj);
6b95a207
KH
4634
4635 crtc->fb = fb;
4636 i915_gem_object_flush_write_domain(obj);
4637 drm_vblank_get(dev, intel_crtc->pipe);
23010e43 4638 obj_priv = to_intel_bo(obj);
6b95a207 4639 atomic_inc(&obj_priv->pending_flip);
b1b87f6b 4640 work->pending_flip_obj = obj;
6b95a207
KH
4641
4642 BEGIN_LP_RING(4);
4643 OUT_RING(MI_DISPLAY_FLIP |
4644 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4645 OUT_RING(fb->pitch);
22fd0fab
JB
4646 if (IS_I965G(dev)) {
4647 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
aacef09b
ZW
4648 pipesrc = I915_READ(pipesrc_reg);
4649 OUT_RING(pipesrc & 0x0fff0fff);
22fd0fab
JB
4650 } else {
4651 OUT_RING(obj_priv->gtt_offset);
4652 OUT_RING(MI_NOOP);
4653 }
6b95a207
KH
4654 ADVANCE_LP_RING();
4655
4656 mutex_unlock(&dev->struct_mutex);
4657
4658 return 0;
4659}
4660
79e53945
JB
4661static const struct drm_crtc_helper_funcs intel_helper_funcs = {
4662 .dpms = intel_crtc_dpms,
4663 .mode_fixup = intel_crtc_mode_fixup,
4664 .mode_set = intel_crtc_mode_set,
4665 .mode_set_base = intel_pipe_set_base,
4666 .prepare = intel_crtc_prepare,
4667 .commit = intel_crtc_commit,
068143d3 4668 .load_lut = intel_crtc_load_lut,
79e53945
JB
4669};
4670
4671static const struct drm_crtc_funcs intel_crtc_funcs = {
4672 .cursor_set = intel_crtc_cursor_set,
4673 .cursor_move = intel_crtc_cursor_move,
4674 .gamma_set = intel_crtc_gamma_set,
4675 .set_config = drm_crtc_helper_set_config,
4676 .destroy = intel_crtc_destroy,
6b95a207 4677 .page_flip = intel_crtc_page_flip,
79e53945
JB
4678};
4679
4680
b358d0a6 4681static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 4682{
22fd0fab 4683 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
4684 struct intel_crtc *intel_crtc;
4685 int i;
4686
4687 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
4688 if (intel_crtc == NULL)
4689 return;
4690
4691 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
4692
4693 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
4694 intel_crtc->pipe = pipe;
7662c8bd 4695 intel_crtc->plane = pipe;
79e53945
JB
4696 for (i = 0; i < 256; i++) {
4697 intel_crtc->lut_r[i] = i;
4698 intel_crtc->lut_g[i] = i;
4699 intel_crtc->lut_b[i] = i;
4700 }
4701
80824003
JB
4702 /* Swap pipes & planes for FBC on pre-965 */
4703 intel_crtc->pipe = pipe;
4704 intel_crtc->plane = pipe;
4705 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
28c97730 4706 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
80824003
JB
4707 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
4708 }
4709
22fd0fab
JB
4710 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
4711 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
4712 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
4713 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
4714
79e53945
JB
4715 intel_crtc->cursor_addr = 0;
4716 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4717 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
4718
652c393a
JB
4719 intel_crtc->busy = false;
4720
4721 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
4722 (unsigned long)intel_crtc);
79e53945
JB
4723}
4724
08d7b3d1
CW
4725int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
4726 struct drm_file *file_priv)
4727{
4728 drm_i915_private_t *dev_priv = dev->dev_private;
4729 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
4730 struct drm_mode_object *drmmode_obj;
4731 struct intel_crtc *crtc;
08d7b3d1
CW
4732
4733 if (!dev_priv) {
4734 DRM_ERROR("called with no initialization\n");
4735 return -EINVAL;
4736 }
4737
c05422d5
DV
4738 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
4739 DRM_MODE_OBJECT_CRTC);
08d7b3d1 4740
c05422d5 4741 if (!drmmode_obj) {
08d7b3d1
CW
4742 DRM_ERROR("no such CRTC id\n");
4743 return -EINVAL;
4744 }
4745
c05422d5
DV
4746 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
4747 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 4748
c05422d5 4749 return 0;
08d7b3d1
CW
4750}
4751
79e53945
JB
4752struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
4753{
4754 struct drm_crtc *crtc = NULL;
4755
4756 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4757 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4758 if (intel_crtc->pipe == pipe)
4759 break;
4760 }
4761 return crtc;
4762}
4763
c5e4df33 4764static int intel_encoder_clones(struct drm_device *dev, int type_mask)
79e53945
JB
4765{
4766 int index_mask = 0;
c5e4df33 4767 struct drm_encoder *encoder;
79e53945
JB
4768 int entry = 0;
4769
c5e4df33
ZW
4770 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4771 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
21d40d37 4772 if (type_mask & intel_encoder->clone_mask)
79e53945
JB
4773 index_mask |= (1 << entry);
4774 entry++;
4775 }
4776 return index_mask;
4777}
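/*
 * Editor's note (illustrative): the mask above is indexed by encoder
 * list position, e.g. if only the third encoder in the list (entry 2)
 * has a clone_mask overlapping type_mask, the result is 1 << 2 = 0x4.
 */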
4778
4779
4780static void intel_setup_outputs(struct drm_device *dev)
4781{
725e30ad 4782 struct drm_i915_private *dev_priv = dev->dev_private;
c5e4df33 4783 struct drm_encoder *encoder;
79e53945
JB
4784
4785 intel_crt_init(dev);
4786
4787 /* Set up integrated LVDS */
541998a1 4788 if (IS_MOBILE(dev) && !IS_I830(dev))
79e53945
JB
4789 intel_lvds_init(dev);
4790
bad720ff 4791 if (HAS_PCH_SPLIT(dev)) {
30ad48b7
ZW
4792 int found;
4793
32f9d658
ZW
4794 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
4795 intel_dp_init(dev, DP_A);
4796
30ad48b7 4797 if (I915_READ(HDMIB) & PORT_DETECTED) {
461ed3ca
ZY
4798 /* PCH SDVOB multiplex with HDMIB */
4799 found = intel_sdvo_init(dev, PCH_SDVOB);
30ad48b7
ZW
4800 if (!found)
4801 intel_hdmi_init(dev, HDMIB);
5eb08b69
ZW
4802 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
4803 intel_dp_init(dev, PCH_DP_B);
30ad48b7
ZW
4804 }
4805
4806 if (I915_READ(HDMIC) & PORT_DETECTED)
4807 intel_hdmi_init(dev, HDMIC);
4808
4809 if (I915_READ(HDMID) & PORT_DETECTED)
4810 intel_hdmi_init(dev, HDMID);
4811
5eb08b69
ZW
4812 if (I915_READ(PCH_DP_C) & DP_DETECTED)
4813 intel_dp_init(dev, PCH_DP_C);
4814
4815 if (I915_READ(PCH_DP_D) & DP_DETECTED)
4816 intel_dp_init(dev, PCH_DP_D);
4817
103a196f 4818 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 4819 bool found = false;
7d57382e 4820
725e30ad 4821 if (I915_READ(SDVOB) & SDVO_DETECTED) {
b01f2c3a 4822 DRM_DEBUG_KMS("probing SDVOB\n");
725e30ad 4823 found = intel_sdvo_init(dev, SDVOB);
b01f2c3a
JB
4824 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
4825 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
725e30ad 4826 intel_hdmi_init(dev, SDVOB);
b01f2c3a 4827 }
27185ae1 4828
b01f2c3a
JB
4829 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
4830 DRM_DEBUG_KMS("probing DP_B\n");
a4fc5ed6 4831 intel_dp_init(dev, DP_B);
b01f2c3a 4832 }
725e30ad 4833 }
13520b05
KH
4834
4835 /* Before G4X, SDVOC doesn't have its own detect register */
13520b05 4836
b01f2c3a
JB
4837 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4838 DRM_DEBUG_KMS("probing SDVOC\n");
725e30ad 4839 found = intel_sdvo_init(dev, SDVOC);
b01f2c3a 4840 }
27185ae1
ML
4841
4842 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
4843
b01f2c3a
JB
4844 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
4845 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
725e30ad 4846 intel_hdmi_init(dev, SDVOC);
b01f2c3a
JB
4847 }
4848 if (SUPPORTS_INTEGRATED_DP(dev)) {
4849 DRM_DEBUG_KMS("probing DP_C\n");
a4fc5ed6 4850 intel_dp_init(dev, DP_C);
b01f2c3a 4851 }
725e30ad 4852 }
27185ae1 4853
b01f2c3a
JB
4854 if (SUPPORTS_INTEGRATED_DP(dev) &&
4855 (I915_READ(DP_D) & DP_DETECTED)) {
4856 DRM_DEBUG_KMS("probing DP_D\n");
a4fc5ed6 4857 intel_dp_init(dev, DP_D);
b01f2c3a 4858 }
bad720ff 4859 } else if (IS_GEN2(dev))
79e53945
JB
4860 intel_dvo_init(dev);
4861
103a196f 4862 if (SUPPORTS_TV(dev))
79e53945
JB
4863 intel_tv_init(dev);
4864
c5e4df33
ZW
4865 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4866 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
79e53945 4867
21d40d37 4868 encoder->possible_crtcs = intel_encoder->crtc_mask;
c5e4df33 4869 encoder->possible_clones = intel_encoder_clones(dev,
21d40d37 4870 intel_encoder->clone_mask);
79e53945
JB
4871 }
4872}
4873
4874static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4875{
4876 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
4877 struct drm_device *dev = fb->dev;
4878
4879 if (fb->fbdev)
4880 intelfb_remove(dev, fb);
4881
4882 drm_framebuffer_cleanup(fb);
bc9025bd 4883 drm_gem_object_unreference_unlocked(intel_fb->obj);
79e53945
JB
4884
4885 kfree(intel_fb);
4886}
4887
4888static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
4889 struct drm_file *file_priv,
4890 unsigned int *handle)
4891{
4892 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
4893 struct drm_gem_object *object = intel_fb->obj;
4894
4895 return drm_gem_handle_create(file_priv, object, handle);
4896}
4897
4898static const struct drm_framebuffer_funcs intel_fb_funcs = {
4899 .destroy = intel_user_framebuffer_destroy,
4900 .create_handle = intel_user_framebuffer_create_handle,
4901};
4902
int intel_framebuffer_create(struct drm_device *dev,
			     struct drm_mode_fb_cmd *mode_cmd,
			     struct drm_framebuffer **fb,
			     struct drm_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return -ENOMEM;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		kfree(intel_fb);	/* don't leak the wrapper on failure */
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);

	intel_fb->obj = obj;

	*fb = &intel_fb->base;

	return 0;
}

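/*
 * fb_create hook for userspace-created framebuffers: look up the GEM
 * object named by the mode_cmd handle and wrap it in a new framebuffer.
 */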
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd *mode_cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	int ret;

	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
	if (!obj)
		return NULL;

	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return NULL;
	}

	return fb;
}

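/* Mode config callbacks: userspace framebuffer creation and fbdev probing */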
79e53945 4953static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945
JB
4954 .fb_create = intel_user_framebuffer_create,
4955 .fb_changed = intelfb_probe,
4956};
4957
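/*
 * Allocate and pin a 4k GEM object for the hardware to save render state
 * into when RC6 powers down the render unit. Returns NULL (leaving RC6
 * disabled) if allocation, pinning or the set-domain call fails.
 */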
static struct drm_gem_object *
intel_alloc_power_context(struct drm_device *dev)
{
	struct drm_gem_object *pwrctx;
	int ret;

	pwrctx = drm_gem_object_alloc(dev, 4096);
	if (!pwrctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(pwrctx, 4096);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}
	mutex_unlock(&dev->struct_mutex);

	return pwrctx;

err_unpin:
	i915_gem_object_unpin(pwrctx);
err_unref:
	drm_gem_object_unreference(pwrctx);
	mutex_unlock(&dev->struct_mutex);
	return NULL;
}

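/*
 * Enable DRPS on Ironlake: program the RC evaluation intervals and busy
 * thresholds, read the min/max/start frequencies from MEMMODECTL, and
 * switch the memory controller into software frequency-control mode at
 * the starting frequency.
 */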
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
	u8 fmax, fmin, fstart, vstart;
	int i = 0;

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
		if (i++ > 100) {
			DRM_ERROR("stuck trying to change perf mode\n");
			break;
		}
		msleep(1);
	}
	msleep(1);

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
}

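/*
 * Disable DRPS: mask and ack the PCU event interrupts, then step the
 * memory controller back to the starting frequency read from MEMMODECTL.
 */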
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvswctl;
	u8 fstart;

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}

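/*
 * Per-platform clock gating setup, plus allocation of the RC6 power
 * context on chips that support it.
 */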
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * Disable the clock gating that is reported to work incorrectly
	 * according to the specs, but enable as much else as we can.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

		if (IS_IRONLAKE(dev)) {
			/* Required for FBC */
			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
			/* Required for CxSR */
			dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

			I915_WRITE(PCH_3DCGDIS0,
				   MARIUNIT_CLOCK_GATE_DISABLE |
				   SVSMUNIT_CLOCK_GATE_DISABLE);
		}

		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

		/*
		 * According to the spec, the following bits must be set to
		 * enable memory self-refresh:
		 *   bit 22/21 of 0x42004 (ILK_DISPLAY_CHICKEN2)
		 *   bit 5 of 0x42020 (ILK_DSPCLK_GATE)
		 *   bit 15 of 0x45000 (DISP_ARB_CTL)
		 */
		if (IS_IRONLAKE(dev)) {
			I915_WRITE(ILK_DISPLAY_CHICKEN2,
				   (I915_READ(ILK_DISPLAY_CHICKEN2) |
				    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
			I915_WRITE(ILK_DSPCLK_GATE,
				   (I915_READ(ILK_DSPCLK_GATE) |
				    ILK_DPARB_CLK_GATE));
			I915_WRITE(DISP_ARB_CTL,
				   (I915_READ(DISP_ARB_CTL) |
				    DISP_FBC_WM_DIS));
		}
		return;
	} else if (IS_G4X(dev)) {
		uint32_t dspclk_gate;
		I915_WRITE(RENCLK_GATE_D1, 0);
		I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
			   GS_UNIT_CLOCK_GATE_DISABLE |
			   CL_UNIT_CLOCK_GATE_DISABLE);
		I915_WRITE(RAMCLK_GATE_D, 0);
		dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
			OVRUNIT_CLOCK_GATE_DISABLE |
			OVCUNIT_CLOCK_GATE_DISABLE;
		if (IS_GM45(dev))
			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
	} else if (IS_I965GM(dev)) {
		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
		I915_WRITE(RENCLK_GATE_D2, 0);
		I915_WRITE(DSPCLK_GATE_D, 0);
		I915_WRITE(RAMCLK_GATE_D, 0);
		I915_WRITE16(DEUC, 0);
	} else if (IS_I965G(dev)) {
		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
			   I965_RCC_CLOCK_GATE_DISABLE |
			   I965_RCPB_CLOCK_GATE_DISABLE |
			   I965_ISC_CLOCK_GATE_DISABLE |
			   I965_FBC_CLOCK_GATE_DISABLE);
		I915_WRITE(RENCLK_GATE_D2, 0);
	} else if (IS_I9XX(dev)) {
		u32 dstate = I915_READ(D_STATE);

		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
			DSTATE_DOT_CLOCK_GATING;
		I915_WRITE(D_STATE, dstate);
	} else if (IS_I85X(dev) || IS_I865G(dev)) {
		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
	} else if (IS_I830(dev)) {
		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_i915_gem_object *obj_priv = NULL;

		if (dev_priv->pwrctx) {
			obj_priv = to_intel_bo(dev_priv->pwrctx);
		} else {
			struct drm_gem_object *pwrctx;

			pwrctx = intel_alloc_power_context(dev);
			if (pwrctx) {
				dev_priv->pwrctx = pwrctx;
				obj_priv = to_intel_bo(pwrctx);
			}
		}

		if (obj_priv) {
			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
			I915_WRITE(MCHBAR_RENDER_STANDBY,
				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
		}
	}
}

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev))
		dev_priv->display.dpms = ironlake_crtc_dpms;
	else
		dev_priv->display.dpms = i9xx_crtc_dpms;

	/* Only mobile has FBC, leave pointers NULL for other chips */
	if (IS_MOBILE(dev)) {
		if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_I965GM(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_IRONLAKE(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev))
		dev_priv->display.update_wm = g4x_update_wm;
	else if (IS_I965G(dev))
		dev_priv->display.update_wm = i965_update_wm;
	else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else {
		if (IS_I85X(dev))
			dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		else if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		dev_priv->display.update_wm = i830_update_wm;
	}
}

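/*
 * One-time modeset initialization: set up the mode config limits, the
 * per-chip display vtable, CRTCs, outputs, clock gating, the overlay and
 * the idle timer used for dynamic clock ramping.
 */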
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int num_pipe;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&intel_mode_funcs;

	intel_init_display(dev);

	if (IS_I965G(dev)) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else if (IS_I9XX(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	/* set memory base */
	if (IS_I9XX(dev))
		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
	else
		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);

	if (IS_MOBILE(dev) || IS_I9XX(dev))
		num_pipe = 2;
	else
		num_pipe = 1;
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      num_pipe, num_pipe > 1 ? "s" : "");

	for (i = 0; i < num_pipe; i++) {
		intel_crtc_init(dev, i);
	}

	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_enable_drps(dev);

	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);

	intel_setup_overlay(dev);
}

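/*
 * Undo intel_modeset_init: stop the idle timers, disable FBC and DRPS,
 * release the RC6 power context and tear down the mode config.
 */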
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	mutex_lock(&dev->struct_mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc, false);
		del_timer_sync(&intel_crtc->idle_timer);
	}

	del_timer_sync(&dev_priv->idle_timer);

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);

	if (dev_priv->pwrctx) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = to_intel_bo(dev_priv->pwrctx);
		I915_WRITE(PWRCTXA, obj_priv->gtt_offset & ~PWRCTX_EN);
		I915_READ(PWRCTXA);
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(dev_priv->pwrctx);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);

	mutex_unlock(&dev->struct_mutex);

	drm_mode_config_cleanup(dev);
}

/*
 * Return which encoder is currently attached to the connector.
 */
struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
{
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		obj = drm_mode_object_find(connector->dev,
					   connector->encoder_ids[i],
					   DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;

		encoder = obj_to_encoder(obj);
		return encoder;
	}
	return NULL;
}

/*
 * Set VGA decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 gmch_ctrl;

	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
	return 0;
}