]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright © 2006-2007 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eric Anholt <eric@anholt.net> | |
25 | */ | |
26 | ||
27 | #include <linux/module.h> | |
28 | #include <linux/input.h> | |
29 | #include <linux/i2c.h> | |
30 | #include <linux/kernel.h> | |
31 | #include "drmP.h" | |
32 | #include "intel_drv.h" | |
33 | #include "i915_drm.h" | |
34 | #include "i915_drv.h" | |
35 | #include "drm_dp_helper.h" | |
36 | ||
37 | #include "drm_crtc_helper.h" | |
38 | ||
/* True when any encoder on the CRTC is an embedded DisplayPort panel.
 * NOTE: the macro implicitly uses a variable named 'crtc' at the
 * expansion site — only use it where such a local is in scope. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
44 | ||
/* One candidate DPLL configuration: the raw divisor values plus the
 * clock frequencies they produce (filled in by intel_clock() /
 * pineview_clock()). */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;   /* resulting dot (pixel) clock */
    int vco;   /* resulting VCO frequency */
    int m;     /* combined M divisor computed from m1/m2 */
    int p;     /* combined post divisor, p1 * p2 */
} intel_clock_t;

/* Inclusive [min, max] bound for a single divisor field. */
typedef struct {
    int min, max;
} intel_range_t;

/* Dot-clock threshold used to pick between the two p2 divisor values. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
/* Complete set of PLL divisor constraints for one platform/output
 * combination, plus the search routine that picks divisors within them. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t	    p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
		      int, int, intel_clock_t *);
};
74 | ||
/* DPLL divisor limits for 8xx-class (pre-9xx) hardware. */
#define I8XX_DOT_MIN		  25000
#define I8XX_DOT_MAX		 350000
#define I8XX_VCO_MIN		 930000
#define I8XX_VCO_MAX		1400000
#define I8XX_N_MIN		      3
#define I8XX_N_MAX		     16
#define I8XX_M_MIN		     96
#define I8XX_M_MAX		    140
#define I8XX_M1_MIN		     18
#define I8XX_M1_MAX		     26
#define I8XX_M2_MIN		      6
#define I8XX_M2_MAX		     16
#define I8XX_P_MIN		      4
#define I8XX_P_MAX		    128
#define I8XX_P1_MIN		      2
#define I8XX_P1_MAX		     33
#define I8XX_P1_LVDS_MIN	      1
#define I8XX_P1_LVDS_MAX	      6
#define I8XX_P2_SLOW		      4
#define I8XX_P2_FAST		      2
#define I8XX_P2_LVDS_SLOW	     14
#define I8XX_P2_LVDS_FAST	      7
#define I8XX_P2_SLOW_LIMIT	 165000

/* DPLL divisor limits for 9xx-class hardware (Pineview variants where
 * its PLL differs). */
#define I9XX_DOT_MIN		  20000
#define I9XX_DOT_MAX		 400000
#define I9XX_VCO_MIN		1400000
#define I9XX_VCO_MAX		2800000
#define PINEVIEW_VCO_MIN		1700000
#define PINEVIEW_VCO_MAX		3500000
#define I9XX_N_MIN		      1
#define I9XX_N_MAX		      6
/* Pineview's Ncounter is a ring counter */
#define PINEVIEW_N_MIN		      3
#define PINEVIEW_N_MAX		      6
#define I9XX_M_MIN		     70
#define I9XX_M_MAX		    120
#define PINEVIEW_M_MIN		      2
#define PINEVIEW_M_MAX		    256
#define I9XX_M1_MIN		     10
#define I9XX_M1_MAX		     22
#define I9XX_M2_MIN		      5
#define I9XX_M2_MAX		      9
/* Pineview M1 is reserved, and must be 0 */
#define PINEVIEW_M1_MIN		      0
#define PINEVIEW_M1_MAX		      0
#define PINEVIEW_M2_MIN		      0
#define PINEVIEW_M2_MAX		    254
#define I9XX_P_SDVO_DAC_MIN	      5
#define I9XX_P_SDVO_DAC_MAX	     80
#define I9XX_P_LVDS_MIN		      7
#define I9XX_P_LVDS_MAX		     98
#define PINEVIEW_P_LVDS_MIN		      7
#define PINEVIEW_P_LVDS_MAX		     112
#define I9XX_P1_MIN		      1
#define I9XX_P1_MAX		      8
#define I9XX_P2_SDVO_DAC_SLOW		     10
#define I9XX_P2_SDVO_DAC_FAST		      5
#define I9XX_P2_SDVO_DAC_SLOW_LIMIT	 200000
#define I9XX_P2_LVDS_SLOW		     14
#define I9XX_P2_LVDS_FAST		      7
#define I9XX_P2_LVDS_SLOW_LIMIT		 112000

/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN           25000
#define G4X_DOT_SDVO_MAX           270000
#define G4X_VCO_MIN                1750000
#define G4X_VCO_MAX                3500000
#define G4X_N_SDVO_MIN             1
#define G4X_N_SDVO_MAX             4
#define G4X_M_SDVO_MIN             104
#define G4X_M_SDVO_MAX             138
#define G4X_M1_SDVO_MIN            17
#define G4X_M1_SDVO_MAX            23
#define G4X_M2_SDVO_MIN            5
#define G4X_M2_SDVO_MAX            11
#define G4X_P_SDVO_MIN             10
#define G4X_P_SDVO_MAX             30
#define G4X_P1_SDVO_MIN            1
#define G4X_P1_SDVO_MAX            3
#define G4X_P2_SDVO_SLOW           10
#define G4X_P2_SDVO_FAST           10
#define G4X_P2_SDVO_LIMIT          270000

/*The parameter is for HDMI_DAC on G4x platform*/
#define G4X_DOT_HDMI_DAC_MIN           22000
#define G4X_DOT_HDMI_DAC_MAX           400000
#define G4X_N_HDMI_DAC_MIN             1
#define G4X_N_HDMI_DAC_MAX             4
#define G4X_M_HDMI_DAC_MIN             104
#define G4X_M_HDMI_DAC_MAX             138
#define G4X_M1_HDMI_DAC_MIN            16
#define G4X_M1_HDMI_DAC_MAX            23
#define G4X_M2_HDMI_DAC_MIN            5
#define G4X_M2_HDMI_DAC_MAX            11
#define G4X_P_HDMI_DAC_MIN             5
#define G4X_P_HDMI_DAC_MAX             80
#define G4X_P1_HDMI_DAC_MIN            1
#define G4X_P1_HDMI_DAC_MAX            8
#define G4X_P2_HDMI_DAC_SLOW           10
#define G4X_P2_HDMI_DAC_FAST           5
#define G4X_P2_HDMI_DAC_LIMIT          165000

/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN           20000
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX           115000
#define G4X_N_SINGLE_CHANNEL_LVDS_MIN             1
#define G4X_N_SINGLE_CHANNEL_LVDS_MAX             3
#define G4X_M_SINGLE_CHANNEL_LVDS_MIN             104
#define G4X_M_SINGLE_CHANNEL_LVDS_MAX             138
#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN            17
#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX            23
#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN            5
#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX            11
#define G4X_P_SINGLE_CHANNEL_LVDS_MIN             28
#define G4X_P_SINGLE_CHANNEL_LVDS_MAX             112
#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN            2
#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX            8
#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW           14
#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST           14
#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT          0

/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN           80000
#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX           224000
#define G4X_N_DUAL_CHANNEL_LVDS_MIN             1
#define G4X_N_DUAL_CHANNEL_LVDS_MAX             3
#define G4X_M_DUAL_CHANNEL_LVDS_MIN             104
#define G4X_M_DUAL_CHANNEL_LVDS_MAX             138
#define G4X_M1_DUAL_CHANNEL_LVDS_MIN            17
#define G4X_M1_DUAL_CHANNEL_LVDS_MAX            23
#define G4X_M2_DUAL_CHANNEL_LVDS_MIN            5
#define G4X_M2_DUAL_CHANNEL_LVDS_MAX            11
#define G4X_P_DUAL_CHANNEL_LVDS_MIN             14
#define G4X_P_DUAL_CHANNEL_LVDS_MAX             42
#define G4X_P1_DUAL_CHANNEL_LVDS_MIN            2
#define G4X_P1_DUAL_CHANNEL_LVDS_MAX            6
#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW           7
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST           7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT          0

/*The parameter is for DISPLAY PORT on G4x platform*/
#define G4X_DOT_DISPLAY_PORT_MIN           161670
#define G4X_DOT_DISPLAY_PORT_MAX           227000
#define G4X_N_DISPLAY_PORT_MIN             1
#define G4X_N_DISPLAY_PORT_MAX             2
#define G4X_M_DISPLAY_PORT_MIN             97
#define G4X_M_DISPLAY_PORT_MAX             108
#define G4X_M1_DISPLAY_PORT_MIN            0x10
#define G4X_M1_DISPLAY_PORT_MAX            0x12
#define G4X_M2_DISPLAY_PORT_MIN            0x05
#define G4X_M2_DISPLAY_PORT_MAX            0x06
#define G4X_P_DISPLAY_PORT_MIN             10
#define G4X_P_DISPLAY_PORT_MAX             20
#define G4X_P1_DISPLAY_PORT_MIN            1
#define G4X_P1_DISPLAY_PORT_MAX            2
#define G4X_P2_DISPLAY_PORT_SLOW           10
#define G4X_P2_DISPLAY_PORT_FAST           10
#define G4X_P2_DISPLAY_PORT_LIMIT          0

/* Ironlake / Sandybridge */
/* as we calculate clock using (register_value + 2) for
   N/M1/M2, so here the range value for them is (actual_value-2).
 */
#define IRONLAKE_DOT_MIN         25000
#define IRONLAKE_DOT_MAX         350000
#define IRONLAKE_VCO_MIN         1760000
#define IRONLAKE_VCO_MAX         3510000
#define IRONLAKE_M1_MIN          12
#define IRONLAKE_M1_MAX          22
#define IRONLAKE_M2_MIN          5
#define IRONLAKE_M2_MAX          9
#define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */

/* We have parameter ranges for different type of outputs. */

/* DAC & HDMI Refclk 120Mhz */
#define IRONLAKE_DAC_N_MIN	1
#define IRONLAKE_DAC_N_MAX	5
#define IRONLAKE_DAC_M_MIN	79
#define IRONLAKE_DAC_M_MAX	127
#define IRONLAKE_DAC_P_MIN	5
#define IRONLAKE_DAC_P_MAX	80
#define IRONLAKE_DAC_P1_MIN	1
#define IRONLAKE_DAC_P1_MAX	8
#define IRONLAKE_DAC_P2_SLOW	10
#define IRONLAKE_DAC_P2_FAST	5

/* LVDS single-channel 120Mhz refclk */
#define IRONLAKE_LVDS_S_N_MIN	1
#define IRONLAKE_LVDS_S_N_MAX	3
#define IRONLAKE_LVDS_S_M_MIN	79
#define IRONLAKE_LVDS_S_M_MAX	118
#define IRONLAKE_LVDS_S_P_MIN	28
#define IRONLAKE_LVDS_S_P_MAX	112
#define IRONLAKE_LVDS_S_P1_MIN	2
#define IRONLAKE_LVDS_S_P1_MAX	8
#define IRONLAKE_LVDS_S_P2_SLOW	14
#define IRONLAKE_LVDS_S_P2_FAST	14

/* LVDS dual-channel 120Mhz refclk */
#define IRONLAKE_LVDS_D_N_MIN	1
#define IRONLAKE_LVDS_D_N_MAX	3
#define IRONLAKE_LVDS_D_M_MIN	79
#define IRONLAKE_LVDS_D_M_MAX	127
#define IRONLAKE_LVDS_D_P_MIN	14
#define IRONLAKE_LVDS_D_P_MAX	56
#define IRONLAKE_LVDS_D_P1_MIN	2
#define IRONLAKE_LVDS_D_P1_MAX	8
#define IRONLAKE_LVDS_D_P2_SLOW	7
#define IRONLAKE_LVDS_D_P2_FAST	7

/* LVDS single-channel 100Mhz refclk */
#define IRONLAKE_LVDS_S_SSC_N_MIN	1
#define IRONLAKE_LVDS_S_SSC_N_MAX	2
#define IRONLAKE_LVDS_S_SSC_M_MIN	79
#define IRONLAKE_LVDS_S_SSC_M_MAX	126
#define IRONLAKE_LVDS_S_SSC_P_MIN	28
#define IRONLAKE_LVDS_S_SSC_P_MAX	112
#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
#define IRONLAKE_LVDS_S_SSC_P2_FAST	14

/* LVDS dual-channel 100Mhz refclk */
#define IRONLAKE_LVDS_D_SSC_N_MIN	1
#define IRONLAKE_LVDS_D_SSC_N_MAX	3
#define IRONLAKE_LVDS_D_SSC_M_MIN	79
#define IRONLAKE_LVDS_D_SSC_M_MAX	126
#define IRONLAKE_LVDS_D_SSC_P_MIN	14
#define IRONLAKE_LVDS_D_SSC_P_MAX	42
#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
#define IRONLAKE_LVDS_D_SSC_P2_FAST	7

/* DisplayPort */
#define IRONLAKE_DP_N_MIN		1
#define IRONLAKE_DP_N_MAX		2
#define IRONLAKE_DP_M_MIN		81
#define IRONLAKE_DP_M_MAX		90
#define IRONLAKE_DP_P_MIN		10
#define IRONLAKE_DP_P_MAX		20
#define IRONLAKE_DP_P2_FAST		10
#define IRONLAKE_DP_P2_SLOW		10
#define IRONLAKE_DP_P2_LIMIT		0
#define IRONLAKE_DP_P1_MIN		1
#define IRONLAKE_DP_P1_MAX		2
323 | ||
/* Divisor-search back-ends, selected per platform/output through
 * intel_limit_t.find_pll. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock);
337 | ||
/* 8xx: DVO/DAC outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
	.vco = { .min = I8XX_VCO_MIN,		.max = I8XX_VCO_MAX },
	.n   = { .min = I8XX_N_MIN,		.max = I8XX_N_MAX },
	.m   = { .min = I8XX_M_MIN,		.max = I8XX_M_MAX },
	.m1  = { .min = I8XX_M1_MIN,		.max = I8XX_M1_MAX },
	.m2  = { .min = I8XX_M2_MIN,		.max = I8XX_M2_MAX },
	.p   = { .min = I8XX_P_MIN,		.max = I8XX_P_MAX },
	.p1  = { .min = I8XX_P1_MIN,		.max = I8XX_P1_MAX },
	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
		 .p2_slow = I8XX_P2_SLOW,	.p2_fast = I8XX_P2_FAST },
	.find_pll = intel_find_best_PLL,
};

/* 8xx: LVDS panels (narrower p1, LVDS-specific p2). */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
	.vco = { .min = I8XX_VCO_MIN,		.max = I8XX_VCO_MAX },
	.n   = { .min = I8XX_N_MIN,		.max = I8XX_N_MAX },
	.m   = { .min = I8XX_M_MIN,		.max = I8XX_M_MAX },
	.m1  = { .min = I8XX_M1_MIN,		.max = I8XX_M1_MAX },
	.m2  = { .min = I8XX_M2_MIN,		.max = I8XX_M2_MAX },
	.p   = { .min = I8XX_P_MIN,		.max = I8XX_P_MAX },
	.p1  = { .min = I8XX_P1_LVDS_MIN,	.max = I8XX_P1_LVDS_MAX },
	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
		 .p2_slow = I8XX_P2_LVDS_SLOW,	.p2_fast = I8XX_P2_LVDS_FAST },
	.find_pll = intel_find_best_PLL,
};

/* 9xx: SDVO/DAC outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
	.vco = { .min = I9XX_VCO_MIN,		.max = I9XX_VCO_MAX },
	.n   = { .min = I9XX_N_MIN,		.max = I9XX_N_MAX },
	.m   = { .min = I9XX_M_MIN,		.max = I9XX_M_MAX },
	.m1  = { .min = I9XX_M1_MIN,		.max = I9XX_M1_MAX },
	.m2  = { .min = I9XX_M2_MIN,		.max = I9XX_M2_MAX },
	.p   = { .min = I9XX_P_SDVO_DAC_MIN,	.max = I9XX_P_SDVO_DAC_MAX },
	.p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
	.find_pll = intel_find_best_PLL,
};

/* 9xx: LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
	.vco = { .min = I9XX_VCO_MIN,		.max = I9XX_VCO_MAX },
	.n   = { .min = I9XX_N_MIN,		.max = I9XX_N_MAX },
	.m   = { .min = I9XX_M_MIN,		.max = I9XX_M_MAX },
	.m1  = { .min = I9XX_M1_MIN,		.max = I9XX_M1_MAX },
	.m2  = { .min = I9XX_M2_MIN,		.max = I9XX_M2_MAX },
	.p   = { .min = I9XX_P_LVDS_MIN,	.max = I9XX_P_LVDS_MAX },
	.p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
	/* The single-channel range is 25-112Mhz, and dual-channel
	 * is 80-224Mhz. Prefer single channel as much as possible.
	 */
	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_FAST },
	.find_pll = intel_find_best_PLL,
};
396 | ||
/* below parameter and function is for G4X Chipset Family*/
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = G4X_DOT_SDVO_MIN,	.max = G4X_DOT_SDVO_MAX },
	.vco = { .min = G4X_VCO_MIN,		.max = G4X_VCO_MAX},
	.n   = { .min = G4X_N_SDVO_MIN,		.max = G4X_N_SDVO_MAX },
	.m   = { .min = G4X_M_SDVO_MIN,		.max = G4X_M_SDVO_MAX },
	.m1  = { .min = G4X_M1_SDVO_MIN,	.max = G4X_M1_SDVO_MAX },
	.m2  = { .min = G4X_M2_SDVO_MIN,	.max = G4X_M2_SDVO_MAX },
	.p   = { .min = G4X_P_SDVO_MIN,		.max = G4X_P_SDVO_MAX },
	.p1  = { .min = G4X_P1_SDVO_MIN,	.max = G4X_P1_SDVO_MAX},
	.p2  = { .dot_limit = G4X_P2_SDVO_LIMIT,
		 .p2_slow = G4X_P2_SDVO_SLOW,
		 .p2_fast = G4X_P2_SDVO_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x: HDMI and analog (DAC) outputs share one table. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = G4X_DOT_HDMI_DAC_MIN,	.max = G4X_DOT_HDMI_DAC_MAX },
	.vco = { .min = G4X_VCO_MIN,		.max = G4X_VCO_MAX},
	.n   = { .min = G4X_N_HDMI_DAC_MIN,	.max = G4X_N_HDMI_DAC_MAX },
	.m   = { .min = G4X_M_HDMI_DAC_MIN,	.max = G4X_M_HDMI_DAC_MAX },
	.m1  = { .min = G4X_M1_HDMI_DAC_MIN,	.max = G4X_M1_HDMI_DAC_MAX },
	.m2  = { .min = G4X_M2_HDMI_DAC_MIN,	.max = G4X_M2_HDMI_DAC_MAX },
	.p   = { .min = G4X_P_HDMI_DAC_MIN,	.max = G4X_P_HDMI_DAC_MAX },
	.p1  = { .min = G4X_P1_HDMI_DAC_MIN,	.max = G4X_P1_HDMI_DAC_MAX},
	.p2  = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
		 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
		 .p2_fast = G4X_P2_HDMI_DAC_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x: single-channel LVDS (dot clock up to 115 MHz). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX },
	.n   = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
	.m   = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
	.m1  = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
	.m2  = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
	.p   = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
	.p1  = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
		 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
	.p2  = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
		 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
		 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x: dual-channel LVDS (dot clock 80-224 MHz). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX },
	.n   = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
	.m   = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
	.m1  = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
	.m2  = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
	.p   = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
	.p1  = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
		 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
	.p2  = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
		 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
		 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x: DisplayPort (fixed link rates; uses the DP-specific finder). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
		 .max = G4X_DOT_DISPLAY_PORT_MAX },
	.vco = { .min = G4X_VCO_MIN,
		 .max = G4X_VCO_MAX},
	.n   = { .min = G4X_N_DISPLAY_PORT_MIN,
		 .max = G4X_N_DISPLAY_PORT_MAX },
	.m   = { .min = G4X_M_DISPLAY_PORT_MIN,
		 .max = G4X_M_DISPLAY_PORT_MAX },
	.m1  = { .min = G4X_M1_DISPLAY_PORT_MIN,
		 .max = G4X_M1_DISPLAY_PORT_MAX },
	.m2  = { .min = G4X_M2_DISPLAY_PORT_MIN,
		 .max = G4X_M2_DISPLAY_PORT_MAX },
	.p   = { .min = G4X_P_DISPLAY_PORT_MIN,
		 .max = G4X_P_DISPLAY_PORT_MAX },
	.p1  = { .min = G4X_P1_DISPLAY_PORT_MIN,
		 .max = G4X_P1_DISPLAY_PORT_MAX},
	.p2  = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
		 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
		 .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
	.find_pll = intel_find_pll_g4x_dp,
};
500 | ||
/* Pineview: SDVO/DAC (Pineview-specific VCO/N/M ranges, see the
 * PINEVIEW_* macro comments above). */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX},
	.vco = { .min = PINEVIEW_VCO_MIN,	.max = PINEVIEW_VCO_MAX },
	.n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
	.m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
	.m1  = { .min = PINEVIEW_M1_MIN,	.max = PINEVIEW_M1_MAX },
	.m2  = { .min = PINEVIEW_M2_MIN,	.max = PINEVIEW_M2_MAX },
	.p   = { .min = I9XX_P_SDVO_DAC_MIN,	.max = I9XX_P_SDVO_DAC_MAX },
	.p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
	.find_pll = intel_find_best_PLL,
};

/* Pineview: LVDS panels. */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
	.vco = { .min = PINEVIEW_VCO_MIN,	.max = PINEVIEW_VCO_MAX },
	.n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
	.m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
	.m1  = { .min = PINEVIEW_M1_MIN,	.max = PINEVIEW_M1_MAX },
	.m2  = { .min = PINEVIEW_M2_MIN,	.max = PINEVIEW_M2_MAX },
	.p   = { .min = PINEVIEW_P_LVDS_MIN,	.max = PINEVIEW_P_LVDS_MAX },
	.p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
	/* Pineview only supports single-channel mode. */
	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
	.find_pll = intel_find_best_PLL,
};
529 | ||
/* Ironlake: DAC & HDMI, 120 MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = IRONLAKE_DOT_MIN,	.max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,	.max = IRONLAKE_VCO_MAX },
	.n   = { .min = IRONLAKE_DAC_N_MIN,	.max = IRONLAKE_DAC_N_MAX },
	.m   = { .min = IRONLAKE_DAC_M_MIN,	.max = IRONLAKE_DAC_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,	.max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,	.max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_DAC_P_MIN,	.max = IRONLAKE_DAC_P_MAX },
	.p1  = { .min = IRONLAKE_DAC_P1_MIN,	.max = IRONLAKE_DAC_P1_MAX },
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
		 .p2_fast = IRONLAKE_DAC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake: single-channel LVDS, 120 MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = IRONLAKE_DOT_MIN,	.max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,	.max = IRONLAKE_VCO_MAX },
	.n   = { .min = IRONLAKE_LVDS_S_N_MIN,	.max = IRONLAKE_LVDS_S_N_MAX },
	.m   = { .min = IRONLAKE_LVDS_S_M_MIN,	.max = IRONLAKE_LVDS_S_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,	.max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,	.max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_LVDS_S_P_MIN,	.max = IRONLAKE_LVDS_S_P_MAX },
	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake: dual-channel LVDS, 120 MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = IRONLAKE_DOT_MIN,	.max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,	.max = IRONLAKE_VCO_MAX },
	.n   = { .min = IRONLAKE_LVDS_D_N_MIN,	.max = IRONLAKE_LVDS_D_N_MAX },
	.m   = { .min = IRONLAKE_LVDS_D_M_MIN,	.max = IRONLAKE_LVDS_D_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,	.max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,	.max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_LVDS_D_P_MIN,	.max = IRONLAKE_LVDS_D_P_MAX },
	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake: single-channel LVDS, 100 MHz (SSC) refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = IRONLAKE_DOT_MIN,	.max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,	.max = IRONLAKE_VCO_MAX },
	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,	.max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,	.max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake: dual-channel LVDS, 100 MHz (SSC) refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = IRONLAKE_DOT_MIN,	.max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,	.max = IRONLAKE_VCO_MAX },
	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,	.max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,	.max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake: DisplayPort / eDP (uses the DP-specific finder). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = IRONLAKE_DOT_MIN,
		 .max = IRONLAKE_DOT_MAX },
	.vco = { .min = IRONLAKE_VCO_MIN,
		 .max = IRONLAKE_VCO_MAX},
	.n   = { .min = IRONLAKE_DP_N_MIN,
		 .max = IRONLAKE_DP_N_MAX },
	.m   = { .min = IRONLAKE_DP_M_MIN,
		 .max = IRONLAKE_DP_M_MAX },
	.m1  = { .min = IRONLAKE_M1_MIN,
		 .max = IRONLAKE_M1_MAX },
	.m2  = { .min = IRONLAKE_M2_MIN,
		 .max = IRONLAKE_M2_MAX },
	.p   = { .min = IRONLAKE_DP_P_MIN,
		 .max = IRONLAKE_DP_P_MAX },
	.p1  = { .min = IRONLAKE_DP_P1_MIN,
		 .max = IRONLAKE_DP_P1_MAX},
	.p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
		 .p2_slow = IRONLAKE_DP_P2_SLOW,
		 .p2_fast = IRONLAKE_DP_P2_FAST },
	.find_pll = intel_find_pll_ironlake_dp,
};
627 | ||
628 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | |
629 | { | |
630 | struct drm_device *dev = crtc->dev; | |
631 | struct drm_i915_private *dev_priv = dev->dev_private; | |
632 | const intel_limit_t *limit; | |
633 | int refclk = 120; | |
634 | ||
635 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | |
636 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | |
637 | refclk = 100; | |
638 | ||
639 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | |
640 | LVDS_CLKB_POWER_UP) { | |
641 | /* LVDS dual channel */ | |
642 | if (refclk == 100) | |
643 | limit = &intel_limits_ironlake_dual_lvds_100m; | |
644 | else | |
645 | limit = &intel_limits_ironlake_dual_lvds; | |
646 | } else { | |
647 | if (refclk == 100) | |
648 | limit = &intel_limits_ironlake_single_lvds_100m; | |
649 | else | |
650 | limit = &intel_limits_ironlake_single_lvds; | |
651 | } | |
652 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | |
653 | HAS_eDP) | |
654 | limit = &intel_limits_ironlake_display_port; | |
655 | else | |
656 | limit = &intel_limits_ironlake_dac; | |
657 | ||
658 | return limit; | |
659 | } | |
660 | ||
/*
 * Pick the DPLL limit table for a CRTC on G4x hardware, based on the
 * type of output driven by the pipe.  LVDS channel count is read from
 * the LVDS register's CLKB power bits.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
687 | ||
688 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |
689 | { | |
690 | struct drm_device *dev = crtc->dev; | |
691 | const intel_limit_t *limit; | |
692 | ||
693 | if (HAS_PCH_SPLIT(dev)) | |
694 | limit = intel_ironlake_limit(crtc); | |
695 | else if (IS_G4X(dev)) { | |
696 | limit = intel_g4x_limit(crtc); | |
697 | } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { | |
698 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | |
699 | limit = &intel_limits_i9xx_lvds; | |
700 | else | |
701 | limit = &intel_limits_i9xx_sdvo; | |
702 | } else if (IS_PINEVIEW(dev)) { | |
703 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | |
704 | limit = &intel_limits_pineview_lvds; | |
705 | else | |
706 | limit = &intel_limits_pineview_sdvo; | |
707 | } else { | |
708 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | |
709 | limit = &intel_limits_i8xx_lvds; | |
710 | else | |
711 | limit = &intel_limits_i8xx_dvo; | |
712 | } | |
713 | return limit; | |
714 | } | |
715 | ||
716 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ | |
717 | static void pineview_clock(int refclk, intel_clock_t *clock) | |
718 | { | |
719 | clock->m = clock->m2 + 2; | |
720 | clock->p = clock->p1 * clock->p2; | |
721 | clock->vco = refclk * clock->m / clock->n; | |
722 | clock->dot = clock->vco / clock->p; | |
723 | } | |
724 | ||
725 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) | |
726 | { | |
727 | if (IS_PINEVIEW(dev)) { | |
728 | pineview_clock(refclk, clock); | |
729 | return; | |
730 | } | |
731 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | |
732 | clock->p = clock->p1 * clock->p2; | |
733 | clock->vco = refclk * clock->m / (clock->n + 2); | |
734 | clock->dot = clock->vco / clock->p; | |
735 | } | |
736 | ||
737 | /** | |
738 | * Returns whether any output on the specified pipe is of the specified type | |
739 | */ | |
740 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |
741 | { | |
742 | struct drm_device *dev = crtc->dev; | |
743 | struct drm_mode_config *mode_config = &dev->mode_config; | |
744 | struct drm_encoder *l_entry; | |
745 | ||
746 | list_for_each_entry(l_entry, &mode_config->encoder_list, head) { | |
747 | if (l_entry && l_entry->crtc == crtc) { | |
748 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); | |
749 | if (intel_encoder->type == type) | |
750 | return true; | |
751 | } | |
752 | } | |
753 | return false; | |
754 | } | |
755 | ||
/* Reject the candidate divisor set; the debug print is compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
757 | /** | |
758 | * Returns whether the given set of divisors are valid for a given refclk with | |
759 | * the given connectors. | |
760 | */ | |
761 | ||
762 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |
763 | { | |
764 | const intel_limit_t *limit = intel_limit (crtc); | |
765 | struct drm_device *dev = crtc->dev; | |
766 | ||
767 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | |
768 | INTELPllInvalid ("p1 out of range\n"); | |
769 | if (clock->p < limit->p.min || limit->p.max < clock->p) | |
770 | INTELPllInvalid ("p out of range\n"); | |
771 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) | |
772 | INTELPllInvalid ("m2 out of range\n"); | |
773 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | |
774 | INTELPllInvalid ("m1 out of range\n"); | |
775 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) | |
776 | INTELPllInvalid ("m1 <= m2\n"); | |
777 | if (clock->m < limit->m.min || limit->m.max < clock->m) | |
778 | INTELPllInvalid ("m out of range\n"); | |
779 | if (clock->n < limit->n.min || limit->n.max < clock->n) | |
780 | INTELPllInvalid ("n out of range\n"); | |
781 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) | |
782 | INTELPllInvalid ("vco out of range\n"); | |
783 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, | |
784 | * connector, etc., rather than just a single range. | |
785 | */ | |
786 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) | |
787 | INTELPllInvalid ("dot out of range\n"); | |
788 | ||
789 | return true; | |
790 | } | |
791 | ||
/*
 * Exhaustively scan the divisor ranges in @limit for the combination
 * whose resulting dot clock is closest to @target, given @refclk.
 * The winner is written to @best_clock.
 *
 * Returns true if at least one valid combination was found (i.e. the
 * error was improved below the initial value of @target).
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	/* Best error so far; seeded so any valid candidate improves it. */
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: choose p2 by comparing against the dot-clock
		 * threshold from the limit table. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	/* Brute-force search over m1, m2, n and p1 (p2 fixed above). */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);

					if (!intel_PLL_is_valid(crtc, &clock))
						continue;

					/* Keep the candidate closest to
					 * the requested dot clock. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
854 | ||
/*
 * G4x variant of the best-PLL search.  Unlike intel_find_best_PLL()
 * this accepts any candidate within ~0.488% of @target, iterates the
 * divisors in the hardware-preferred direction (small n, large m1/m2)
 * and narrows the n range as better candidates are found.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00488 */
	int err_most = (target >> 8) + (target >> 10);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* The LVDS control register moved on PCH-split parts. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* Trust the panel's current single/dual channel wiring. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(crtc, &clock))
						continue;
					this_err = abs(clock.dot - target) ;
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Don't bother with larger n
						 * than the best found. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
916 | ||
917 | static bool | |
918 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |
919 | int target, int refclk, intel_clock_t *best_clock) | |
920 | { | |
921 | struct drm_device *dev = crtc->dev; | |
922 | intel_clock_t clock; | |
923 | ||
924 | /* return directly when it is eDP */ | |
925 | if (HAS_eDP) | |
926 | return true; | |
927 | ||
928 | if (target < 200000) { | |
929 | clock.n = 1; | |
930 | clock.p1 = 2; | |
931 | clock.p2 = 10; | |
932 | clock.m1 = 12; | |
933 | clock.m2 = 9; | |
934 | } else { | |
935 | clock.n = 2; | |
936 | clock.p1 = 1; | |
937 | clock.p2 = 10; | |
938 | clock.m1 = 14; | |
939 | clock.m2 = 8; | |
940 | } | |
941 | intel_clock(dev, refclk, &clock); | |
942 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | |
943 | return true; | |
944 | } | |
945 | ||
946 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | |
947 | static bool | |
948 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |
949 | int target, int refclk, intel_clock_t *best_clock) | |
950 | { | |
951 | intel_clock_t clock; | |
952 | if (target < 200000) { | |
953 | clock.p1 = 2; | |
954 | clock.p2 = 10; | |
955 | clock.n = 2; | |
956 | clock.m1 = 23; | |
957 | clock.m2 = 8; | |
958 | } else { | |
959 | clock.p1 = 1; | |
960 | clock.p2 = 10; | |
961 | clock.n = 1; | |
962 | clock.m1 = 14; | |
963 | clock.m2 = 2; | |
964 | } | |
965 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | |
966 | clock.p = (clock.p1 * clock.p2); | |
967 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | |
968 | clock.vco = 0; | |
969 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | |
970 | return true; | |
971 | } | |
972 | ||
/*
 * Crude vblank wait: sleep one full frame period at 50Hz instead of
 * polling hardware, so at least one vblank elapses for any mode
 * refreshing at 50Hz or faster.
 */
void
intel_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	msleep(20);
}
979 | ||
/* Parameters have changed, update FBC info */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Derive the compressed-buffer pitch from the stolen-memory size,
	 * then clamp it to the framebuffer's actual pitch. */
	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;

	if (fb->pitch < dev_priv->cfb_pitch)
		dev_priv->cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	/* Cache the fence/plane so intel_update_fbc() can detect changes. */
	dev_priv->cfb_fence = obj_priv->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
	if (obj_priv->tiling_mode != I915_TILING_NONE)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): 0x2fff is not a contiguous bitmask (0x3fff would
	 * be) -- confirm the interval field width against the PRM. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	if (obj_priv->tiling_mode != I915_TILING_NONE)
		fbc_ctl |= dev_priv->cfb_fence;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
1027 | ||
/*
 * Turn off the legacy (8xx-style) FBC unit, wait for any in-flight
 * compression to drain, then wait a vblank for the change to settle.
 */
void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	if (!I915_HAS_FBC(dev))
		return;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	/* NOTE(review): unbounded busy-wait -- hangs if the hardware never
	 * clears FBC_STAT_COMPRESSING; a timeout would be safer. */
	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
		;	/* nothing */

	intel_wait_for_vblank(dev);

	DRM_DEBUG_KMS("disabled FBC\n");
}
1049 | ||
1050 | static bool i8xx_fbc_enabled(struct drm_crtc *crtc) | |
1051 | { | |
1052 | struct drm_device *dev = crtc->dev; | |
1053 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1054 | ||
1055 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | |
1056 | } | |
1057 | ||
/*
 * Program and enable the g4x-style DPFC compressor for @crtc's plane,
 * recompressing roughly every @interval frames.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
		     DPFC_CTL_PLANEB);
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Convert to the 64B units DPFC expects, and cache the fence and
	 * plane so intel_update_fbc() can detect parameter changes. */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj_priv->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		/* Tiled scanout: use the CPU fence for tracking writes. */
		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1094 | ||
1095 | void g4x_disable_fbc(struct drm_device *dev) | |
1096 | { | |
1097 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1098 | u32 dpfc_ctl; | |
1099 | ||
1100 | /* Disable compression */ | |
1101 | dpfc_ctl = I915_READ(DPFC_CONTROL); | |
1102 | dpfc_ctl &= ~DPFC_CTL_EN; | |
1103 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | |
1104 | intel_wait_for_vblank(dev); | |
1105 | ||
1106 | DRM_DEBUG_KMS("disabled FBC\n"); | |
1107 | } | |
1108 | ||
1109 | static bool g4x_fbc_enabled(struct drm_crtc *crtc) | |
1110 | { | |
1111 | struct drm_device *dev = crtc->dev; | |
1112 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1113 | ||
1114 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | |
1115 | } | |
1116 | ||
1117 | /** | |
1118 | * intel_update_fbc - enable/disable FBC as needed | |
1119 | * @crtc: CRTC to point the compressor at | |
1120 | * @mode: mode in use | |
1121 | * | |
1122 | * Set up the framebuffer compression hardware at mode set time. We | |
1123 | * enable it if possible: | |
1124 | * - plane A only (on pre-965) | |
1125 | * - no pixel mulitply/line duplication | |
1126 | * - no alpha buffer discard | |
1127 | * - no dual wide | |
1128 | * - framebuffer <= 2048 in width, 1536 in height | |
1129 | * | |
1130 | * We can't assume that any compression will take place (worst case), | |
1131 | * so the compressed buffer has to be the same size as the uncompressed | |
1132 | * one. It also must reside (along with the line length buffer) in | |
1133 | * stolen memory. | |
1134 | * | |
1135 | * We need to enable/disable FBC on a global basis. | |
1136 | */ | |
static void intel_update_fbc(struct drm_crtc *crtc,
			     struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	/* Nothing to do when runtime powersaving is disabled. */
	if (!i915_powersave)
		return;

	/* All three per-chipset FBC hooks must be present. */
	if (!dev_priv->display.fbc_enabled ||
	    !dev_priv->display.enable_fbc ||
	    !dev_priv->display.disable_fbc)
		return;

	if (!crtc->fb)
		return;

	intel_fb = to_intel_framebuffer(fb);
	obj_priv = to_intel_bo(intel_fb->obj);

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	if (intel_fb->obj->size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((mode->hdisplay > 2048) ||
	    (mode->vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	/* 915GM/945GM can only compress plane A. */
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	if (obj_priv->tiling_mode != I915_TILING_X) {
		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	if (dev_priv->display.fbc_enabled(crtc)) {
		/* We can re-enable it in this case, but need to update pitch */
		if (fb->pitch > dev_priv->cfb_pitch)
			dev_priv->display.disable_fbc(dev);
		if (obj_priv->fence_reg != dev_priv->cfb_fence)
			dev_priv->display.disable_fbc(dev);
		if (plane != dev_priv->cfb_plane)
			dev_priv->display.disable_fbc(dev);
	}

	if (!dev_priv->display.fbc_enabled(crtc)) {
		/* Now try to turn it back on if possible */
		dev_priv->display.enable_fbc(crtc, 500);
	}

	return;

out_disable:
	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
	/* Multiple disables should be harmless */
	if (dev_priv->display.fbc_enabled(crtc))
		dev_priv->display.disable_fbc(dev);
}
1223 | ||
/*
 * Pin @obj into the GTT at the alignment scanout requires for its
 * tiling mode, and install a fence register for tiled buffers.
 *
 * Returns 0 on success, -EINVAL for Y-tiled buffers, or the error from
 * pinning / fence allocation (the pin is released on fence failure).
 * Caller is responsible for unpinning on success.
 */
static int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	u32 alignment;
	int ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	ret = i915_gem_object_pin(obj, alignment);
	if (ret != 0)
		return ret;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}
	}

	return 0;
}
1267 | ||
/*
 * Point @crtc's display plane at its (possibly new) framebuffer with a
 * pan of (@x, @y): pin and fence the new buffer, program the plane's
 * format/stride/offset registers, then unpin @old_fb and mirror the
 * pan position into the SAREA for legacy (DRI1) clients.
 *
 * Returns 0 on success or a negative errno from the pin/flush steps.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	/* Plane-specific register addresses (plane A vs plane B). */
	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
	u32 dspcntr;
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	/* Only planes 0 and 1 have SAREA state to update. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(crtc->fb);
	obj = intel_fb->obj;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Prepare the GEM object for scanout use. */
	ret = i915_gem_object_set_to_display_plane(obj);
	if (ret != 0) {
		i915_gem_object_unpin(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	dspcntr = I915_READ(dspcntr_reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (crtc->fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth\n");
		i915_gem_object_unpin(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	if (IS_I965G(dev)) {
		if (obj_priv->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (HAS_PCH_SPLIT(dev))
		/* must disable */
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(dspcntr_reg, dspcntr);

	Start = obj_priv->gtt_offset;
	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
	I915_WRITE(dspstride, crtc->fb->pitch);
	if (IS_I965G(dev)) {
		/* 965+: surface base plus a separate x/y pan offset. */
		I915_WRITE(dspbase, Offset);
		I915_READ(dspbase);
		I915_WRITE(dspsurf, Start);
		I915_READ(dspsurf);
		I915_WRITE(dsptileoff, (y << 16) | x);
	} else {
		/* Pre-965: a single linear byte address. */
		I915_WRITE(dspbase, Start + Offset);
		I915_READ(dspbase);
	}

	if ((IS_I965G(dev) || plane == 0))
		intel_update_fbc(crtc, &crtc->mode);

	intel_wait_for_vblank(dev);

	/* Drop the pin held on the previously displayed framebuffer. */
	if (old_fb) {
		intel_fb = to_intel_framebuffer(old_fb);
		obj_priv = to_intel_bo(intel_fb->obj);
		i915_gem_object_unpin(intel_fb->obj);
	}
	intel_increase_pllclock(crtc, true);

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	/* Mirror the new pan position into the SAREA for DRI1 clients. */
	if (pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
1409 | ||
/* Disable the VGA plane that we never use */
static void i915_disable_vga (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives elsewhere on PCH-split parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Already disabled; nothing to do. */
	if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
		return;

	/* Set bit 5 of VGA sequencer register SR01 (screen off) before
	 * disabling the plane. */
	I915_WRITE8(VGA_SR_INDEX, 1);
	sr1 = I915_READ8(VGA_SR_DATA);
	I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
	udelay(100);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}
1432 | ||
1433 | static void ironlake_disable_pll_edp (struct drm_crtc *crtc) | |
1434 | { | |
1435 | struct drm_device *dev = crtc->dev; | |
1436 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1437 | u32 dpa_ctl; | |
1438 | ||
1439 | DRM_DEBUG_KMS("\n"); | |
1440 | dpa_ctl = I915_READ(DP_A); | |
1441 | dpa_ctl &= ~DP_PLL_ENABLE; | |
1442 | I915_WRITE(DP_A, dpa_ctl); | |
1443 | } | |
1444 | ||
1445 | static void ironlake_enable_pll_edp (struct drm_crtc *crtc) | |
1446 | { | |
1447 | struct drm_device *dev = crtc->dev; | |
1448 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1449 | u32 dpa_ctl; | |
1450 | ||
1451 | dpa_ctl = I915_READ(DP_A); | |
1452 | dpa_ctl |= DP_PLL_ENABLE; | |
1453 | I915_WRITE(DP_A, dpa_ctl); | |
1454 | udelay(200); | |
1455 | } | |
1456 | ||
1457 | ||
/*
 * Select the eDP PLL frequency (160MHz for link clocks below 200MHz,
 * 270MHz otherwise), applying the documented 160MHz register workaround
 * sequence, then let the PLL settle.
 */
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Allow the PLL to settle at the new frequency. */
	udelay(500);
}
1493 | ||
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	/* Per-pipe FDI transmitter/receiver register addresses. */
	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
	u32 temp, tries = 0;

	/* enable CPU FDI TX and PCH FDI RX */
	temp = I915_READ(fdi_tx_reg);
	temp |= FDI_TX_ENABLE;
	temp |= FDI_DP_PORT_WIDTH_X4; /* default */
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(fdi_tx_reg, temp);
	I915_READ(fdi_tx_reg);

	temp = I915_READ(fdi_rx_reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
	I915_READ(fdi_rx_reg);
	udelay(150);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	temp = I915_READ(fdi_rx_imr_reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(fdi_rx_imr_reg, temp);
	I915_READ(fdi_rx_imr_reg);
	udelay(150);

	/* Poll for bit lock; give up after 5 attempts (training is
	 * best-effort, so failure only logs). */
	for (;;) {
		temp = I915_READ(fdi_rx_iir_reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to acknowledge/clear it. */
			I915_WRITE(fdi_rx_iir_reg,
				   temp | FDI_RX_BIT_LOCK);
			break;
		}

		tries++;

		if (tries > 5) {
			DRM_DEBUG_KMS("FDI train 1 fail!\n");
			break;
		}
	}

	/* Train 2 */
	temp = I915_READ(fdi_tx_reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(fdi_tx_reg, temp);

	temp = I915_READ(fdi_rx_reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(fdi_rx_reg, temp);
	udelay(150);

	tries = 0;

	/* Poll for symbol lock, same 5-attempt policy as train 1. */
	for (;;) {
		temp = I915_READ(fdi_rx_iir_reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(fdi_rx_iir_reg,
				   temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}

		tries++;

		if (tries > 5) {
			DRM_DEBUG_KMS("FDI train 2 fail!\n");
			break;
		}
	}

	DRM_DEBUG_KMS("FDI train done\n");
}
1586 | ||
/* Voltage-swing / pre-emphasis settings tried in order during SNB FDI
 * link training; indexed by the training loop in gen6_fdi_link_train(). */
static int snb_b_fdi_train_param [] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
1593 | ||
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Trains the CPU-to-PCH FDI link for @crtc's pipe in two phases:
 * pattern 1 until FDI_RX_BIT_LOCK, then pattern 2 until
 * FDI_RX_SYMBOL_LOCK, stepping through snb_b_fdi_train_param[]
 * voltage/emphasis settings on each retry.  The exact sequence of
 * register writes and delays follows the hardware programming model;
 * do not reorder.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	/* Per-pipe FDI register instances (A for pipe 0, B otherwise). */
	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
	u32 temp, i;

	/* enable CPU FDI TX and PCH FDI RX */
	temp = I915_READ(fdi_tx_reg);
	temp |= FDI_TX_ENABLE;
	temp |= FDI_DP_PORT_WIDTH_X4; /* default */
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(fdi_tx_reg, temp);
	I915_READ(fdi_tx_reg);	/* posting read to flush the write */

	temp = I915_READ(fdi_rx_reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT PCH uses a different training-pattern field layout */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
	I915_READ(fdi_rx_reg);
	udelay(150);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	temp = I915_READ(fdi_rx_imr_reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(fdi_rx_imr_reg, temp);
	I915_READ(fdi_rx_imr_reg);
	udelay(150);

	/* Phase 1: try each voltage/emphasis setting until bit lock. */
	for (i = 0; i < 4; i++ ) {
		temp = I915_READ(fdi_tx_reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(fdi_tx_reg, temp);
		udelay(500);

		temp = I915_READ(fdi_rx_iir_reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			/* write the sticky bit back to clear it */
			I915_WRITE(fdi_rx_iir_reg,
				   temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_DEBUG_KMS("FDI train 1 fail!\n");

	/* Train 2 */
	temp = I915_READ(fdi_tx_reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(fdi_tx_reg, temp);

	temp = I915_READ(fdi_rx_reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(fdi_rx_reg, temp);
	udelay(150);

	/* Phase 2: same retry loop, now waiting for symbol lock. */
	for (i = 0; i < 4; i++ ) {
		temp = I915_READ(fdi_tx_reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(fdi_tx_reg, temp);
		udelay(500);

		temp = I915_READ(fdi_rx_iir_reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(fdi_rx_iir_reg,
				   temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_DEBUG_KMS("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
1704 | ||
/*
 * DPMS for CRTCs on PCH-split (Ironlake-style) hardware.
 *
 * ON: powers up the FDI RX/TX PLLs (or the eDP PLL), panel fitter, CPU
 * pipe and plane, trains the FDI link, then enables the PCH DPLL,
 * transcoder timing/DP port and finally the transcoder itself.
 * OFF: tears everything down in roughly the reverse order.
 *
 * The register write order and the udelay()s between steps follow the
 * hardware mode-set sequence; do not reorder.
 */
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	/* Per-pipe/per-plane register instances (A for index 0, B otherwise). */
	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
	int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
	int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
	int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
	int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
	int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
	int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
	int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
	int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
	int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
	int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
	int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
	int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
	int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
	int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
	u32 temp;
	int n;
	u32 pipe_bpc;

	/* Capture the current bits-per-color so FDI RX and the transcoder
	 * can be programmed to match the pipe. */
	temp = I915_READ(pipeconf_reg);
	pipe_bpc = temp & PIPE_BPC_MASK;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);

		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
			temp = I915_READ(PCH_LVDS);
			if ((temp & LVDS_PORT_EN) == 0) {
				I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
				POSTING_READ(PCH_LVDS);
			}
		}

		if (HAS_eDP) {
			/* enable eDP PLL */
			ironlake_enable_pll_edp(crtc);
		} else {

			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
			temp = I915_READ(fdi_rx_reg);
			/*
			 * make the BPC in FDI Rx be consistent with that in
			 * pipeconf reg.
			 */
			temp &= ~(0x7 << 16);
			temp |= (pipe_bpc << 11);
			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
					FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
			I915_READ(fdi_rx_reg);
			udelay(200);

			/* Switch from Rawclk to PCDclk */
			temp = I915_READ(fdi_rx_reg);
			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
			I915_READ(fdi_rx_reg);
			udelay(200);

			/* Enable CPU FDI TX PLL, always on for Ironlake */
			temp = I915_READ(fdi_tx_reg);
			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
				I915_READ(fdi_tx_reg);
				udelay(100);
			}
		}

		/* Enable panel fitting for LVDS */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
			temp = I915_READ(pf_ctl_reg);
			I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);

			/* currently full aspect */
			I915_WRITE(pf_win_pos, 0);

			I915_WRITE(pf_win_size,
				   (dev_priv->panel_fixed_mode->hdisplay << 16) |
				   (dev_priv->panel_fixed_mode->vdisplay));
		}

		/* Enable CPU pipe */
		temp = I915_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) == 0) {
			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
			I915_READ(pipeconf_reg);
			udelay(100);
		}

		/* configure and enable CPU plane */
		temp = I915_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
		}

		if (!HAS_eDP) {
			/* For PCH output, training FDI link */
			if (IS_GEN6(dev))
				gen6_fdi_link_train(crtc);
			else
				ironlake_fdi_link_train(crtc);

			/* enable PCH DPLL */
			temp = I915_READ(pch_dpll_reg);
			if ((temp & DPLL_VCO_ENABLE) == 0) {
				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
				I915_READ(pch_dpll_reg);
			}
			udelay(200);

			if (HAS_PCH_CPT(dev)) {
				/* Be sure PCH DPLL SEL is set */
				temp = I915_READ(PCH_DPLL_SEL);
				if (trans_dpll_sel == 0 &&
						(temp & TRANSA_DPLL_ENABLE) == 0)
					temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
				else if (trans_dpll_sel == 1 &&
						(temp & TRANSB_DPLL_ENABLE) == 0)
					temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
				I915_WRITE(PCH_DPLL_SEL, temp);
				I915_READ(PCH_DPLL_SEL);
			}

			/* set transcoder timing: copy from the CPU pipe */
			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
			I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
			I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));

			I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
			I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
			I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));

			/* enable normal train */
			temp = I915_READ(fdi_tx_reg);
			temp &= ~FDI_LINK_TRAIN_NONE;
			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
					FDI_TX_ENHANCE_FRAME_ENABLE);
			I915_READ(fdi_tx_reg);

			temp = I915_READ(fdi_rx_reg);
			if (HAS_PCH_CPT(dev)) {
				temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
				temp |= FDI_LINK_TRAIN_NORMAL_CPT;
			} else {
				/* NOTE(review): clearing then setting the same
				 * bits is a no-op pair; kept byte-identical. */
				temp &= ~FDI_LINK_TRAIN_NONE;
				temp |= FDI_LINK_TRAIN_NONE;
			}
			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
			I915_READ(fdi_rx_reg);

			/* wait one idle pattern time */
			udelay(100);

			/* For PCH DP, enable TRANS_DP_CTL */
			if (HAS_PCH_CPT(dev) &&
			    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
				int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
				int reg;

				reg = I915_READ(trans_dp_ctl);
				reg &= ~TRANS_DP_PORT_SEL_MASK;
				reg = TRANS_DP_OUTPUT_ENABLE |
				      TRANS_DP_ENH_FRAMING |
				      TRANS_DP_VSYNC_ACTIVE_HIGH |
				      TRANS_DP_HSYNC_ACTIVE_HIGH;

				switch (intel_trans_dp_port_sel(crtc)) {
				case PCH_DP_B:
					reg |= TRANS_DP_PORT_SEL_B;
					break;
				case PCH_DP_C:
					reg |= TRANS_DP_PORT_SEL_C;
					break;
				case PCH_DP_D:
					reg |= TRANS_DP_PORT_SEL_D;
					break;
				default:
					DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
					reg |= TRANS_DP_PORT_SEL_B;
					break;
				}

				I915_WRITE(trans_dp_ctl, reg);
				POSTING_READ(trans_dp_ctl);
			}

			/* enable PCH transcoder */
			temp = I915_READ(transconf_reg);
			/*
			 * make the BPC in transcoder be consistent with
			 * that in pipeconf reg.
			 */
			temp &= ~PIPE_BPC_MASK;
			temp |= pipe_bpc;
			I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
			I915_READ(transconf_reg);

			/* NOTE(review): unbounded busy-wait for the transcoder
			 * to report enabled — hangs forever if it never does. */
			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
				;

		}

		intel_crtc_load_lut(crtc);

		break;
	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);

		drm_vblank_off(dev, pipe);
		/* Disable display plane */
		temp = I915_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
			I915_READ(dspbase_reg);
		}

		i915_disable_vga(dev);

		/* disable cpu pipe, disable after all planes disabled */
		temp = I915_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
			I915_READ(pipeconf_reg);
			n = 0;
			/* wait for cpu pipe off, pipe state; bounded at
			 * 60 * 500us = 30ms before giving up */
			while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
				n++;
				if (n < 60) {
					udelay(500);
					continue;
				} else {
					DRM_DEBUG_KMS("pipe %d off delay\n",
								pipe);
					break;
				}
			}
		} else
			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);

		udelay(100);

		/* Disable PF */
		temp = I915_READ(pf_ctl_reg);
		if ((temp & PF_ENABLE) != 0) {
			I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
			I915_READ(pf_ctl_reg);
		}
		I915_WRITE(pf_win_size, 0);
		POSTING_READ(pf_win_size);


		/* disable CPU FDI tx and PCH FDI rx */
		temp = I915_READ(fdi_tx_reg);
		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
		I915_READ(fdi_tx_reg);

		temp = I915_READ(fdi_rx_reg);
		/* BPC in FDI rx is consistent with that in pipeconf */
		temp &= ~(0x07 << 16);
		temp |= (pipe_bpc << 11);
		I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
		I915_READ(fdi_rx_reg);

		udelay(100);

		/* still set train pattern 1 */
		temp = I915_READ(fdi_tx_reg);
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
		I915_WRITE(fdi_tx_reg, temp);
		POSTING_READ(fdi_tx_reg);

		temp = I915_READ(fdi_rx_reg);
		if (HAS_PCH_CPT(dev)) {
			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		} else {
			temp &= ~FDI_LINK_TRAIN_NONE;
			temp |= FDI_LINK_TRAIN_PATTERN_1;
		}
		I915_WRITE(fdi_rx_reg, temp);
		POSTING_READ(fdi_rx_reg);

		udelay(100);

		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
			temp = I915_READ(PCH_LVDS);
			I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
			I915_READ(PCH_LVDS);
			udelay(100);
		}

		/* disable PCH transcoder */
		temp = I915_READ(transconf_reg);
		if ((temp & TRANS_ENABLE) != 0) {
			I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
			I915_READ(transconf_reg);
			n = 0;
			/* wait for PCH transcoder off, transcoder state */
			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
				n++;
				if (n < 60) {
					udelay(500);
					continue;
				} else {
					DRM_DEBUG_KMS("transcoder %d off "
						      "delay\n", pipe);
					break;
				}
			}
		}

		temp = I915_READ(transconf_reg);
		/* BPC in transcoder is consistent with that in pipeconf */
		temp &= ~PIPE_BPC_MASK;
		temp |= pipe_bpc;
		I915_WRITE(transconf_reg, temp);
		I915_READ(transconf_reg);
		udelay(100);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
			int reg;

			reg = I915_READ(trans_dp_ctl);
			reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
			I915_WRITE(trans_dp_ctl, reg);
			POSTING_READ(trans_dp_ctl);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			if (trans_dpll_sel == 0)
				temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			else
				temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			I915_WRITE(PCH_DPLL_SEL, temp);
			I915_READ(PCH_DPLL_SEL);

		}

		/* disable PCH DPLL */
		temp = I915_READ(pch_dpll_reg);
		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
		I915_READ(pch_dpll_reg);

		if (HAS_eDP) {
			ironlake_disable_pll_edp(crtc);
		}

		/* Switch from PCDclk to Rawclk */
		temp = I915_READ(fdi_rx_reg);
		temp &= ~FDI_SEL_PCDCLK;
		I915_WRITE(fdi_rx_reg, temp);
		I915_READ(fdi_rx_reg);

		/* Disable CPU FDI TX PLL */
		temp = I915_READ(fdi_tx_reg);
		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
		I915_READ(fdi_tx_reg);
		udelay(100);

		temp = I915_READ(fdi_rx_reg);
		temp &= ~FDI_RX_PLL_ENABLE;
		I915_WRITE(fdi_rx_reg, temp);
		I915_READ(fdi_rx_reg);

		/* Wait for the clocks to turn off. */
		udelay(100);
		break;
	}
}
2100 | ||
/*
 * Turn the overlay off when the CRTC it sits on is being disabled.
 *
 * Only the @enable == false case does any work: it retries
 * intel_overlay_switch_off() until it succeeds, recovering from a
 * pending interrupt between attempts.  Re-enabling is left entirely to
 * userspace (it has to reposition the overlay after a mode change
 * anyway).
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	struct intel_overlay *overlay;
	int ret;

	if (!enable && intel_crtc->overlay) {
		overlay = intel_crtc->overlay;
		mutex_lock(&overlay->dev->struct_mutex);
		for (;;) {
			ret = intel_overlay_switch_off(overlay);
			if (ret == 0)
				break;

			ret = intel_overlay_recover_from_interrupt(overlay, 0);
			if (ret != 0) {
				/* overlay doesn't react anymore. Usually
				 * results in a black screen and an unkillable
				 * X server. */
				/* NOTE(review): the two statements after BUG()
				 * are only reachable when BUG() is compiled
				 * out (CONFIG_BUG=n) — presumably intentional
				 * as a last-resort fallback; verify. */
				BUG();
				overlay->hw_wedged = HW_WEDGED;
				break;
			}
		}
		mutex_unlock(&overlay->dev->struct_mutex);
	}
	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway. */

	return;
}
2131 | ||
/*
 * DPMS for CRTCs on pre-PCH (i9xx-class) hardware.
 *
 * ON: enables DPLL, pipe, then plane, reloads the palette and kicks
 * FBC/overlay.  OFF: disables overlay, FBC, VGA plane, display plane,
 * pipe and finally the DPLL, waiting for vblanks/clocks in between.
 * Watermarks are recomputed on every transition.
 */
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	/* Per-pipe/per-plane register instances (A for index 0, B otherwise). */
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		intel_update_watermarks(dev);

		/* Enable the DPLL */
		temp = I915_READ(dpll_reg);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			I915_WRITE(dpll_reg, temp);
			I915_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
			I915_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			/* NOTE(review): the enable bit is written twice with a
			 * settle delay after each write — presumably a
			 * hardware requirement; confirm against the PRM
			 * before "simplifying". */
			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
			I915_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the pipe */
		temp = I915_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) == 0)
			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);

		/* Enable the plane */
		temp = I915_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
		}

		intel_crtc_load_lut(crtc);

		if ((IS_I965G(dev) || plane == 0))
			intel_update_fbc(crtc, &crtc->mode);

		/* Give the overlay scaler a chance to enable if it's on this pipe */
		intel_crtc_dpms_overlay(intel_crtc, true);
		break;
	case DRM_MODE_DPMS_OFF:
		intel_update_watermarks(dev);

		/* Give the overlay scaler a chance to disable if it's on this pipe */
		intel_crtc_dpms_overlay(intel_crtc, false);
		drm_vblank_off(dev, pipe);

		if (dev_priv->cfb_plane == plane &&
		    dev_priv->display.disable_fbc)
			dev_priv->display.disable_fbc(dev);

		/* Disable the VGA plane that we never use */
		i915_disable_vga(dev);

		/* Disable display plane */
		temp = I915_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
			I915_READ(dspbase_reg);
		}

		if (!IS_I9XX(dev)) {
			/* Wait for vblank for the disable to take effect */
			intel_wait_for_vblank(dev);
		}

		/* Next, disable display pipes */
		temp = I915_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
			I915_READ(pipeconf_reg);
		}

		/* Wait for vblank for the disable to take effect. */
		intel_wait_for_vblank(dev);

		temp = I915_READ(dpll_reg);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
			I915_READ(dpll_reg);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}
}
2241 | ||
2242 | /** | |
2243 | * Sets the power management mode of the pipe and plane. | |
2244 | * | |
2245 | * This code should probably grow support for turning the cursor off and back | |
2246 | * on appropriately at the same time as we're turning the pipe off/on. | |
2247 | */ | |
2248 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |
2249 | { | |
2250 | struct drm_device *dev = crtc->dev; | |
2251 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2252 | struct drm_i915_master_private *master_priv; | |
2253 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2254 | int pipe = intel_crtc->pipe; | |
2255 | bool enabled; | |
2256 | ||
2257 | dev_priv->display.dpms(crtc, mode); | |
2258 | ||
2259 | intel_crtc->dpms_mode = mode; | |
2260 | ||
2261 | if (!dev->primary->master) | |
2262 | return; | |
2263 | ||
2264 | master_priv = dev->primary->master->driver_priv; | |
2265 | if (!master_priv->sarea_priv) | |
2266 | return; | |
2267 | ||
2268 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | |
2269 | ||
2270 | switch (pipe) { | |
2271 | case 0: | |
2272 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; | |
2273 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; | |
2274 | break; | |
2275 | case 1: | |
2276 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; | |
2277 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | |
2278 | break; | |
2279 | default: | |
2280 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | |
2281 | break; | |
2282 | } | |
2283 | } | |
2284 | ||
2285 | static void intel_crtc_prepare (struct drm_crtc *crtc) | |
2286 | { | |
2287 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | |
2288 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | |
2289 | } | |
2290 | ||
2291 | static void intel_crtc_commit (struct drm_crtc *crtc) | |
2292 | { | |
2293 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | |
2294 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | |
2295 | } | |
2296 | ||
2297 | void intel_encoder_prepare (struct drm_encoder *encoder) | |
2298 | { | |
2299 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
2300 | /* lvds has its own version of prepare see intel_lvds_prepare */ | |
2301 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); | |
2302 | } | |
2303 | ||
2304 | void intel_encoder_commit (struct drm_encoder *encoder) | |
2305 | { | |
2306 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
2307 | /* lvds has its own version of commit see intel_lvds_commit */ | |
2308 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | |
2309 | } | |
2310 | ||
2311 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |
2312 | struct drm_display_mode *mode, | |
2313 | struct drm_display_mode *adjusted_mode) | |
2314 | { | |
2315 | struct drm_device *dev = crtc->dev; | |
2316 | if (HAS_PCH_SPLIT(dev)) { | |
2317 | /* FDI link clock is fixed at 2.7G */ | |
2318 | if (mode->clock * 3 > 27000 * 4) | |
2319 | return MODE_CLOCK_HIGH; | |
2320 | } | |
2321 | return true; | |
2322 | } | |
2323 | ||
/* i945 runs the core display clock at a fixed 400 MHz (in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400 * 1000;
}
2328 | ||
/* i915 runs the core display clock at a fixed 333 MHz (in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333 * 1000;
}
2333 | ||
/* Remaining i9xx variants: fixed 200 MHz core display clock (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200 * 1000;
}
2338 | ||
2339 | static int i915gm_get_display_clock_speed(struct drm_device *dev) | |
2340 | { | |
2341 | u16 gcfgc = 0; | |
2342 | ||
2343 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | |
2344 | ||
2345 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | |
2346 | return 133000; | |
2347 | else { | |
2348 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | |
2349 | case GC_DISPLAY_CLOCK_333_MHZ: | |
2350 | return 333000; | |
2351 | default: | |
2352 | case GC_DISPLAY_CLOCK_190_200_MHZ: | |
2353 | return 190000; | |
2354 | } | |
2355 | } | |
2356 | } | |
2357 | ||
/* i865 runs the core display clock at a fixed 266 MHz (in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266 * 1000;
}
2362 | ||
2363 | static int i855_get_display_clock_speed(struct drm_device *dev) | |
2364 | { | |
2365 | u16 hpllcc = 0; | |
2366 | /* Assume that the hardware is in the high speed state. This | |
2367 | * should be the default. | |
2368 | */ | |
2369 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | |
2370 | case GC_CLOCK_133_200: | |
2371 | case GC_CLOCK_100_200: | |
2372 | return 200000; | |
2373 | case GC_CLOCK_166_250: | |
2374 | return 250000; | |
2375 | case GC_CLOCK_100_133: | |
2376 | return 133000; | |
2377 | } | |
2378 | ||
2379 | /* Shouldn't happen */ | |
2380 | return 0; | |
2381 | } | |
2382 | ||
/* i830 runs the core display clock at a fixed 133 MHz (in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133 * 1000;
}
2387 | ||
2388 | /** | |
2389 | * Return the pipe currently connected to the panel fitter, | |
2390 | * or -1 if the panel fitter is not present or not in use | |
2391 | */ | |
2392 | int intel_panel_fitter_pipe (struct drm_device *dev) | |
2393 | { | |
2394 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2395 | u32 pfit_control; | |
2396 | ||
2397 | /* i830 doesn't have a panel fitter */ | |
2398 | if (IS_I830(dev)) | |
2399 | return -1; | |
2400 | ||
2401 | pfit_control = I915_READ(PFIT_CONTROL); | |
2402 | ||
2403 | /* See if the panel fitter is in use */ | |
2404 | if ((pfit_control & PFIT_ENABLE) == 0) | |
2405 | return -1; | |
2406 | ||
2407 | /* 965 can place panel fitter on either pipe */ | |
2408 | if (IS_I965G(dev)) | |
2409 | return (pfit_control >> 29) & 0x3; | |
2410 | ||
2411 | /* older chips can only use pipe 1 */ | |
2412 | return 1; | |
2413 | } | |
2414 | ||
/* FDI data and link M/N ratio values as programmed into the pipe's
 * GMCH/link M-N registers; filled in by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32        tu;		/* transfer unit size (defaults to 64) */
	u32        gmch_m;	/* data M (numerator) */
	u32        gmch_n;	/* data N (denominator) */
	u32        link_m;	/* link M (numerator) */
	u32        link_n;	/* link N (denominator) */
};
2422 | ||
2423 | static void | |
2424 | fdi_reduce_ratio(u32 *num, u32 *den) | |
2425 | { | |
2426 | while (*num > 0xffffff || *den > 0xffffff) { | |
2427 | *num >>= 1; | |
2428 | *den >>= 1; | |
2429 | } | |
2430 | } | |
2431 | ||
2432 | #define DATA_N 0x800000 | |
2433 | #define LINK_N 0x80000 | |
2434 | ||
2435 | static void | |
2436 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, | |
2437 | int link_clock, struct fdi_m_n *m_n) | |
2438 | { | |
2439 | u64 temp; | |
2440 | ||
2441 | m_n->tu = 64; /* default size */ | |
2442 | ||
2443 | temp = (u64) DATA_N * pixel_clock; | |
2444 | temp = div_u64(temp, link_clock); | |
2445 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); | |
2446 | m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ | |
2447 | m_n->gmch_n = DATA_N; | |
2448 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | |
2449 | ||
2450 | temp = (u64) LINK_N * pixel_clock; | |
2451 | m_n->link_m = div_u64(temp, link_clock); | |
2452 | m_n->link_n = LINK_N; | |
2453 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); | |
2454 | } | |
2455 | ||
2456 | ||
/* Per-chip display FIFO parameters used by intel_calculate_wm().
 * Units (entries vs. cachelines vs. bytes) are chip-specific — see the
 * per-chip tables below and the *_FIFO_* register definitions. */
struct intel_watermark_params {
	unsigned long fifo_size;	/* total FIFO available to the plane */
	unsigned long max_wm;		/* highest watermark level allowed */
	unsigned long default_wm;	/* fallback watermark level */
	unsigned long guard_size;	/* extra margin kept above the watermark */
	unsigned long cacheline_size;	/* FIFO line size used for fetches */
};
2464 | ||
2465 | /* Pineview has different values for various configs */ | |
2466 | static struct intel_watermark_params pineview_display_wm = { | |
2467 | PINEVIEW_DISPLAY_FIFO, | |
2468 | PINEVIEW_MAX_WM, | |
2469 | PINEVIEW_DFT_WM, | |
2470 | PINEVIEW_GUARD_WM, | |
2471 | PINEVIEW_FIFO_LINE_SIZE | |
2472 | }; | |
2473 | static struct intel_watermark_params pineview_display_hplloff_wm = { | |
2474 | PINEVIEW_DISPLAY_FIFO, | |
2475 | PINEVIEW_MAX_WM, | |
2476 | PINEVIEW_DFT_HPLLOFF_WM, | |
2477 | PINEVIEW_GUARD_WM, | |
2478 | PINEVIEW_FIFO_LINE_SIZE | |
2479 | }; | |
2480 | static struct intel_watermark_params pineview_cursor_wm = { | |
2481 | PINEVIEW_CURSOR_FIFO, | |
2482 | PINEVIEW_CURSOR_MAX_WM, | |
2483 | PINEVIEW_CURSOR_DFT_WM, | |
2484 | PINEVIEW_CURSOR_GUARD_WM, | |
2485 | PINEVIEW_FIFO_LINE_SIZE, | |
2486 | }; | |
2487 | static struct intel_watermark_params pineview_cursor_hplloff_wm = { | |
2488 | PINEVIEW_CURSOR_FIFO, | |
2489 | PINEVIEW_CURSOR_MAX_WM, | |
2490 | PINEVIEW_CURSOR_DFT_WM, | |
2491 | PINEVIEW_CURSOR_GUARD_WM, | |
2492 | PINEVIEW_FIFO_LINE_SIZE | |
2493 | }; | |
2494 | static struct intel_watermark_params g4x_wm_info = { | |
2495 | G4X_FIFO_SIZE, | |
2496 | G4X_MAX_WM, | |
2497 | G4X_MAX_WM, | |
2498 | 2, | |
2499 | G4X_FIFO_LINE_SIZE, | |
2500 | }; | |
2501 | static struct intel_watermark_params i945_wm_info = { | |
2502 | I945_FIFO_SIZE, | |
2503 | I915_MAX_WM, | |
2504 | 1, | |
2505 | 2, | |
2506 | I915_FIFO_LINE_SIZE | |
2507 | }; | |
2508 | static struct intel_watermark_params i915_wm_info = { | |
2509 | I915_FIFO_SIZE, | |
2510 | I915_MAX_WM, | |
2511 | 1, | |
2512 | 2, | |
2513 | I915_FIFO_LINE_SIZE | |
2514 | }; | |
2515 | static struct intel_watermark_params i855_wm_info = { | |
2516 | I855GM_FIFO_SIZE, | |
2517 | I915_MAX_WM, | |
2518 | 1, | |
2519 | 2, | |
2520 | I830_FIFO_LINE_SIZE | |
2521 | }; | |
2522 | static struct intel_watermark_params i830_wm_info = { | |
2523 | I830_FIFO_SIZE, | |
2524 | I915_MAX_WM, | |
2525 | 1, | |
2526 | 2, | |
2527 | I830_FIFO_LINE_SIZE | |
2528 | }; | |
2529 | ||
2530 | /** | |
2531 | * intel_calculate_wm - calculate watermark level | |
2532 | * @clock_in_khz: pixel clock | |
2533 | * @wm: chip FIFO params | |
2534 | * @pixel_size: display pixel size | |
2535 | * @latency_ns: memory latency for the platform | |
2536 | * | |
2537 | * Calculate the watermark level (the level at which the display plane will | |
2538 | * start fetching from memory again). Each chip has a different display | |
2539 | * FIFO size and allocation, so the caller needs to figure that out and pass | |
2540 | * in the correct intel_watermark_params structure. | |
2541 | * | |
2542 | * As the pixel clock runs, the FIFO will be drained at a rate that depends | |
2543 | * on the pixel size. When it reaches the watermark level, it'll start | |
2544 | * fetching FIFO line sized based chunks from memory until the FIFO fills | |
2545 | * past the watermark point. If the FIFO drains completely, a FIFO underrun | |
2546 | * will occur, and a display engine hang could result. | |
2547 | */ | |
2548 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |
2549 | struct intel_watermark_params *wm, | |
2550 | int pixel_size, | |
2551 | unsigned long latency_ns) | |
2552 | { | |
2553 | long entries_required, wm_size; | |
2554 | ||
2555 | /* | |
2556 | * Note: we need to make sure we don't overflow for various clock & | |
2557 | * latency values. | |
2558 | * clocks go from a few thousand to several hundred thousand. | |
2559 | * latency is usually a few thousand | |
2560 | */ | |
2561 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | |
2562 | 1000; | |
2563 | entries_required /= wm->cacheline_size; | |
2564 | ||
2565 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | |
2566 | ||
2567 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | |
2568 | ||
2569 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); | |
2570 | ||
2571 | /* Don't promote wm_size to unsigned... */ | |
2572 | if (wm_size > (long)wm->max_wm) | |
2573 | wm_size = wm->max_wm; | |
2574 | if (wm_size <= 0) | |
2575 | wm_size = wm->default_wm; | |
2576 | return wm_size; | |
2577 | } | |
2578 | ||
/*
 * One row of the CxSR (self-refresh) latency table: the memory latencies
 * for a given desktop/mobile + FSB frequency + memory frequency combo.
 * The four latency fields are fed to intel_calculate_wm() as its
 * latency_ns argument, so they are presumably in nanoseconds — confirm
 * against the hardware documentation.
 */
struct cxsr_latency {
	int is_desktop;			/* 1 = desktop part, 0 = mobile */
	unsigned long fsb_freq;		/* front-side bus frequency key */
	unsigned long mem_freq;		/* memory frequency key */
	unsigned long display_sr;	/* display self-refresh latency */
	unsigned long display_hpll_disable; /* display latency with HPLL off */
	unsigned long cursor_sr;	/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;  /* cursor latency with HPLL off */
};
2588 | ||
/*
 * CxSR latency lookup table, searched by intel_get_cxsr_latency() on
 * (is_desktop, fsb_freq, mem_freq). Column order matches struct
 * cxsr_latency: {is_desktop, fsb, mem, display_sr, display_hpll_disable,
 * cursor_sr, cursor_hpll_disable}.
 */
static struct cxsr_latency cxsr_latency_table[] = {
	{1, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */

	{1, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */

	{1, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */

	{0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */

	{0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */

	{0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
};
2614 | ||
2615 | static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | |
2616 | int mem) | |
2617 | { | |
2618 | int i; | |
2619 | struct cxsr_latency *latency; | |
2620 | ||
2621 | if (fsb == 0 || mem == 0) | |
2622 | return NULL; | |
2623 | ||
2624 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | |
2625 | latency = &cxsr_latency_table[i]; | |
2626 | if (is_desktop == latency->is_desktop && | |
2627 | fsb == latency->fsb_freq && mem == latency->mem_freq) | |
2628 | return latency; | |
2629 | } | |
2630 | ||
2631 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | |
2632 | ||
2633 | return NULL; | |
2634 | } | |
2635 | ||
2636 | static void pineview_disable_cxsr(struct drm_device *dev) | |
2637 | { | |
2638 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2639 | u32 reg; | |
2640 | ||
2641 | /* deactivate cxsr */ | |
2642 | reg = I915_READ(DSPFW3); | |
2643 | reg &= ~(PINEVIEW_SELF_REFRESH_EN); | |
2644 | I915_WRITE(DSPFW3, reg); | |
2645 | DRM_INFO("Big FIFO is disabled\n"); | |
2646 | } | |
2647 | ||
2648 | static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | |
2649 | int pixel_size) | |
2650 | { | |
2651 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2652 | u32 reg; | |
2653 | unsigned long wm; | |
2654 | struct cxsr_latency *latency; | |
2655 | ||
2656 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, | |
2657 | dev_priv->mem_freq); | |
2658 | if (!latency) { | |
2659 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | |
2660 | pineview_disable_cxsr(dev); | |
2661 | return; | |
2662 | } | |
2663 | ||
2664 | /* Display SR */ | |
2665 | wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, | |
2666 | latency->display_sr); | |
2667 | reg = I915_READ(DSPFW1); | |
2668 | reg &= 0x7fffff; | |
2669 | reg |= wm << 23; | |
2670 | I915_WRITE(DSPFW1, reg); | |
2671 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | |
2672 | ||
2673 | /* cursor SR */ | |
2674 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, | |
2675 | latency->cursor_sr); | |
2676 | reg = I915_READ(DSPFW3); | |
2677 | reg &= ~(0x3f << 24); | |
2678 | reg |= (wm & 0x3f) << 24; | |
2679 | I915_WRITE(DSPFW3, reg); | |
2680 | ||
2681 | /* Display HPLL off SR */ | |
2682 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, | |
2683 | latency->display_hpll_disable, I915_FIFO_LINE_SIZE); | |
2684 | reg = I915_READ(DSPFW3); | |
2685 | reg &= 0xfffffe00; | |
2686 | reg |= wm & 0x1ff; | |
2687 | I915_WRITE(DSPFW3, reg); | |
2688 | ||
2689 | /* cursor HPLL off SR */ | |
2690 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, | |
2691 | latency->cursor_hpll_disable); | |
2692 | reg = I915_READ(DSPFW3); | |
2693 | reg &= ~(0x3f << 16); | |
2694 | reg |= (wm & 0x3f) << 16; | |
2695 | I915_WRITE(DSPFW3, reg); | |
2696 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | |
2697 | ||
2698 | /* activate cxsr */ | |
2699 | reg = I915_READ(DSPFW3); | |
2700 | reg |= PINEVIEW_SELF_REFRESH_EN; | |
2701 | I915_WRITE(DSPFW3, reg); | |
2702 | ||
2703 | DRM_INFO("Big FIFO is enabled\n"); | |
2704 | ||
2705 | return; | |
2706 | } | |
2707 | ||
2708 | /* | |
2709 | * Latency for FIFO fetches is dependent on several factors: | |
2710 | * - memory configuration (speed, channels) | |
2711 | * - chipset | |
2712 | * - current MCH state | |
2713 | * It can be fairly high in some situations, so here we assume a fairly | |
2714 | * pessimal value. It's a tradeoff between extra memory fetches (if we | |
2715 | * set this value too high, the FIFO will fetch frequently to stay full) | |
2716 | * and power consumption (set it too low to save power and we might see | |
2717 | * FIFO underruns and display "flicker"). | |
2718 | * | |
2719 | * A value of 5us seems to be a good balance; safe for very low end | |
2720 | * platforms but not overly aggressive on lower latency configs. | |
2721 | */ | |
2722 | static const int latency_ns = 5000; | |
2723 | ||
2724 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |
2725 | { | |
2726 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2727 | uint32_t dsparb = I915_READ(DSPARB); | |
2728 | int size; | |
2729 | ||
2730 | if (plane == 0) | |
2731 | size = dsparb & 0x7f; | |
2732 | else | |
2733 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | |
2734 | (dsparb & 0x7f); | |
2735 | ||
2736 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
2737 | plane ? "B" : "A", size); | |
2738 | ||
2739 | return size; | |
2740 | } | |
2741 | ||
2742 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |
2743 | { | |
2744 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2745 | uint32_t dsparb = I915_READ(DSPARB); | |
2746 | int size; | |
2747 | ||
2748 | if (plane == 0) | |
2749 | size = dsparb & 0x1ff; | |
2750 | else | |
2751 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | |
2752 | (dsparb & 0x1ff); | |
2753 | size >>= 1; /* Convert to cachelines */ | |
2754 | ||
2755 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
2756 | plane ? "B" : "A", size); | |
2757 | ||
2758 | return size; | |
2759 | } | |
2760 | ||
2761 | static int i845_get_fifo_size(struct drm_device *dev, int plane) | |
2762 | { | |
2763 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2764 | uint32_t dsparb = I915_READ(DSPARB); | |
2765 | int size; | |
2766 | ||
2767 | size = dsparb & 0x7f; | |
2768 | size >>= 2; /* Convert to cachelines */ | |
2769 | ||
2770 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
2771 | plane ? "B" : "A", | |
2772 | size); | |
2773 | ||
2774 | return size; | |
2775 | } | |
2776 | ||
2777 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | |
2778 | { | |
2779 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2780 | uint32_t dsparb = I915_READ(DSPARB); | |
2781 | int size; | |
2782 | ||
2783 | size = dsparb & 0x7f; | |
2784 | size >>= 1; /* Convert to cachelines */ | |
2785 | ||
2786 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
2787 | plane ? "B" : "A", size); | |
2788 | ||
2789 | return size; | |
2790 | } | |
2791 | ||
/*
 * Compute and program the G4x display FIFO watermarks (DSPFW1..3) for
 * the current plane clocks, and enable self-refresh when only one plane
 * is active.
 */
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
			  int planeb_clock, int sr_hdisplay, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int total_size, cacheline_size;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
	struct intel_watermark_params planea_params, planeb_params;
	unsigned long line_time_us;
	int sr_clock, sr_entries = 0, entries_required;

	/* Create copies of the base settings for each pipe */
	planea_params = planeb_params = g4x_wm_info;

	/* Grab a couple of global values before we overwrite them */
	total_size = planea_params.fifo_size;
	cacheline_size = planea_params.cacheline_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	/* Plane A: entries drained over latency_ns, converted to FIFO lines,
	 * plus the guard margin. */
	entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required /= G4X_FIFO_LINE_SIZE;
	planea_wm = entries_required + planea_params.guard_size;

	/* Plane B: same calculation. */
	entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required /= G4X_FIFO_LINE_SIZE;
	planeb_wm = entries_required + planeb_params.guard_size;

	/* Fixed cursor watermarks; not derived from the mode. */
	cursora_wm = cursorb_wm = 16;
	cursor_sr = 32;

	DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/* Calc sr entries for one plane configs */
	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		/* NOTE(review): if sr_clock > sr_hdisplay * 1000 this
		 * truncates to 0 and the division below would be a
		 * divide-by-zero — verify the callers can't hit that. */
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);

		/* Use ns/us then divide to preserve precision */
		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
			      pixel_size * sr_hdisplay) / 1000;
		/* NOTE(review): roundup(x, 1) is a no-op; presumably
		 * rounding up to cacheline_size was intended — confirm. */
		sr_entries = roundup(sr_entries / cacheline_size, 1);
		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
			   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
		  planea_wm, planeb_wm, sr_entries);

	/* Clamp to the 6-bit register fields before programming. */
	planea_wm &= 0x3f;
	planeb_wm &= 0x3f;

	I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
	I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
2865 | ||
/*
 * Program the 965 watermarks. The non-SR plane/cursor watermarks are
 * fixed at 8 (see the "965 has limitations" note below); only the
 * self-refresh watermark is computed from the mode.
 */
static void i965_update_wm(struct drm_device *dev, int planea_clock,
			   int planeb_clock, int sr_hdisplay, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long line_time_us;
	int sr_clock, sr_entries, srwm = 1;

	/* Calc sr entries for one plane configs */
	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		/* NOTE(review): truncates to 0 when sr_clock exceeds
		 * sr_hdisplay * 1000, which would divide by zero below —
		 * confirm callers can't hit that. */
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);

		/* Use ns/us then divide to preserve precision */
		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
			      pixel_size * sr_hdisplay) / 1000;
		sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
		/* Watermark is whatever FIFO is left after SR entries;
		 * never program zero or negative. */
		srwm = I945_FIFO_SIZE - sr_entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x3f; /* clamp to the register field width */
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
			   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
		   (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
}
2905 | ||
/*
 * Compute and program the FW_BLC watermarks for pre-G4x 9xx-class
 * hardware, selecting the per-chipset parameter table, reading actual
 * per-plane FIFO splits from DSPARB, and enabling self-refresh when only
 * one plane is active (and the chipset supports FW_BLC self-refresh).
 */
static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
			   int planeb_clock, int sr_hdisplay, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int total_size, cacheline_size, cwm, srwm = 1;
	int planea_wm, planeb_wm;
	struct intel_watermark_params planea_params, planeb_params;
	unsigned long line_time_us;
	int sr_clock, sr_entries = 0;

	/* Create copies of the base settings for each pipe */
	if (IS_I965GM(dev) || IS_I945GM(dev))
		planea_params = planeb_params = i945_wm_info;
	else if (IS_I9XX(dev))
		planea_params = planeb_params = i915_wm_info;
	else
		planea_params = planeb_params = i855_wm_info;

	/* Grab a couple of global values before we overwrite them */
	total_size = planea_params.fifo_size;
	cacheline_size = planea_params.cacheline_size;

	/* Update per-plane FIFO sizes */
	planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);

	planea_wm = intel_calculate_wm(planea_clock, &planea_params,
				       pixel_size, latency_ns);
	planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
				       pixel_size, latency_ns);
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && sr_hdisplay &&
	    (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		/* NOTE(review): truncates to 0 when sr_clock exceeds
		 * sr_hdisplay * 1000, which would divide by zero below —
		 * confirm callers can't hit that. */
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);

		/* Use ns/us then divide to preserve precision */
		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
			      pixel_size * sr_hdisplay) / 1000;
		sr_entries = roundup(sr_entries / cacheline_size, 1);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
		/* SR watermark is the FIFO space left after SR entries. */
		srwm = total_size - sr_entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev)) {
			/* 915M has a smaller SRWM field */
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
			I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
		}
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_I945G(dev) || IS_I945GM(dev)) {
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
		} else if (IS_I915GM(dev)) {
			I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
		}
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	/* Pack plane A/B watermarks into FW_BLC and the cursor watermark
	 * into FW_BLC2, clamped to their register field widths. */
	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);
}
2993 | ||
2994 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |
2995 | int unused2, int pixel_size) | |
2996 | { | |
2997 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2998 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | |
2999 | int planea_wm; | |
3000 | ||
3001 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | |
3002 | ||
3003 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | |
3004 | pixel_size, latency_ns); | |
3005 | fwater_lo |= (3<<8) | planea_wm; | |
3006 | ||
3007 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); | |
3008 | ||
3009 | I915_WRITE(FW_BLC, fwater_lo); | |
3010 | } | |
3011 | ||
3012 | /** | |
3013 | * intel_update_watermarks - update FIFO watermark values based on current modes | |
3014 | * | |
3015 | * Calculate watermark values for the various WM regs based on current mode | |
3016 | * and plane configuration. | |
3017 | * | |
3018 | * There are several cases to deal with here: | |
3019 | * - normal (i.e. non-self-refresh) | |
3020 | * - self-refresh (SR) mode | |
3021 | * - lines are large relative to FIFO size (buffer can hold up to 2) | |
3022 | * - lines are small relative to FIFO size (buffer can hold more than 2 | |
3023 | * lines), so need to account for TLB latency | |
3024 | * | |
3025 | * The normal calculation is: | |
3026 | * watermark = dotclock * bytes per pixel * latency | |
3027 | * where latency is platform & configuration dependent (we assume pessimal | |
3028 | * values here). | |
3029 | * | |
3030 | * The SR calculation is: | |
3031 | * watermark = (trunc(latency/line time)+1) * surface width * | |
3032 | * bytes per pixel | |
3033 | * where | |
3034 | * line time = htotal / dotclock | |
3035 | * and latency is assumed to be high, as above. | |
3036 | * | |
3037 | * The final value programmed to the register should always be rounded up, | |
3038 | * and include an extra 2 entries to account for clock crossings. | |
3039 | * | |
3040 | * We don't use the sprite, so we can ignore that. And on Crestline we have | |
3041 | * to set the non-SR watermarks to 8. | |
3042 | */ | |
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	int sr_hdisplay = 0;
	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
	int enabled = 0, pixel_size = 0;

	/* Nothing to do if this platform has no watermark hook. */
	if (!dev_priv->display.update_wm)
		return;

	/* Get the clock config from both planes */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		if (crtc->enabled) {
			enabled++;
			if (intel_crtc->plane == 0) {
				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
					      intel_crtc->pipe, crtc->mode.clock);
				planea_clock = crtc->mode.clock;
			} else {
				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
					      intel_crtc->pipe, crtc->mode.clock);
				planeb_clock = crtc->mode.clock;
			}
			/* NOTE(review): with multiple enabled CRTCs these
			 * keep only the last CRTC's values — fine for the
			 * single-plane SR case, but verify for dual-head. */
			sr_hdisplay = crtc->mode.hdisplay;
			sr_clock = crtc->mode.clock;
			if (crtc->fb)
				pixel_size = crtc->fb->bits_per_pixel / 8;
			else
				pixel_size = 4; /* by default */
		}
	}

	/* No enabled CRTCs: leave the watermarks alone. */
	if (enabled <= 0)
		return;

	/* Single plane configs can enable self refresh */
	if (enabled == 1 && IS_PINEVIEW(dev))
		pineview_enable_cxsr(dev, sr_clock, pixel_size);
	else if (IS_PINEVIEW(dev))
		pineview_disable_cxsr(dev);

	/* Hand off to the per-platform watermark programmer. */
	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
				    sr_hdisplay, pixel_size);
}
3090 | ||
3091 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | |
3092 | struct drm_display_mode *mode, | |
3093 | struct drm_display_mode *adjusted_mode, | |
3094 | int x, int y, | |
3095 | struct drm_framebuffer *old_fb) | |
3096 | { | |
3097 | struct drm_device *dev = crtc->dev; | |
3098 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3099 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3100 | int pipe = intel_crtc->pipe; | |
3101 | int plane = intel_crtc->plane; | |
3102 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | |
3103 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | |
3104 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | |
3105 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | |
3106 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | |
3107 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | |
3108 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | |
3109 | int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | |
3110 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | |
3111 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | |
3112 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | |
3113 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | |
3114 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | |
3115 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | |
3116 | int refclk, num_connectors = 0; | |
3117 | intel_clock_t clock, reduced_clock; | |
3118 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | |
3119 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | |
3120 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | |
3121 | bool is_edp = false; | |
3122 | struct drm_mode_config *mode_config = &dev->mode_config; | |
3123 | struct drm_encoder *encoder; | |
3124 | struct intel_encoder *intel_encoder = NULL; | |
3125 | const intel_limit_t *limit; | |
3126 | int ret; | |
3127 | struct fdi_m_n m_n = {0}; | |
3128 | int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; | |
3129 | int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; | |
3130 | int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; | |
3131 | int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; | |
3132 | int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; | |
3133 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | |
3134 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | |
3135 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | |
3136 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | |
3137 | int lvds_reg = LVDS; | |
3138 | u32 temp; | |
3139 | int sdvo_pixel_multiply; | |
3140 | int target_clock; | |
3141 | ||
3142 | drm_vblank_pre_modeset(dev, pipe); | |
3143 | ||
3144 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | |
3145 | ||
3146 | if (!encoder || encoder->crtc != crtc) | |
3147 | continue; | |
3148 | ||
3149 | intel_encoder = enc_to_intel_encoder(encoder); | |
3150 | ||
3151 | switch (intel_encoder->type) { | |
3152 | case INTEL_OUTPUT_LVDS: | |
3153 | is_lvds = true; | |
3154 | break; | |
3155 | case INTEL_OUTPUT_SDVO: | |
3156 | case INTEL_OUTPUT_HDMI: | |
3157 | is_sdvo = true; | |
3158 | if (intel_encoder->needs_tv_clock) | |
3159 | is_tv = true; | |
3160 | break; | |
3161 | case INTEL_OUTPUT_DVO: | |
3162 | is_dvo = true; | |
3163 | break; | |
3164 | case INTEL_OUTPUT_TVOUT: | |
3165 | is_tv = true; | |
3166 | break; | |
3167 | case INTEL_OUTPUT_ANALOG: | |
3168 | is_crt = true; | |
3169 | break; | |
3170 | case INTEL_OUTPUT_DISPLAYPORT: | |
3171 | is_dp = true; | |
3172 | break; | |
3173 | case INTEL_OUTPUT_EDP: | |
3174 | is_edp = true; | |
3175 | break; | |
3176 | } | |
3177 | ||
3178 | num_connectors++; | |
3179 | } | |
3180 | ||
3181 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { | |
3182 | refclk = dev_priv->lvds_ssc_freq * 1000; | |
3183 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | |
3184 | refclk / 1000); | |
3185 | } else if (IS_I9XX(dev)) { | |
3186 | refclk = 96000; | |
3187 | if (HAS_PCH_SPLIT(dev)) | |
3188 | refclk = 120000; /* 120Mhz refclk */ | |
3189 | } else { | |
3190 | refclk = 48000; | |
3191 | } | |
3192 | ||
3193 | ||
3194 | /* | |
3195 | * Returns a set of divisors for the desired target clock with the given | |
3196 | * refclk, or FALSE. The returned values represent the clock equation: | |
3197 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | |
3198 | */ | |
3199 | limit = intel_limit(crtc); | |
3200 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | |
3201 | if (!ok) { | |
3202 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | |
3203 | drm_vblank_post_modeset(dev, pipe); | |
3204 | return -EINVAL; | |
3205 | } | |
3206 | ||
3207 | if (is_lvds && dev_priv->lvds_downclock_avail) { | |
3208 | has_reduced_clock = limit->find_pll(limit, crtc, | |
3209 | dev_priv->lvds_downclock, | |
3210 | refclk, | |
3211 | &reduced_clock); | |
3212 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | |
3213 | /* | |
3214 | * If the different P is found, it means that we can't | |
3215 | * switch the display clock by using the FP0/FP1. | |
3216 | * In such case we will disable the LVDS downclock | |
3217 | * feature. | |
3218 | */ | |
3219 | DRM_DEBUG_KMS("Different P is found for " | |
3220 | "LVDS clock/downclock\n"); | |
3221 | has_reduced_clock = 0; | |
3222 | } | |
3223 | } | |
3224 | /* SDVO TV has fixed PLL values depend on its clock range, | |
3225 | this mirrors vbios setting. */ | |
3226 | if (is_sdvo && is_tv) { | |
3227 | if (adjusted_mode->clock >= 100000 | |
3228 | && adjusted_mode->clock < 140500) { | |
3229 | clock.p1 = 2; | |
3230 | clock.p2 = 10; | |
3231 | clock.n = 3; | |
3232 | clock.m1 = 16; | |
3233 | clock.m2 = 8; | |
3234 | } else if (adjusted_mode->clock >= 140500 | |
3235 | && adjusted_mode->clock <= 200000) { | |
3236 | clock.p1 = 1; | |
3237 | clock.p2 = 10; | |
3238 | clock.n = 6; | |
3239 | clock.m1 = 12; | |
3240 | clock.m2 = 8; | |
3241 | } | |
3242 | } | |
3243 | ||
3244 | /* FDI link */ | |
3245 | if (HAS_PCH_SPLIT(dev)) { | |
3246 | int lane, link_bw, bpp; | |
3247 | /* eDP doesn't require FDI link, so just set DP M/N | |
3248 | according to current link config */ | |
3249 | if (is_edp) { | |
3250 | target_clock = mode->clock; | |
3251 | intel_edp_link_config(intel_encoder, | |
3252 | &lane, &link_bw); | |
3253 | } else { | |
3254 | /* DP over FDI requires target mode clock | |
3255 | instead of link clock */ | |
3256 | if (is_dp) | |
3257 | target_clock = mode->clock; | |
3258 | else | |
3259 | target_clock = adjusted_mode->clock; | |
3260 | lane = 4; | |
3261 | link_bw = 270000; | |
3262 | } | |
3263 | ||
3264 | /* determine panel color depth */ | |
3265 | temp = I915_READ(pipeconf_reg); | |
3266 | temp &= ~PIPE_BPC_MASK; | |
3267 | if (is_lvds) { | |
3268 | int lvds_reg = I915_READ(PCH_LVDS); | |
3269 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | |
3270 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | |
3271 | temp |= PIPE_8BPC; | |
3272 | else | |
3273 | temp |= PIPE_6BPC; | |
3274 | } else if (is_edp) { | |
3275 | switch (dev_priv->edp_bpp/3) { | |
3276 | case 8: | |
3277 | temp |= PIPE_8BPC; | |
3278 | break; | |
3279 | case 10: | |
3280 | temp |= PIPE_10BPC; | |
3281 | break; | |
3282 | case 6: | |
3283 | temp |= PIPE_6BPC; | |
3284 | break; | |
3285 | case 12: | |
3286 | temp |= PIPE_12BPC; | |
3287 | break; | |
3288 | } | |
3289 | } else | |
3290 | temp |= PIPE_8BPC; | |
3291 | I915_WRITE(pipeconf_reg, temp); | |
3292 | I915_READ(pipeconf_reg); | |
3293 | ||
3294 | switch (temp & PIPE_BPC_MASK) { | |
3295 | case PIPE_8BPC: | |
3296 | bpp = 24; | |
3297 | break; | |
3298 | case PIPE_10BPC: | |
3299 | bpp = 30; | |
3300 | break; | |
3301 | case PIPE_6BPC: | |
3302 | bpp = 18; | |
3303 | break; | |
3304 | case PIPE_12BPC: | |
3305 | bpp = 36; | |
3306 | break; | |
3307 | default: | |
3308 | DRM_ERROR("unknown pipe bpc value\n"); | |
3309 | bpp = 24; | |
3310 | } | |
3311 | ||
3312 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | |
3313 | } | |
3314 | ||
3315 | /* Ironlake: try to setup display ref clock before DPLL | |
3316 | * enabling. This is only under driver's control after | |
3317 | * PCH B stepping, previous chipset stepping should be | |
3318 | * ignoring this setting. | |
3319 | */ | |
3320 | if (HAS_PCH_SPLIT(dev)) { | |
3321 | temp = I915_READ(PCH_DREF_CONTROL); | |
3322 | /* Always enable nonspread source */ | |
3323 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | |
3324 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | |
3325 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
3326 | POSTING_READ(PCH_DREF_CONTROL); | |
3327 | ||
3328 | temp &= ~DREF_SSC_SOURCE_MASK; | |
3329 | temp |= DREF_SSC_SOURCE_ENABLE; | |
3330 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
3331 | POSTING_READ(PCH_DREF_CONTROL); | |
3332 | ||
3333 | udelay(200); | |
3334 | ||
3335 | if (is_edp) { | |
3336 | if (dev_priv->lvds_use_ssc) { | |
3337 | temp |= DREF_SSC1_ENABLE; | |
3338 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
3339 | POSTING_READ(PCH_DREF_CONTROL); | |
3340 | ||
3341 | udelay(200); | |
3342 | ||
3343 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | |
3344 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | |
3345 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
3346 | POSTING_READ(PCH_DREF_CONTROL); | |
3347 | } else { | |
3348 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | |
3349 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
3350 | POSTING_READ(PCH_DREF_CONTROL); | |
3351 | } | |
3352 | } | |
3353 | } | |
3354 | ||
3355 | if (IS_PINEVIEW(dev)) { | |
3356 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | |
3357 | if (has_reduced_clock) | |
3358 | fp2 = (1 << reduced_clock.n) << 16 | | |
3359 | reduced_clock.m1 << 8 | reduced_clock.m2; | |
3360 | } else { | |
3361 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | |
3362 | if (has_reduced_clock) | |
3363 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | |
3364 | reduced_clock.m2; | |
3365 | } | |
3366 | ||
3367 | if (!HAS_PCH_SPLIT(dev)) | |
3368 | dpll = DPLL_VGA_MODE_DIS; | |
3369 | ||
3370 | if (IS_I9XX(dev)) { | |
3371 | if (is_lvds) | |
3372 | dpll |= DPLLB_MODE_LVDS; | |
3373 | else | |
3374 | dpll |= DPLLB_MODE_DAC_SERIAL; | |
3375 | if (is_sdvo) { | |
3376 | dpll |= DPLL_DVO_HIGH_SPEED; | |
3377 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | |
3378 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | |
3379 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | |
3380 | else if (HAS_PCH_SPLIT(dev)) | |
3381 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | |
3382 | } | |
3383 | if (is_dp) | |
3384 | dpll |= DPLL_DVO_HIGH_SPEED; | |
3385 | ||
3386 | /* compute bitmask from p1 value */ | |
3387 | if (IS_PINEVIEW(dev)) | |
3388 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; | |
3389 | else { | |
3390 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
3391 | /* also FPA1 */ | |
3392 | if (HAS_PCH_SPLIT(dev)) | |
3393 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | |
3394 | if (IS_G4X(dev) && has_reduced_clock) | |
3395 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | |
3396 | } | |
3397 | switch (clock.p2) { | |
3398 | case 5: | |
3399 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | |
3400 | break; | |
3401 | case 7: | |
3402 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | |
3403 | break; | |
3404 | case 10: | |
3405 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | |
3406 | break; | |
3407 | case 14: | |
3408 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | |
3409 | break; | |
3410 | } | |
3411 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | |
3412 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | |
3413 | } else { | |
3414 | if (is_lvds) { | |
3415 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
3416 | } else { | |
3417 | if (clock.p1 == 2) | |
3418 | dpll |= PLL_P1_DIVIDE_BY_TWO; | |
3419 | else | |
3420 | dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
3421 | if (clock.p2 == 4) | |
3422 | dpll |= PLL_P2_DIVIDE_BY_4; | |
3423 | } | |
3424 | } | |
3425 | ||
3426 | if (is_sdvo && is_tv) | |
3427 | dpll |= PLL_REF_INPUT_TVCLKINBC; | |
3428 | else if (is_tv) | |
3429 | /* XXX: just matching BIOS for now */ | |
3430 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | |
3431 | dpll |= 3; | |
3432 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) | |
3433 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | |
3434 | else | |
3435 | dpll |= PLL_REF_INPUT_DREFCLK; | |
3436 | ||
3437 | /* setup pipeconf */ | |
3438 | pipeconf = I915_READ(pipeconf_reg); | |
3439 | ||
3440 | /* Set up the display plane register */ | |
3441 | dspcntr = DISPPLANE_GAMMA_ENABLE; | |
3442 | ||
3443 | /* Ironlake's plane is forced to pipe, bit 24 is to | |
3444 | enable color space conversion */ | |
3445 | if (!HAS_PCH_SPLIT(dev)) { | |
3446 | if (pipe == 0) | |
3447 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | |
3448 | else | |
3449 | dspcntr |= DISPPLANE_SEL_PIPE_B; | |
3450 | } | |
3451 | ||
3452 | if (pipe == 0 && !IS_I965G(dev)) { | |
3453 | /* Enable pixel doubling when the dot clock is > 90% of the (display) | |
3454 | * core speed. | |
3455 | * | |
3456 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the | |
3457 | * pipe == 0 check? | |
3458 | */ | |
3459 | if (mode->clock > | |
3460 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | |
3461 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | |
3462 | else | |
3463 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | |
3464 | } | |
3465 | ||
3466 | /* Disable the panel fitter if it was on our pipe */ | |
3467 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) | |
3468 | I915_WRITE(PFIT_CONTROL, 0); | |
3469 | ||
3470 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | |
3471 | drm_mode_debug_printmodeline(mode); | |
3472 | ||
3473 | /* assign to Ironlake registers */ | |
3474 | if (HAS_PCH_SPLIT(dev)) { | |
3475 | fp_reg = pch_fp_reg; | |
3476 | dpll_reg = pch_dpll_reg; | |
3477 | } | |
3478 | ||
3479 | if (is_edp) { | |
3480 | ironlake_disable_pll_edp(crtc); | |
3481 | } else if ((dpll & DPLL_VCO_ENABLE)) { | |
3482 | I915_WRITE(fp_reg, fp); | |
3483 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | |
3484 | I915_READ(dpll_reg); | |
3485 | udelay(150); | |
3486 | } | |
3487 | ||
3488 | /* enable transcoder DPLL */ | |
3489 | if (HAS_PCH_CPT(dev)) { | |
3490 | temp = I915_READ(PCH_DPLL_SEL); | |
3491 | if (trans_dpll_sel == 0) | |
3492 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | |
3493 | else | |
3494 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | |
3495 | I915_WRITE(PCH_DPLL_SEL, temp); | |
3496 | I915_READ(PCH_DPLL_SEL); | |
3497 | udelay(150); | |
3498 | } | |
3499 | ||
3500 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | |
3501 | * This is an exception to the general rule that mode_set doesn't turn | |
3502 | * things on. | |
3503 | */ | |
3504 | if (is_lvds) { | |
3505 | u32 lvds; | |
3506 | ||
3507 | if (HAS_PCH_SPLIT(dev)) | |
3508 | lvds_reg = PCH_LVDS; | |
3509 | ||
3510 | lvds = I915_READ(lvds_reg); | |
3511 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | |
3512 | if (pipe == 1) { | |
3513 | if (HAS_PCH_CPT(dev)) | |
3514 | lvds |= PORT_TRANS_B_SEL_CPT; | |
3515 | else | |
3516 | lvds |= LVDS_PIPEB_SELECT; | |
3517 | } else { | |
3518 | if (HAS_PCH_CPT(dev)) | |
3519 | lvds &= ~PORT_TRANS_SEL_MASK; | |
3520 | else | |
3521 | lvds &= ~LVDS_PIPEB_SELECT; | |
3522 | } | |
3523 | /* set the corresponsding LVDS_BORDER bit */ | |
3524 | lvds |= dev_priv->lvds_border_bits; | |
3525 | /* Set the B0-B3 data pairs corresponding to whether we're going to | |
3526 | * set the DPLLs for dual-channel mode or not. | |
3527 | */ | |
3528 | if (clock.p2 == 7) | |
3529 | lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | |
3530 | else | |
3531 | lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | |
3532 | ||
3533 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | |
3534 | * appropriately here, but we need to look more thoroughly into how | |
3535 | * panels behave in the two modes. | |
3536 | */ | |
3537 | /* set the dithering flag */ | |
3538 | if (IS_I965G(dev)) { | |
3539 | if (dev_priv->lvds_dither) { | |
3540 | if (HAS_PCH_SPLIT(dev)) | |
3541 | pipeconf |= PIPE_ENABLE_DITHER; | |
3542 | else | |
3543 | lvds |= LVDS_ENABLE_DITHER; | |
3544 | } else { | |
3545 | if (HAS_PCH_SPLIT(dev)) | |
3546 | pipeconf &= ~PIPE_ENABLE_DITHER; | |
3547 | else | |
3548 | lvds &= ~LVDS_ENABLE_DITHER; | |
3549 | } | |
3550 | } | |
3551 | I915_WRITE(lvds_reg, lvds); | |
3552 | I915_READ(lvds_reg); | |
3553 | } | |
3554 | if (is_dp) | |
3555 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | |
3556 | else if (HAS_PCH_SPLIT(dev)) { | |
3557 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | |
3558 | if (pipe == 0) { | |
3559 | I915_WRITE(TRANSA_DATA_M1, 0); | |
3560 | I915_WRITE(TRANSA_DATA_N1, 0); | |
3561 | I915_WRITE(TRANSA_DP_LINK_M1, 0); | |
3562 | I915_WRITE(TRANSA_DP_LINK_N1, 0); | |
3563 | } else { | |
3564 | I915_WRITE(TRANSB_DATA_M1, 0); | |
3565 | I915_WRITE(TRANSB_DATA_N1, 0); | |
3566 | I915_WRITE(TRANSB_DP_LINK_M1, 0); | |
3567 | I915_WRITE(TRANSB_DP_LINK_N1, 0); | |
3568 | } | |
3569 | } | |
3570 | ||
3571 | if (!is_edp) { | |
3572 | I915_WRITE(fp_reg, fp); | |
3573 | I915_WRITE(dpll_reg, dpll); | |
3574 | I915_READ(dpll_reg); | |
3575 | /* Wait for the clocks to stabilize. */ | |
3576 | udelay(150); | |
3577 | ||
3578 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | |
3579 | if (is_sdvo) { | |
3580 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | |
3581 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | |
3582 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | |
3583 | } else | |
3584 | I915_WRITE(dpll_md_reg, 0); | |
3585 | } else { | |
3586 | /* write it again -- the BIOS does, after all */ | |
3587 | I915_WRITE(dpll_reg, dpll); | |
3588 | } | |
3589 | I915_READ(dpll_reg); | |
3590 | /* Wait for the clocks to stabilize. */ | |
3591 | udelay(150); | |
3592 | } | |
3593 | ||
3594 | if (is_lvds && has_reduced_clock && i915_powersave) { | |
3595 | I915_WRITE(fp_reg + 4, fp2); | |
3596 | intel_crtc->lowfreq_avail = true; | |
3597 | if (HAS_PIPE_CXSR(dev)) { | |
3598 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | |
3599 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | |
3600 | } | |
3601 | } else { | |
3602 | I915_WRITE(fp_reg + 4, fp); | |
3603 | intel_crtc->lowfreq_avail = false; | |
3604 | if (HAS_PIPE_CXSR(dev)) { | |
3605 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | |
3606 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | |
3607 | } | |
3608 | } | |
3609 | ||
3610 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | |
3611 | ((adjusted_mode->crtc_htotal - 1) << 16)); | |
3612 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | |
3613 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | |
3614 | I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | | |
3615 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | |
3616 | I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | | |
3617 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | |
3618 | I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | | |
3619 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | |
3620 | I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | | |
3621 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | |
3622 | /* pipesrc and dspsize control the size that is scaled from, which should | |
3623 | * always be the user's requested size. | |
3624 | */ | |
3625 | if (!HAS_PCH_SPLIT(dev)) { | |
3626 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | | |
3627 | (mode->hdisplay - 1)); | |
3628 | I915_WRITE(dsppos_reg, 0); | |
3629 | } | |
3630 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | |
3631 | ||
3632 | if (HAS_PCH_SPLIT(dev)) { | |
3633 | I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); | |
3634 | I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); | |
3635 | I915_WRITE(link_m1_reg, m_n.link_m); | |
3636 | I915_WRITE(link_n1_reg, m_n.link_n); | |
3637 | ||
3638 | if (is_edp) { | |
3639 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | |
3640 | } else { | |
3641 | /* enable FDI RX PLL too */ | |
3642 | temp = I915_READ(fdi_rx_reg); | |
3643 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | |
3644 | I915_READ(fdi_rx_reg); | |
3645 | udelay(200); | |
3646 | ||
3647 | /* enable FDI TX PLL too */ | |
3648 | temp = I915_READ(fdi_tx_reg); | |
3649 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | |
3650 | I915_READ(fdi_tx_reg); | |
3651 | ||
3652 | /* enable FDI RX PCDCLK */ | |
3653 | temp = I915_READ(fdi_rx_reg); | |
3654 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | |
3655 | I915_READ(fdi_rx_reg); | |
3656 | udelay(200); | |
3657 | } | |
3658 | } | |
3659 | ||
3660 | I915_WRITE(pipeconf_reg, pipeconf); | |
3661 | I915_READ(pipeconf_reg); | |
3662 | ||
3663 | intel_wait_for_vblank(dev); | |
3664 | ||
3665 | if (IS_IRONLAKE(dev)) { | |
3666 | /* enable address swizzle for tiling buffer */ | |
3667 | temp = I915_READ(DISP_ARB_CTL); | |
3668 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | |
3669 | } | |
3670 | ||
3671 | I915_WRITE(dspcntr_reg, dspcntr); | |
3672 | ||
3673 | /* Flush the plane changes */ | |
3674 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | |
3675 | ||
3676 | if ((IS_I965G(dev) || plane == 0)) | |
3677 | intel_update_fbc(crtc, &crtc->mode); | |
3678 | ||
3679 | intel_update_watermarks(dev); | |
3680 | ||
3681 | drm_vblank_post_modeset(dev, pipe); | |
3682 | ||
3683 | return ret; | |
3684 | } | |
3685 | ||
3686 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | |
3687 | void intel_crtc_load_lut(struct drm_crtc *crtc) | |
3688 | { | |
3689 | struct drm_device *dev = crtc->dev; | |
3690 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3691 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3692 | int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; | |
3693 | int i; | |
3694 | ||
3695 | /* The clocks have to be on to load the palette. */ | |
3696 | if (!crtc->enabled) | |
3697 | return; | |
3698 | ||
3699 | /* use legacy palette for Ironlake */ | |
3700 | if (HAS_PCH_SPLIT(dev)) | |
3701 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : | |
3702 | LGC_PALETTE_B; | |
3703 | ||
3704 | for (i = 0; i < 256; i++) { | |
3705 | I915_WRITE(palreg + 4 * i, | |
3706 | (intel_crtc->lut_r[i] << 16) | | |
3707 | (intel_crtc->lut_g[i] << 8) | | |
3708 | intel_crtc->lut_b[i]); | |
3709 | } | |
3710 | } | |
3711 | ||
3712 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |
3713 | struct drm_file *file_priv, | |
3714 | uint32_t handle, | |
3715 | uint32_t width, uint32_t height) | |
3716 | { | |
3717 | struct drm_device *dev = crtc->dev; | |
3718 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3719 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3720 | struct drm_gem_object *bo; | |
3721 | struct drm_i915_gem_object *obj_priv; | |
3722 | int pipe = intel_crtc->pipe; | |
3723 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | |
3724 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | |
3725 | uint32_t temp = I915_READ(control); | |
3726 | size_t addr; | |
3727 | int ret; | |
3728 | ||
3729 | DRM_DEBUG_KMS("\n"); | |
3730 | ||
3731 | /* if we want to turn off the cursor ignore width and height */ | |
3732 | if (!handle) { | |
3733 | DRM_DEBUG_KMS("cursor off\n"); | |
3734 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | |
3735 | temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | |
3736 | temp |= CURSOR_MODE_DISABLE; | |
3737 | } else { | |
3738 | temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | |
3739 | } | |
3740 | addr = 0; | |
3741 | bo = NULL; | |
3742 | mutex_lock(&dev->struct_mutex); | |
3743 | goto finish; | |
3744 | } | |
3745 | ||
3746 | /* Currently we only support 64x64 cursors */ | |
3747 | if (width != 64 || height != 64) { | |
3748 | DRM_ERROR("we currently only support 64x64 cursors\n"); | |
3749 | return -EINVAL; | |
3750 | } | |
3751 | ||
3752 | bo = drm_gem_object_lookup(dev, file_priv, handle); | |
3753 | if (!bo) | |
3754 | return -ENOENT; | |
3755 | ||
3756 | obj_priv = to_intel_bo(bo); | |
3757 | ||
3758 | if (bo->size < width * height * 4) { | |
3759 | DRM_ERROR("buffer is to small\n"); | |
3760 | ret = -ENOMEM; | |
3761 | goto fail; | |
3762 | } | |
3763 | ||
3764 | /* we only need to pin inside GTT if cursor is non-phy */ | |
3765 | mutex_lock(&dev->struct_mutex); | |
3766 | if (!dev_priv->info->cursor_needs_physical) { | |
3767 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | |
3768 | if (ret) { | |
3769 | DRM_ERROR("failed to pin cursor bo\n"); | |
3770 | goto fail_locked; | |
3771 | } | |
3772 | addr = obj_priv->gtt_offset; | |
3773 | } else { | |
3774 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | |
3775 | if (ret) { | |
3776 | DRM_ERROR("failed to attach phys object\n"); | |
3777 | goto fail_locked; | |
3778 | } | |
3779 | addr = obj_priv->phys_obj->handle->busaddr; | |
3780 | } | |
3781 | ||
3782 | if (!IS_I9XX(dev)) | |
3783 | I915_WRITE(CURSIZE, (height << 12) | width); | |
3784 | ||
3785 | /* Hooray for CUR*CNTR differences */ | |
3786 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | |
3787 | temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | |
3788 | temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | |
3789 | temp |= (pipe << 28); /* Connect to correct pipe */ | |
3790 | } else { | |
3791 | temp &= ~(CURSOR_FORMAT_MASK); | |
3792 | temp |= CURSOR_ENABLE; | |
3793 | temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | |
3794 | } | |
3795 | ||
3796 | finish: | |
3797 | I915_WRITE(control, temp); | |
3798 | I915_WRITE(base, addr); | |
3799 | ||
3800 | if (intel_crtc->cursor_bo) { | |
3801 | if (dev_priv->info->cursor_needs_physical) { | |
3802 | if (intel_crtc->cursor_bo != bo) | |
3803 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | |
3804 | } else | |
3805 | i915_gem_object_unpin(intel_crtc->cursor_bo); | |
3806 | drm_gem_object_unreference(intel_crtc->cursor_bo); | |
3807 | } | |
3808 | ||
3809 | mutex_unlock(&dev->struct_mutex); | |
3810 | ||
3811 | intel_crtc->cursor_addr = addr; | |
3812 | intel_crtc->cursor_bo = bo; | |
3813 | ||
3814 | return 0; | |
3815 | fail_locked: | |
3816 | mutex_unlock(&dev->struct_mutex); | |
3817 | fail: | |
3818 | drm_gem_object_unreference_unlocked(bo); | |
3819 | return ret; | |
3820 | } | |
3821 | ||
3822 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |
3823 | { | |
3824 | struct drm_device *dev = crtc->dev; | |
3825 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3826 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3827 | struct intel_framebuffer *intel_fb; | |
3828 | int pipe = intel_crtc->pipe; | |
3829 | uint32_t temp = 0; | |
3830 | uint32_t adder; | |
3831 | ||
3832 | if (crtc->fb) { | |
3833 | intel_fb = to_intel_framebuffer(crtc->fb); | |
3834 | intel_mark_busy(dev, intel_fb->obj); | |
3835 | } | |
3836 | ||
3837 | if (x < 0) { | |
3838 | temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | |
3839 | x = -x; | |
3840 | } | |
3841 | if (y < 0) { | |
3842 | temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | |
3843 | y = -y; | |
3844 | } | |
3845 | ||
3846 | temp |= x << CURSOR_X_SHIFT; | |
3847 | temp |= y << CURSOR_Y_SHIFT; | |
3848 | ||
3849 | adder = intel_crtc->cursor_addr; | |
3850 | I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); | |
3851 | I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); | |
3852 | ||
3853 | return 0; | |
3854 | } | |
3855 | ||
3856 | /** Sets the color ramps on behalf of RandR */ | |
3857 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |
3858 | u16 blue, int regno) | |
3859 | { | |
3860 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3861 | ||
3862 | intel_crtc->lut_r[regno] = red >> 8; | |
3863 | intel_crtc->lut_g[regno] = green >> 8; | |
3864 | intel_crtc->lut_b[regno] = blue >> 8; | |
3865 | } | |
3866 | ||
3867 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | |
3868 | u16 *blue, int regno) | |
3869 | { | |
3870 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3871 | ||
3872 | *red = intel_crtc->lut_r[regno] << 8; | |
3873 | *green = intel_crtc->lut_g[regno] << 8; | |
3874 | *blue = intel_crtc->lut_b[regno] << 8; | |
3875 | } | |
3876 | ||
3877 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |
3878 | u16 *blue, uint32_t size) | |
3879 | { | |
3880 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3881 | int i; | |
3882 | ||
3883 | if (size != 256) | |
3884 | return; | |
3885 | ||
3886 | for (i = 0; i < 256; i++) { | |
3887 | intel_crtc->lut_r[i] = red[i] >> 8; | |
3888 | intel_crtc->lut_g[i] = green[i] >> 8; | |
3889 | intel_crtc->lut_b[i] = blue[i] >> 8; | |
3890 | } | |
3891 | ||
3892 | intel_crtc_load_lut(crtc); | |
3893 | } | |
3894 | ||
3895 | /** | |
3896 | * Get a pipe with a simple mode set on it for doing load-based monitor | |
3897 | * detection. | |
3898 | * | |
3899 | * It will be up to the load-detect code to adjust the pipe as appropriate for | |
3900 | * its requirements. The pipe will be connected to no other encoders. | |
3901 | * | |
3902 | * Currently this code will only succeed if there is a pipe with no encoders | |
3903 | * configured for it. In the future, it could choose to temporarily disable | |
3904 | * some outputs to free up a pipe for its use. | |
3905 | * | |
3906 | * \return crtc, or NULL if no pipes are available. | |
3907 | */ | |
3908 | ||
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when the caller
 * does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
3914 | ||
3915 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |
3916 | struct drm_connector *connector, | |
3917 | struct drm_display_mode *mode, | |
3918 | int *dpms_mode) | |
3919 | { | |
3920 | struct intel_crtc *intel_crtc; | |
3921 | struct drm_crtc *possible_crtc; | |
3922 | struct drm_crtc *supported_crtc =NULL; | |
3923 | struct drm_encoder *encoder = &intel_encoder->enc; | |
3924 | struct drm_crtc *crtc = NULL; | |
3925 | struct drm_device *dev = encoder->dev; | |
3926 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
3927 | struct drm_crtc_helper_funcs *crtc_funcs; | |
3928 | int i = -1; | |
3929 | ||
3930 | /* | |
3931 | * Algorithm gets a little messy: | |
3932 | * - if the connector already has an assigned crtc, use it (but make | |
3933 | * sure it's on first) | |
3934 | * - try to find the first unused crtc that can drive this connector, | |
3935 | * and use that if we find one | |
3936 | * - if there are no unused crtcs available, try to use the first | |
3937 | * one we found that supports the connector | |
3938 | */ | |
3939 | ||
3940 | /* See if we already have a CRTC for this connector */ | |
3941 | if (encoder->crtc) { | |
3942 | crtc = encoder->crtc; | |
3943 | /* Make sure the crtc and connector are running */ | |
3944 | intel_crtc = to_intel_crtc(crtc); | |
3945 | *dpms_mode = intel_crtc->dpms_mode; | |
3946 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | |
3947 | crtc_funcs = crtc->helper_private; | |
3948 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | |
3949 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | |
3950 | } | |
3951 | return crtc; | |
3952 | } | |
3953 | ||
3954 | /* Find an unused one (if possible) */ | |
3955 | list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { | |
3956 | i++; | |
3957 | if (!(encoder->possible_crtcs & (1 << i))) | |
3958 | continue; | |
3959 | if (!possible_crtc->enabled) { | |
3960 | crtc = possible_crtc; | |
3961 | break; | |
3962 | } | |
3963 | if (!supported_crtc) | |
3964 | supported_crtc = possible_crtc; | |
3965 | } | |
3966 | ||
3967 | /* | |
3968 | * If we didn't find an unused CRTC, don't use any. | |
3969 | */ | |
3970 | if (!crtc) { | |
3971 | return NULL; | |
3972 | } | |
3973 | ||
3974 | encoder->crtc = crtc; | |
3975 | connector->encoder = encoder; | |
3976 | intel_encoder->load_detect_temp = true; | |
3977 | ||
3978 | intel_crtc = to_intel_crtc(crtc); | |
3979 | *dpms_mode = intel_crtc->dpms_mode; | |
3980 | ||
3981 | if (!crtc->enabled) { | |
3982 | if (!mode) | |
3983 | mode = &load_detect_mode; | |
3984 | drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); | |
3985 | } else { | |
3986 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | |
3987 | crtc_funcs = crtc->helper_private; | |
3988 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | |
3989 | } | |
3990 | ||
3991 | /* Add this connector to the crtc */ | |
3992 | encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); | |
3993 | encoder_funcs->commit(encoder); | |
3994 | } | |
3995 | /* let the connector get through one full cycle before testing */ | |
3996 | intel_wait_for_vblank(dev); | |
3997 | ||
3998 | return crtc; | |
3999 | } | |
4000 | ||
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @intel_encoder: encoder that was borrowed for load detection
 * @connector: connector that was probed
 * @dpms_mode: DPMS state to restore (as returned by the get call)
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector, int dpms_mode)
{
	struct drm_encoder *encoder = &intel_encoder->enc;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	/* If the crtc/connector wiring was only set up temporarily for the
	 * detection, tear it down again and shut off anything now unused. */
	if (intel_encoder->load_detect_temp) {
		encoder->crtc = NULL;
		connector->encoder = NULL;
		intel_encoder->load_detect_temp = false;
		crtc->enabled = drm_helper_crtc_in_use(crtc);
		drm_helper_disable_unused_functions(dev);
	}

	/* Switch crtc and encoder back off if necessary */
	if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
		/* only dpms the encoder if it is still attached to this crtc */
		if (encoder->crtc == crtc)
			encoder_funcs->dpms(encoder, dpms_mode);
		crtc_funcs->dpms(crtc, dpms_mode);
	}
}
4025 | ||
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
	u32 fp;
	intel_clock_t clock;

	/* Pick whichever FP register (FP0 or the reduced-rate FP1) is
	 * currently selected by the DPLL's rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
	else
		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);

	/* Decode the M1/N/M2 divisors; Pineview encodes N as a bit
	 * position, hence the ffs(). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (IS_I9XX(dev)) {
		/* P1 is stored as a bit mask; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Pre-9xx: LVDS detection is inferred from pipe B having the
		 * LVDS port enabled. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				/* field stores (p1 - 2) */
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
4112 | ||
4113 | /** Returns the currently programmed mode of the given pipe. */ | |
4114 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |
4115 | struct drm_crtc *crtc) | |
4116 | { | |
4117 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4118 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
4119 | int pipe = intel_crtc->pipe; | |
4120 | struct drm_display_mode *mode; | |
4121 | int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); | |
4122 | int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); | |
4123 | int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); | |
4124 | int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); | |
4125 | ||
4126 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | |
4127 | if (!mode) | |
4128 | return NULL; | |
4129 | ||
4130 | mode->clock = intel_crtc_clock_get(dev, crtc); | |
4131 | mode->hdisplay = (htot & 0xffff) + 1; | |
4132 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | |
4133 | mode->hsync_start = (hsync & 0xffff) + 1; | |
4134 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; | |
4135 | mode->vdisplay = (vtot & 0xffff) + 1; | |
4136 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; | |
4137 | mode->vsync_start = (vsync & 0xffff) + 1; | |
4138 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; | |
4139 | ||
4140 | drm_mode_set_name(mode); | |
4141 | drm_mode_set_crtcinfo(mode, 0); | |
4142 | ||
4143 | return mode; | |
4144 | } | |
4145 | ||
4146 | #define GPU_IDLE_TIMEOUT 500 /* ms */ | |
4147 | ||
4148 | /* When this timer fires, we've been idle for awhile */ | |
4149 | static void intel_gpu_idle_timer(unsigned long arg) | |
4150 | { | |
4151 | struct drm_device *dev = (struct drm_device *)arg; | |
4152 | drm_i915_private_t *dev_priv = dev->dev_private; | |
4153 | ||
4154 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | |
4155 | ||
4156 | dev_priv->busy = false; | |
4157 | ||
4158 | queue_work(dev_priv->wq, &dev_priv->idle_work); | |
4159 | } | |
4160 | ||
4161 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | |
4162 | ||
4163 | static void intel_crtc_idle_timer(unsigned long arg) | |
4164 | { | |
4165 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | |
4166 | struct drm_crtc *crtc = &intel_crtc->base; | |
4167 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | |
4168 | ||
4169 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | |
4170 | ||
4171 | intel_crtc->busy = false; | |
4172 | ||
4173 | queue_work(dev_priv->wq, &dev_priv->idle_work); | |
4174 | } | |
4175 | ||
/*
 * intel_increase_pllclock - switch an LVDS pipe back to its full-rate clock
 * @crtc: crtc to upclock
 * @schedule: when true, (re)arm the idle timer so the pipe is downclocked
 *	again after CRTC_IDLE_TIMEOUT ms of inactivity
 *
 * No-op on PCH-split (Ironlake) hardware and when no LVDS downclock is
 * available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/* Only upclock if the reduced-rate divisor (FPA1) is currently
	 * selected; with pipe CxSR the hardware manages this itself. */
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		/* NOTE(review): 0xabcd<<16 appears to be the panel-protect
		 * unlock key — confirm against the register spec. */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		dpll = I915_READ(dpll_reg);
		/* Wait a frame, then read back to verify the switch took. */
		intel_wait_for_vblank(dev);
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	if (schedule)
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
4214 | ||
/*
 * Drop the LVDS pixel clock to its reduced (FPA1) rate to save power while
 * the CRTC is idle.  Mirror image of intel_increase_pllclock(); no-op on
 * PCH-split platforms or when downclocking is unavailable.  Called from the
 * idle work item (see intel_idle_update).
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		/* same 0xabcd unlock key as intel_increase_pllclock */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));

		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		dpll = I915_READ(dpll_reg);
		/* Wait a vblank for the new rate to take effect, then verify. */
		intel_wait_for_vblank(dev);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

}
4253 | ||
4254 | /** | |
4255 | * intel_idle_update - adjust clocks for idleness | |
4256 | * @work: work struct | |
4257 | * | |
4258 | * Either the GPU or display (or both) went idle. Check the busy status | |
4259 | * here and adjust the CRTC and GPU clocks as necessary. | |
4260 | */ | |
4261 | static void intel_idle_update(struct work_struct *work) | |
4262 | { | |
4263 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
4264 | idle_work); | |
4265 | struct drm_device *dev = dev_priv->dev; | |
4266 | struct drm_crtc *crtc; | |
4267 | struct intel_crtc *intel_crtc; | |
4268 | ||
4269 | if (!i915_powersave) | |
4270 | return; | |
4271 | ||
4272 | mutex_lock(&dev->struct_mutex); | |
4273 | ||
4274 | if (IS_I945G(dev) || IS_I945GM(dev)) { | |
4275 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | |
4276 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | |
4277 | } | |
4278 | ||
4279 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4280 | /* Skip inactive CRTCs */ | |
4281 | if (!crtc->fb) | |
4282 | continue; | |
4283 | ||
4284 | intel_crtc = to_intel_crtc(crtc); | |
4285 | if (!intel_crtc->busy) | |
4286 | intel_decrease_pllclock(crtc); | |
4287 | } | |
4288 | ||
4289 | mutex_unlock(&dev->struct_mutex); | |
4290 | } | |
4291 | ||
4292 | /** | |
4293 | * intel_mark_busy - mark the GPU and possibly the display busy | |
4294 | * @dev: drm device | |
4295 | * @obj: object we're operating on | |
4296 | * | |
4297 | * Callers can use this function to indicate that the GPU is busy processing | |
4298 | * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout | |
4299 | * buffer), we'll also mark the display as busy, so we know to increase its | |
4300 | * clock frequency. | |
4301 | */ | |
4302 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |
4303 | { | |
4304 | drm_i915_private_t *dev_priv = dev->dev_private; | |
4305 | struct drm_crtc *crtc = NULL; | |
4306 | struct intel_framebuffer *intel_fb; | |
4307 | struct intel_crtc *intel_crtc; | |
4308 | ||
4309 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | |
4310 | return; | |
4311 | ||
4312 | if (!dev_priv->busy) { | |
4313 | if (IS_I945G(dev) || IS_I945GM(dev)) { | |
4314 | u32 fw_blc_self; | |
4315 | ||
4316 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | |
4317 | fw_blc_self = I915_READ(FW_BLC_SELF); | |
4318 | fw_blc_self &= ~FW_BLC_SELF_EN; | |
4319 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | |
4320 | } | |
4321 | dev_priv->busy = true; | |
4322 | } else | |
4323 | mod_timer(&dev_priv->idle_timer, jiffies + | |
4324 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | |
4325 | ||
4326 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4327 | if (!crtc->fb) | |
4328 | continue; | |
4329 | ||
4330 | intel_crtc = to_intel_crtc(crtc); | |
4331 | intel_fb = to_intel_framebuffer(crtc->fb); | |
4332 | if (intel_fb->obj == obj) { | |
4333 | if (!intel_crtc->busy) { | |
4334 | if (IS_I945G(dev) || IS_I945GM(dev)) { | |
4335 | u32 fw_blc_self; | |
4336 | ||
4337 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | |
4338 | fw_blc_self = I915_READ(FW_BLC_SELF); | |
4339 | fw_blc_self &= ~FW_BLC_SELF_EN; | |
4340 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | |
4341 | } | |
4342 | /* Non-busy -> busy, upclock */ | |
4343 | intel_increase_pllclock(crtc, true); | |
4344 | intel_crtc->busy = true; | |
4345 | } else { | |
4346 | /* Busy -> busy, put off timer */ | |
4347 | mod_timer(&intel_crtc->idle_timer, jiffies + | |
4348 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | |
4349 | } | |
4350 | } | |
4351 | } | |
4352 | } | |
4353 | ||
/* CRTC destroy hook: detach from the DRM core, then free our wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(icrtc);
}
4361 | ||
/*
 * Bookkeeping for one in-flight page flip.  Allocated in
 * intel_crtc_page_flip(), published via intel_crtc->unpin_work (protected
 * by dev->event_lock), and freed by intel_unpin_work_fn() once the flip
 * has completed.
 */
struct intel_unpin_work {
	struct work_struct work;	/* runs intel_unpin_work_fn */
	struct drm_device *dev;
	struct drm_gem_object *old_fb_obj;	/* fb being flipped away from; unpinned on completion */
	struct drm_gem_object *pending_flip_obj;	/* fb being flipped to */
	struct drm_pending_vblank_event *event;	/* optional userspace completion event */
	int pending;	/* set by intel_prepare_page_flip once the flip is queued to hw */
};
4370 | ||
4371 | static void intel_unpin_work_fn(struct work_struct *__work) | |
4372 | { | |
4373 | struct intel_unpin_work *work = | |
4374 | container_of(__work, struct intel_unpin_work, work); | |
4375 | ||
4376 | mutex_lock(&work->dev->struct_mutex); | |
4377 | i915_gem_object_unpin(work->old_fb_obj); | |
4378 | drm_gem_object_unreference(work->pending_flip_obj); | |
4379 | drm_gem_object_unreference(work->old_fb_obj); | |
4380 | mutex_unlock(&work->dev->struct_mutex); | |
4381 | kfree(work); | |
4382 | } | |
4383 | ||
/*
 * Called from the vblank interrupt path when a queued page flip has
 * completed on @pipe.  Retires intel_crtc->unpin_work: delivers the
 * userspace vblank event (if any), wakes waiters on pending_flip, and
 * schedules the unpin work.  unpin_work is protected by dev->event_lock.
 */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj_priv;
	struct drm_pending_vblank_event *e;
	struct timeval now;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	/* Nothing to retire if no flip is queued, or it hasn't been marked
	 * pending by intel_prepare_page_flip() yet. */
	if (work == NULL || !work->pending) {
		if (work && !work->pending) {
			obj_priv = to_intel_bo(work->pending_flip_obj);
			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
					 obj_priv,
					 atomic_read(&obj_priv->pending_flip));
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Unpublish the work before dropping the lock; drop the vblank
	 * reference taken when the flip was queued. */
	intel_crtc->unpin_work = NULL;
	drm_vblank_put(dev, intel_crtc->pipe);

	if (work->event) {
		/* Complete the userspace pageflip event with the current
		 * vblank count and timestamp. */
		e = work->event;
		do_gettimeofday(&now);
		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj_priv = to_intel_bo(work->pending_flip_obj);

	/* Initial scanout buffer will have a 0 pending flip count */
	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
	    atomic_dec_and_test(&obj_priv->pending_flip))
		DRM_WAKEUP(&dev_priv->pending_flip_queue);
	/* Unpinning needs struct_mutex, so defer it to the workqueue. */
	schedule_work(&work->work);
}
4436 | ||
4437 | void intel_prepare_page_flip(struct drm_device *dev, int plane) | |
4438 | { | |
4439 | drm_i915_private_t *dev_priv = dev->dev_private; | |
4440 | struct intel_crtc *intel_crtc = | |
4441 | to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); | |
4442 | unsigned long flags; | |
4443 | ||
4444 | spin_lock_irqsave(&dev->event_lock, flags); | |
4445 | if (intel_crtc->unpin_work) { | |
4446 | intel_crtc->unpin_work->pending = 1; | |
4447 | } else { | |
4448 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | |
4449 | } | |
4450 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
4451 | } | |
4452 | ||
4453 | static int intel_crtc_page_flip(struct drm_crtc *crtc, | |
4454 | struct drm_framebuffer *fb, | |
4455 | struct drm_pending_vblank_event *event) | |
4456 | { | |
4457 | struct drm_device *dev = crtc->dev; | |
4458 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4459 | struct intel_framebuffer *intel_fb; | |
4460 | struct drm_i915_gem_object *obj_priv; | |
4461 | struct drm_gem_object *obj; | |
4462 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
4463 | struct intel_unpin_work *work; | |
4464 | unsigned long flags; | |
4465 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | |
4466 | int ret, pipesrc; | |
4467 | RING_LOCALS; | |
4468 | ||
4469 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
4470 | if (work == NULL) | |
4471 | return -ENOMEM; | |
4472 | ||
4473 | mutex_lock(&dev->struct_mutex); | |
4474 | ||
4475 | work->event = event; | |
4476 | work->dev = crtc->dev; | |
4477 | intel_fb = to_intel_framebuffer(crtc->fb); | |
4478 | work->old_fb_obj = intel_fb->obj; | |
4479 | INIT_WORK(&work->work, intel_unpin_work_fn); | |
4480 | ||
4481 | /* We borrow the event spin lock for protecting unpin_work */ | |
4482 | spin_lock_irqsave(&dev->event_lock, flags); | |
4483 | if (intel_crtc->unpin_work) { | |
4484 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | |
4485 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
4486 | kfree(work); | |
4487 | mutex_unlock(&dev->struct_mutex); | |
4488 | return -EBUSY; | |
4489 | } | |
4490 | intel_crtc->unpin_work = work; | |
4491 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
4492 | ||
4493 | intel_fb = to_intel_framebuffer(fb); | |
4494 | obj = intel_fb->obj; | |
4495 | ||
4496 | ret = intel_pin_and_fence_fb_obj(dev, obj); | |
4497 | if (ret != 0) { | |
4498 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | |
4499 | to_intel_bo(obj)); | |
4500 | kfree(work); | |
4501 | intel_crtc->unpin_work = NULL; | |
4502 | mutex_unlock(&dev->struct_mutex); | |
4503 | return ret; | |
4504 | } | |
4505 | ||
4506 | /* Reference the objects for the scheduled work. */ | |
4507 | drm_gem_object_reference(work->old_fb_obj); | |
4508 | drm_gem_object_reference(obj); | |
4509 | ||
4510 | crtc->fb = fb; | |
4511 | i915_gem_object_flush_write_domain(obj); | |
4512 | drm_vblank_get(dev, intel_crtc->pipe); | |
4513 | obj_priv = to_intel_bo(obj); | |
4514 | atomic_inc(&obj_priv->pending_flip); | |
4515 | work->pending_flip_obj = obj; | |
4516 | ||
4517 | BEGIN_LP_RING(4); | |
4518 | OUT_RING(MI_DISPLAY_FLIP | | |
4519 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | |
4520 | OUT_RING(fb->pitch); | |
4521 | if (IS_I965G(dev)) { | |
4522 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | |
4523 | pipesrc = I915_READ(pipesrc_reg); | |
4524 | OUT_RING(pipesrc & 0x0fff0fff); | |
4525 | } else { | |
4526 | OUT_RING(obj_priv->gtt_offset); | |
4527 | OUT_RING(MI_NOOP); | |
4528 | } | |
4529 | ADVANCE_LP_RING(); | |
4530 | ||
4531 | mutex_unlock(&dev->struct_mutex); | |
4532 | ||
4533 | return 0; | |
4534 | } | |
4535 | ||
/* CRTC helper vtable wired up in intel_crtc_init(). */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.prepare = intel_crtc_prepare,
	.commit = intel_crtc_commit,
	.load_lut = intel_crtc_load_lut,
};
4545 | ||
/* Core CRTC vtable passed to drm_crtc_init() in intel_crtc_init(). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
4554 | ||
4555 | ||
4556 | static void intel_crtc_init(struct drm_device *dev, int pipe) | |
4557 | { | |
4558 | drm_i915_private_t *dev_priv = dev->dev_private; | |
4559 | struct intel_crtc *intel_crtc; | |
4560 | int i; | |
4561 | ||
4562 | intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); | |
4563 | if (intel_crtc == NULL) | |
4564 | return; | |
4565 | ||
4566 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | |
4567 | ||
4568 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | |
4569 | intel_crtc->pipe = pipe; | |
4570 | intel_crtc->plane = pipe; | |
4571 | for (i = 0; i < 256; i++) { | |
4572 | intel_crtc->lut_r[i] = i; | |
4573 | intel_crtc->lut_g[i] = i; | |
4574 | intel_crtc->lut_b[i] = i; | |
4575 | } | |
4576 | ||
4577 | /* Swap pipes & planes for FBC on pre-965 */ | |
4578 | intel_crtc->pipe = pipe; | |
4579 | intel_crtc->plane = pipe; | |
4580 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | |
4581 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | |
4582 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | |
4583 | } | |
4584 | ||
4585 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | |
4586 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); | |
4587 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | |
4588 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | |
4589 | ||
4590 | intel_crtc->cursor_addr = 0; | |
4591 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | |
4592 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | |
4593 | ||
4594 | intel_crtc->busy = false; | |
4595 | ||
4596 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | |
4597 | (unsigned long)intel_crtc); | |
4598 | } | |
4599 | ||
4600 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |
4601 | struct drm_file *file_priv) | |
4602 | { | |
4603 | drm_i915_private_t *dev_priv = dev->dev_private; | |
4604 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | |
4605 | struct drm_mode_object *drmmode_obj; | |
4606 | struct intel_crtc *crtc; | |
4607 | ||
4608 | if (!dev_priv) { | |
4609 | DRM_ERROR("called with no initialization\n"); | |
4610 | return -EINVAL; | |
4611 | } | |
4612 | ||
4613 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, | |
4614 | DRM_MODE_OBJECT_CRTC); | |
4615 | ||
4616 | if (!drmmode_obj) { | |
4617 | DRM_ERROR("no such CRTC id\n"); | |
4618 | return -EINVAL; | |
4619 | } | |
4620 | ||
4621 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | |
4622 | pipe_from_crtc_id->pipe = crtc->pipe; | |
4623 | ||
4624 | return 0; | |
4625 | } | |
4626 | ||
4627 | struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) | |
4628 | { | |
4629 | struct drm_crtc *crtc = NULL; | |
4630 | ||
4631 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
4633 | if (intel_crtc->pipe == pipe) | |
4634 | break; | |
4635 | } | |
4636 | return crtc; | |
4637 | } | |
4638 | ||
4639 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) | |
4640 | { | |
4641 | int index_mask = 0; | |
4642 | struct drm_encoder *encoder; | |
4643 | int entry = 0; | |
4644 | ||
4645 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | |
4646 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | |
4647 | if (type_mask & intel_encoder->clone_mask) | |
4648 | index_mask |= (1 << entry); | |
4649 | entry++; | |
4650 | } | |
4651 | return index_mask; | |
4652 | } | |
4653 | ||
4654 | ||
/*
 * Probe and register all display outputs for this device: CRT, LVDS,
 * then the digital ports appropriate to the platform (PCH-split, G4X-class
 * integrated, or gen2 DVO), and finally TV.  Probe order matters because
 * some ports multiplex (e.g. SDVO/HDMI share pins).  After registration,
 * fill in each encoder's possible_crtcs/possible_clones masks.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;

	intel_crt_init(dev);

	/* Set up integrated LVDS */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		intel_lvds_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		/* eDP lives on DP_A, mobile only */
		if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
			intel_dp_init(dev, DP_A);

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE(review): SDVOB's detect bit is read here on purpose —
		 * per the comment above, pre-G4X SDVOC has no detect register
		 * of its own; verify this is still intended on G4X parts. */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Now that all encoders exist, compute routing/clone masks. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

		encoder->possible_crtcs = intel_encoder->crtc_mask;
		encoder->possible_clones = intel_encoder_clones(dev,
								intel_encoder->clone_mask);
	}
}
4748 | ||
4749 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |
4750 | { | |
4751 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
4752 | struct drm_device *dev = fb->dev; | |
4753 | ||
4754 | if (fb->fbdev) | |
4755 | intelfb_remove(dev, fb); | |
4756 | ||
4757 | drm_framebuffer_cleanup(fb); | |
4758 | drm_gem_object_unreference_unlocked(intel_fb->obj); | |
4759 | ||
4760 | kfree(intel_fb); | |
4761 | } | |
4762 | ||
4763 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | |
4764 | struct drm_file *file_priv, | |
4765 | unsigned int *handle) | |
4766 | { | |
4767 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
4768 | struct drm_gem_object *object = intel_fb->obj; | |
4769 | ||
4770 | return drm_gem_handle_create(file_priv, object, handle); | |
4771 | } | |
4772 | ||
/* Framebuffer vtable passed to drm_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
4777 | ||
4778 | int intel_framebuffer_create(struct drm_device *dev, | |
4779 | struct drm_mode_fb_cmd *mode_cmd, | |
4780 | struct drm_framebuffer **fb, | |
4781 | struct drm_gem_object *obj) | |
4782 | { | |
4783 | struct intel_framebuffer *intel_fb; | |
4784 | int ret; | |
4785 | ||
4786 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | |
4787 | if (!intel_fb) | |
4788 | return -ENOMEM; | |
4789 | ||
4790 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | |
4791 | if (ret) { | |
4792 | DRM_ERROR("framebuffer init failed %d\n", ret); | |
4793 | return ret; | |
4794 | } | |
4795 | ||
4796 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); | |
4797 | ||
4798 | intel_fb->obj = obj; | |
4799 | ||
4800 | *fb = &intel_fb->base; | |
4801 | ||
4802 | return 0; | |
4803 | } | |
4804 | ||
4805 | ||
4806 | static struct drm_framebuffer * | |
4807 | intel_user_framebuffer_create(struct drm_device *dev, | |
4808 | struct drm_file *filp, | |
4809 | struct drm_mode_fb_cmd *mode_cmd) | |
4810 | { | |
4811 | struct drm_gem_object *obj; | |
4812 | struct drm_framebuffer *fb; | |
4813 | int ret; | |
4814 | ||
4815 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | |
4816 | if (!obj) | |
4817 | return NULL; | |
4818 | ||
4819 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); | |
4820 | if (ret) { | |
4821 | drm_gem_object_unreference_unlocked(obj); | |
4822 | return NULL; | |
4823 | } | |
4824 | ||
4825 | return fb; | |
4826 | } | |
4827 | ||
/* Device-wide mode_config vtable (framebuffer creation / fbdev probe). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.fb_changed = intelfb_probe,
};
4832 | ||
4833 | static struct drm_gem_object * | |
4834 | intel_alloc_power_context(struct drm_device *dev) | |
4835 | { | |
4836 | struct drm_gem_object *pwrctx; | |
4837 | int ret; | |
4838 | ||
4839 | pwrctx = drm_gem_object_alloc(dev, 4096); | |
4840 | if (!pwrctx) { | |
4841 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | |
4842 | return NULL; | |
4843 | } | |
4844 | ||
4845 | mutex_lock(&dev->struct_mutex); | |
4846 | ret = i915_gem_object_pin(pwrctx, 4096); | |
4847 | if (ret) { | |
4848 | DRM_ERROR("failed to pin power context: %d\n", ret); | |
4849 | goto err_unref; | |
4850 | } | |
4851 | ||
4852 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | |
4853 | if (ret) { | |
4854 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | |
4855 | goto err_unpin; | |
4856 | } | |
4857 | mutex_unlock(&dev->struct_mutex); | |
4858 | ||
4859 | return pwrctx; | |
4860 | ||
4861 | err_unpin: | |
4862 | i915_gem_object_unpin(pwrctx); | |
4863 | err_unref: | |
4864 | drm_gem_object_unreference(pwrctx); | |
4865 | mutex_unlock(&dev->struct_mutex); | |
4866 | return NULL; | |
4867 | } | |
4868 | ||
/*
 * Enable DRPS (dynamic render P-state) on Ironlake: program the RC
 * evaluation intervals and thresholds, derive the min/start/max frequency
 * points from MEMMODECTL, switch the memory controller to software
 * frequency control, and request the starting frequency.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
	u8 fmax, fmin, fstart, vstart;
	int i = 0;

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	/* Voltage for the starting frequency, from the PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control to software. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	/* Wait (up to ~100ms) for any in-flight frequency command to drain. */
	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
		if (i++ > 100) {
			DRM_ERROR("stuck trying to change perf mode\n");
			break;
		}
		msleep(1);
	}
	msleep(1);

	/* Request the starting frequency... */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	/* ...and fire off the command. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
}
4927 | ||
/*
 * Disable DRPS on Ironlake: mask and ack the frequency-change interrupts,
 * then command the memory controller back to the starting frequency read
 * from MEMMODECTL.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvswctl;
	u8 fstart;

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
	/* Fire the command bit after the request has settled. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

}
4953 | ||
/*
 * Program the per-generation clock gating registers: disable the gating
 * units documented as broken for each chip while leaving the rest enabled,
 * and — where supported — set up the RC6 power context so the render unit
 * can power down on idle.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * Disable clock gating reported to work incorrectly according to the
	 * specs, but enable as much else as we can.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

		if (IS_IRONLAKE(dev)) {
			/* Required for FBC */
			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
			/* Required for CxSR */
			dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

			I915_WRITE(PCH_3DCGDIS0,
				   MARIUNIT_CLOCK_GATE_DISABLE |
				   SVSMUNIT_CLOCK_GATE_DISABLE);
		}

		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
		/* PCH-split parts skip the RC6 setup below. */
		return;
	} else if (IS_G4X(dev)) {
		uint32_t dspclk_gate;
		I915_WRITE(RENCLK_GATE_D1, 0);
		I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		       GS_UNIT_CLOCK_GATE_DISABLE |
		       CL_UNIT_CLOCK_GATE_DISABLE);
		I915_WRITE(RAMCLK_GATE_D, 0);
		dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
			OVRUNIT_CLOCK_GATE_DISABLE |
			OVCUNIT_CLOCK_GATE_DISABLE;
		if (IS_GM45(dev))
			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
	} else if (IS_I965GM(dev)) {
		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
		I915_WRITE(RENCLK_GATE_D2, 0);
		I915_WRITE(DSPCLK_GATE_D, 0);
		I915_WRITE(RAMCLK_GATE_D, 0);
		I915_WRITE16(DEUC, 0);
	} else if (IS_I965G(dev)) {
		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		       I965_RCC_CLOCK_GATE_DISABLE |
		       I965_RCPB_CLOCK_GATE_DISABLE |
		       I965_ISC_CLOCK_GATE_DISABLE |
		       I965_FBC_CLOCK_GATE_DISABLE);
		I915_WRITE(RENCLK_GATE_D2, 0);
	} else if (IS_I9XX(dev)) {
		u32 dstate = I915_READ(D_STATE);

		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
			DSTATE_DOT_CLOCK_GATING;
		I915_WRITE(D_STATE, dstate);
	} else if (IS_I85X(dev) || IS_I865G(dev)) {
		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
	} else if (IS_I830(dev)) {
		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_i915_gem_object *obj_priv = NULL;

		/* Reuse an existing power context (e.g. across resume). */
		if (dev_priv->pwrctx) {
			obj_priv = to_intel_bo(dev_priv->pwrctx);
		} else {
			struct drm_gem_object *pwrctx;

			pwrctx = intel_alloc_power_context(dev);
			if (pwrctx) {
				dev_priv->pwrctx = pwrctx;
				obj_priv = to_intel_bo(pwrctx);
			}
		}

		/* Point the hardware at the context page and enable RC6. */
		if (obj_priv) {
			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
			I915_WRITE(MCHBAR_RENDER_STANDBY,
				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
		}
	}
}
5042 | ||
5043 | /* Set up chip specific display functions */ | |
/*
 * Set up the chip-specific display function pointers in
 * dev_priv->display.  Every hook is chosen once at init time based on
 * the platform, so the rest of the driver can call through the vtable
 * without re-checking the chip generation on every call.
 *
 * NOTE(review): the else-if chains below appear order-sensitive — the
 * platform predicates (IS_I965G, IS_I9XX, IS_MOBILE, ...) presumably
 * overlap, so a later check only fires when the earlier, more specific
 * ones did not.  Verify against the macro definitions before reordering.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev))
		dev_priv->display.dpms = ironlake_crtc_dpms;
	else
		dev_priv->display.dpms = i9xx_crtc_dpms;

	/* Only mobile has FBC, leave pointers NULL for other chips */
	if (IS_MOBILE(dev)) {
		if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_I965GM(dev)) {
			/* 965GM reuses the i8xx-style FBC hooks */
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev))
		/* PCH-split (Ironlake) chips get no watermark hook here */
		dev_priv->display.update_wm = NULL;
	else if (IS_G4X(dev))
		dev_priv->display.update_wm = g4x_update_wm;
	else if (IS_I965G(dev))
		dev_priv->display.update_wm = i965_update_wm;
	else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else {
		/* Pre-9xx desktop chips: pick a FIFO-size helper per chip,
		 * all sharing the i830 watermark update path. */
		if (IS_I85X(dev))
			dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		else if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		dev_priv->display.update_wm = i830_update_wm;
	}
}
5111 | ||
5112 | void intel_modeset_init(struct drm_device *dev) | |
5113 | { | |
5114 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5115 | int num_pipe; | |
5116 | int i; | |
5117 | ||
5118 | drm_mode_config_init(dev); | |
5119 | ||
5120 | dev->mode_config.min_width = 0; | |
5121 | dev->mode_config.min_height = 0; | |
5122 | ||
5123 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | |
5124 | ||
5125 | intel_init_display(dev); | |
5126 | ||
5127 | if (IS_I965G(dev)) { | |
5128 | dev->mode_config.max_width = 8192; | |
5129 | dev->mode_config.max_height = 8192; | |
5130 | } else if (IS_I9XX(dev)) { | |
5131 | dev->mode_config.max_width = 4096; | |
5132 | dev->mode_config.max_height = 4096; | |
5133 | } else { | |
5134 | dev->mode_config.max_width = 2048; | |
5135 | dev->mode_config.max_height = 2048; | |
5136 | } | |
5137 | ||
5138 | /* set memory base */ | |
5139 | if (IS_I9XX(dev)) | |
5140 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | |
5141 | else | |
5142 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | |
5143 | ||
5144 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | |
5145 | num_pipe = 2; | |
5146 | else | |
5147 | num_pipe = 1; | |
5148 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | |
5149 | num_pipe, num_pipe > 1 ? "s" : ""); | |
5150 | ||
5151 | for (i = 0; i < num_pipe; i++) { | |
5152 | intel_crtc_init(dev, i); | |
5153 | } | |
5154 | ||
5155 | intel_setup_outputs(dev); | |
5156 | ||
5157 | intel_init_clock_gating(dev); | |
5158 | ||
5159 | if (IS_IRONLAKE_M(dev)) | |
5160 | ironlake_enable_drps(dev); | |
5161 | ||
5162 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | |
5163 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | |
5164 | (unsigned long)dev); | |
5165 | ||
5166 | intel_setup_overlay(dev); | |
5167 | ||
5168 | if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | |
5169 | dev_priv->fsb_freq, | |
5170 | dev_priv->mem_freq)) | |
5171 | DRM_INFO("failed to find known CxSR latency " | |
5172 | "(found fsb freq %d, mem freq %d), disabling CxSR\n", | |
5173 | dev_priv->fsb_freq, dev_priv->mem_freq); | |
5174 | } | |
5175 | ||
5176 | void intel_modeset_cleanup(struct drm_device *dev) | |
5177 | { | |
5178 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5179 | struct drm_crtc *crtc; | |
5180 | struct intel_crtc *intel_crtc; | |
5181 | ||
5182 | mutex_lock(&dev->struct_mutex); | |
5183 | ||
5184 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
5185 | /* Skip inactive CRTCs */ | |
5186 | if (!crtc->fb) | |
5187 | continue; | |
5188 | ||
5189 | intel_crtc = to_intel_crtc(crtc); | |
5190 | intel_increase_pllclock(crtc, false); | |
5191 | del_timer_sync(&intel_crtc->idle_timer); | |
5192 | } | |
5193 | ||
5194 | del_timer_sync(&dev_priv->idle_timer); | |
5195 | ||
5196 | if (dev_priv->display.disable_fbc) | |
5197 | dev_priv->display.disable_fbc(dev); | |
5198 | ||
5199 | if (dev_priv->pwrctx) { | |
5200 | struct drm_i915_gem_object *obj_priv; | |
5201 | ||
5202 | obj_priv = to_intel_bo(dev_priv->pwrctx); | |
5203 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | |
5204 | I915_READ(PWRCTXA); | |
5205 | i915_gem_object_unpin(dev_priv->pwrctx); | |
5206 | drm_gem_object_unreference(dev_priv->pwrctx); | |
5207 | } | |
5208 | ||
5209 | if (IS_IRONLAKE_M(dev)) | |
5210 | ironlake_disable_drps(dev); | |
5211 | ||
5212 | mutex_unlock(&dev->struct_mutex); | |
5213 | ||
5214 | drm_mode_config_cleanup(dev); | |
5215 | } | |
5216 | ||
5217 | ||
5218 | /* | |
5219 | * Return which encoder is currently attached for connector. | |
5220 | */ | |
5221 | struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) | |
5222 | { | |
5223 | struct drm_mode_object *obj; | |
5224 | struct drm_encoder *encoder; | |
5225 | int i; | |
5226 | ||
5227 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | |
5228 | if (connector->encoder_ids[i] == 0) | |
5229 | break; | |
5230 | ||
5231 | obj = drm_mode_object_find(connector->dev, | |
5232 | connector->encoder_ids[i], | |
5233 | DRM_MODE_OBJECT_ENCODER); | |
5234 | if (!obj) | |
5235 | continue; | |
5236 | ||
5237 | encoder = obj_to_encoder(obj); | |
5238 | return encoder; | |
5239 | } | |
5240 | return NULL; | |
5241 | } | |
5242 | ||
5243 | /* | |
5244 | * set vga decode state - true == enable VGA decode | |
5245 | */ | |
5246 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |
5247 | { | |
5248 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5249 | u16 gmch_ctrl; | |
5250 | ||
5251 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); | |
5252 | if (state) | |
5253 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; | |
5254 | else | |
5255 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; | |
5256 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | |
5257 | return 0; | |
5258 | } |