/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/console.h>
#include "drm_crtc_helper.h"

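/*
 * Module parameters. All are registered with mode 0400, so they can be set
 * on the module (or kernel) command line as i915.<name>=, but they are not
 * writable through sysfs after the driver has loaded.
 */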
static int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);

unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0400);

unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);

static struct drm_driver driver;
extern int intel_agp_enabled;

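/*
 * INTEL_VGA_DEVICE() builds a pci_device_id entry matching an Intel
 * (vendor 0x8086) VGA-class device with the given PCI device ID and stores
 * a pointer to the corresponding intel_device_info in driver_data, so the
 * probe path can look up per-chipset capabilities from the match entry.
 */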
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
	.class_mask = 0xffff00,			\
	.vendor = 0x8086,			\
	.device = id,				\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .is_ironlake = 1,
	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_ironlake = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00

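/*
 * PCH (Platform Controller Hub) detection: the PCH type is identified from
 * the upper byte of the PCI device ID of Intel's ISA bridge; 0x1c00 is the
 * CougarPoint PCH.
 */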
void intel_detect_pch (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMMs, which only
	 * need to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the
	 * virtualization team.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			int id;
			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
			}
		}
		pci_dev_put(pch);
	}
}

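/*
 * i915_drm_freeze()/i915_drm_thaw() do the device-specific work shared by
 * system suspend and hibernation: idle the GPU, save or restore register
 * state, and tear down or re-establish the ACPI opregion. i915_suspend()
 * and i915_resume() wrap them with the PCI power-state transitions and also
 * serve as the legacy (non-KMS) suspend hooks installed by i915_init().
 */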
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	drm_kms_helper_poll_disable(dev);

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);

		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	return error;
}

int i915_resume(struct drm_device *dev)
{
	int ret;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	ret = i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

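/*
 * Per-generation GPU reset helpers used by i915_reset(): each one pokes the
 * generation-specific reset register and, where the hardware reports
 * completion, waits up to 500 ms for the reset bit to clear.
 */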
static int i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return gdrst & 0x1;
}

static int i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);

	return wait_for(i965_reset_complete(dev), 500);
}

static int ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		dev_priv->mm.suspended = 0;
		ring->init(dev, ring);
		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
	 * need to retrain the display link and cannot just restore the register
	 * values.
	 */
	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}


static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

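/*
 * dev_pm_ops callbacks wired up through i915_pm_ops below: the suspend,
 * resume and hibernation (freeze/thaw/poweroff/restore) entry points used
 * via the PCI driver core. Each one resolves the drm_device from the PCI
 * device and delegates to the freeze/thaw helpers above.
 */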
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int error;

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,
};

static struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.enable_vblank = i915_enable_vblank,
	.disable_vblank = i915_disable_vblank,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,
	.irq_handler = i915_driver_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,
	.ioctls = i915_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = drm_ioctl,
		 .mmap = drm_gem_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 .read = drm_read,
#ifdef CONFIG_COMPAT
		 .compat_ioctl = i915_compat_ioctl,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
		 .probe = i915_pci_probe,
		 .remove = i915_pci_remove,
		 .driver.pm = &i915_pm_ops,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static int __init i915_init(void)
{
	if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
		return -ENODEV;
	}

	driver.num_ioctls = i915_max_ioctl;

	i915_gem_shrinker_init();

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow the optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.suspend = i915_suspend;
		driver.resume = i915_resume;
	}

	return drm_init(&driver);
}

static void __exit i915_exit(void)
{
	i915_gem_shrinker_exit();
	drm_exit(&driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");