/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

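/*
 * The helpers below never do a read-modify-write cycle on GTIMR/DEIMR/IMR
 * itself; a shadow copy of each mask register lives in dev_priv, and the
 * posting read after each write flushes the update to the hardware.
 */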
void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

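/*
 * PIPExSTAT keeps the interrupt-enable bits in the high 16 bits of the
 * register and the corresponding status bits 16 bits below them, which
 * is why enabling an event also writes "mask >> 16" to ack stale status.
 */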
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     I915_LEGACY_BLC_EVENT_ENABLE);
		if (IS_I965G(dev))
			i915_enable_pipestat(dev_priv, 0,
					     I915_LEGACY_BLC_EVENT_ENABLE);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	return count;
}

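/*
 * G4X/GM45 parts expose a full hardware frame counter register, so no
 * high/low reconstruction is needed there.
 */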
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	if (mode_config->num_encoder) {
		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

			if (intel_encoder->hot_plug)
				(*intel_encoder->hot_plug) (intel_encoder);
		}
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

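/*
 * Note that the "delay" values below run opposite to frequency: stepping
 * cur_delay down raises the GPU clock, so max_delay acts as the numeric
 * lower bound enforced by the clamps in this function.
 */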
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u16 rgvswctl;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	DRM_DEBUG("rps change requested: %d -> %d\n",
		  dev_priv->cur_delay, new_delay);

	rgvswctl = I915_READ(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_ERROR("gpu busy, RCS change rejected\n");
		return; /* still busy with another command */
	}

	/* Program the new state */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);

	dev_priv->cur_delay = new_delay;

	DRM_DEBUG("rps changed\n");
}

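/*
 * Ironlake splits the interrupt space across three register triples:
 * DEIIR/DEIMR/DEIER for the display engine, GTIIR/... for render, and
 * SDEIIR/... for the south display (PCH).  The handler snapshots all
 * three IIRs with the master interrupt disabled, services the events,
 * then acks in PCH-before-CPU order.
 */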
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	struct drm_i915_master_private *master_priv;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_PIPE_NOTIFY) {
		u32 seqno = i915_get_gem_seqno(dev);
		dev_priv->mm.irq_gem_seqno = seqno;
		trace_i915_gem_request_complete(dev, seqno);
		DRM_WAKEUP(&dev_priv->irq_queue);
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}

	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check for events from the PCH */
	if ((de_iir & DE_PCH_EVENT) &&
	    (pch_iir & SDE_HOTPLUG_MASK)) {
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* the PCH hotplug event must be cleared before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}

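/*
 * Snapshot a GEM object into freshly allocated kernel memory at error
 * time.  This can run in interrupt context, hence GFP_ATOMIC and the
 * kmap_atomic/irq-save dance around each page copy.
 */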
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	for (page = 0; page < page_count; page++) {
		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		unsigned long flags;

		if (d == NULL)
			goto unwind;
		local_irq_save(flags);
		s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
		memcpy(d, s, PAGE_SIZE);
		kunmap_atomic(s, KM_IRQ0);
		local_irq_restore(flags);
		dst->pages[page] = d;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error);
}

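/*
 * Decode one ring dword: if it is the MI_BATCH_BUFFER dispatch command
 * for this chipset generation, return the batch address stored in the
 * following dword, otherwise 0.
 */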
static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (IS_I965G(dev))
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}

static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		ring = (u32 *)(dev_priv->render_ring.virtual_start
			       + dev_priv->render_ring.size);
		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	error->seqno = i915_get_gem_seqno(dev);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (!IS_I965G(dev)) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size &&
		    batchbuffer[0] != obj)
			batchbuffer[1] = obj;

		count++;
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev,
						     dev_priv->render_ring.gem_object);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	u32 pipea_stats = I915_READ(PIPEASTAT);
	u32 pipeb_stats = I915_READ(PIPEBSTAT);

	i915_capture_error_state(dev);

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (IS_I9XX(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

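/*
 * Legacy (pre-Ironlake) top-level interrupt handler.  It loops because
 * pipe events are reported through PIPExSTAT and do not re-assert the
 * corresponding IIR bit once it has been cleared; see the MSI note at
 * the bottom of the loop body.
 */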
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			intel_finish_page_flip(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			intel_finish_page_flip(dev, 1);
		}

		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

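/*
 * Emit a breadcrumb store to the status page followed by
 * MI_USER_INTERRUPT; the returned counter value is what i915_wait_irq()
 * later compares READ_BREADCRUMB() against.
 */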
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	if (dev_priv->trace_irq_seqno == 0)
		render_ring->user_irq_get(dev, render_ring);

	dev_priv->trace_irq_seqno = seqno;
}

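/*
 * Block until the breadcrumb in the status page reaches irq_nr, holding
 * a user-irq reference for the duration so that MI_USER_INTERRUPT wakes
 * the queue.
 */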
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	render_ring->user_irq_get(dev, render_ring);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	render_ring->user_irq_put(dev, render_ring);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd;

	/* No reset support on this chip yet. */
	if (IS_GEN6(dev))
		return;

	if (!IS_I965G(dev))
		acthd = I915_READ(ACTHD);
	else
		acthd = I915_READ(ACTHD_I965);

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->mm.request_list) ||
	    i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);
		return;
	}

	/* Reset the timer in case the chip hangs without another request
	 * being added */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

	if (acthd != dev_priv->last_acthd)
		dev_priv->hangcheck_count = 0;
	else
		dev_priv->hangcheck_count++;

	dev_priv->last_acthd = acthd;
}

/* drm_dma.h hooks
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	/* the user interrupt is enabled in IER but starts out masked in IMR */
	dev_priv->gt_irq_mask_reg = 0xffffffff;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
			hotplug_en |= CRT_HOTPLUG_INT_EN;
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, enable_mask);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);

	return 0;
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}