]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/gpu/drm/i915/i915_dma.c
drm/i915: Pad ringbuffer with NOOPs before wrapping
[net-next-2.6.git] / drivers / gpu / drm / i915 / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "drm_crtc_helper.h"
32 #include "intel_drv.h"
33 #include "i915_drm.h"
34 #include "i915_drv.h"
35
36 #define I915_DRV        "i915_drv"
37
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 *
 * Waits until at least @n bytes of space are free in the ring buffer,
 * polling the hardware head pointer.  Returns 0 on success, -EBUSY if
 * the GPU makes no progress for the whole timeout.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	/* The active-head register moved on 965-class hardware. */
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* Free space between hardware head and software tail; the
		 * extra 8 bytes keep tail from ever catching head exactly. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Record that we are busy-waiting in the perf stats. */
		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}


		/* Restart the timeout whenever the GPU shows any progress,
		 * in either the ring head or the active head pointer. */
		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);

	}

	return -EBUSY;
}
82
/* As a ringbuffer is only allowed to wrap between instructions, fill
 * the tail with NOOPs.
 *
 * Pads from the current tail to the end of the ring with MI_NOOP and
 * resets the tail to 0.  Returns 0 on success, or the error from
 * i915_wait_ring() if there is not yet room for the padding.
 */
int i915_wrap_ring(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	volatile unsigned int *virt;
	int rem;

	/* Bytes remaining between the tail and the end of the ring. */
	rem = dev_priv->ring.Size - dev_priv->ring.tail;
	if (dev_priv->ring.space < rem) {
		int ret = i915_wait_ring(dev, rem, __func__);
		if (ret)
			return ret;
	}
	/* The NOOP padding consumes ring space like any other commands. */
	dev_priv->ring.space -= rem;

	virt = (unsigned int *)
		(dev_priv->ring.virtual_start + dev_priv->ring.tail);
	rem /= 4;	/* bytes -> dwords */
	while (rem--)
		*virt++ = MI_NOOP;

	dev_priv->ring.tail = 0;

	return 0;
}
110
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 *
 * Allocates one DMA-coherent page (32-bit addressable), zeroes it, and
 * programs its bus address into HWS_PGA.  Returns 0 or -ENOMEM.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* Keep both the CPU mapping and the bus address around. */
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
	return 0;
}
135
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 *
 * Safe to call when no status page is set up; each branch checks its
 * own state.  Always rewrites HWS_PGA so the GPU no longer points at
 * the freed page.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Physical (DMA-allocated) status page, from i915_init_phys_hws(). */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-based status page, from i915_set_status_page(). */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
156
/* Resynchronize the driver's software copy of the ring state (head,
 * tail, free space) with the hardware registers, after userspace may
 * have touched the ring behind our back.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	/* Same space computation as i915_wait_ring(): 8 bytes of slack. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (!dev->primary->master)
		return;

	/* head == tail means the ring is fully drained. */
	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
183
/* Tear down the legacy DMA state: disable interrupts, unmap the ring,
 * and free the hardware status page.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	/* Unmap the ring and clear the map so a later init starts clean. */
	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
207
/* Initialize the legacy DMA path from the I915_INIT_DMA ioctl: locate
 * the SAREA, map the userspace-provided ring buffer, and record the
 * framebuffer layout parameters.  Returns 0 or a negative errno.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		/* No SAREA is not fatal: DRI2 clients do not use one. */
		DRM_DEBUG_DRIVER(I915_DRV,
				"sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A GEM-managed ring already exists; clients must not set
		 * up a second, userspace-owned one on top of it. */
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		/* Write-combined mapping: the CPU only ever streams commands
		 * into the ring, never reads it back. */
		drm_core_ioremap_wc(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Framebuffer layout for the page-flip path. */
	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
263
/* Re-enable the DMA path after suspend: verify the ring mapping and
 * status page still exist, then re-program HWS_PGA.  Returns 0 or a
 * negative errno.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
				dev_priv->hw_status_page);

	/* Prefer the GTT-based status page when one was configured,
	 * otherwise fall back to the physical DMA page. */
	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");

	return 0;
}
292
293 static int i915_dma_init(struct drm_device *dev, void *data,
294                          struct drm_file *file_priv)
295 {
296         drm_i915_init_t *init = data;
297         int retcode = 0;
298
299         switch (init->func) {
300         case I915_INIT_DMA:
301                 retcode = i915_initialize(dev, init);
302                 break;
303         case I915_CLEANUP_DMA:
304                 retcode = i915_dma_cleanup(dev);
305                 break;
306         case I915_RESUME_DMA:
307                 retcode = i915_dma_resume(dev);
308                 break;
309         default:
310                 retcode = -EINVAL;
311                 break;
312         }
313
314         return retcode;
315 }
316
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 *
 * Returns the length of the command in dwords, or 0 if disallowed.
 *
 * All bit extraction is done on an unsigned copy of the command word:
 * right-shifting a negative signed int (a command with bit 31 set,
 * e.g. client 0x4..0x7) is implementation-defined in C.
 */
static int do_validate_cmd(int cmd)
{
	unsigned int ucmd = (unsigned int)cmd;

	switch ((ucmd >> 29) & 0x7) {
	case 0x0:	/* MI commands */
		switch ((ucmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (ucmd & 0xff) + 2;	/* 2d commands */
	case 0x3:	/* 3D/media commands */
		if (((ucmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((ucmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Length field width depends on the sub-opcode. */
			switch ((ucmd >> 16) & 0xff) {
			case 0x3:
				return (ucmd & 0x1f) + 2;
			case 0x4:
				return (ucmd & 0xf) + 2;
			default:
				return (ucmd & 0xffff) + 2;
			}
		case 0x1e:
			if (ucmd & (1 << 23))
				return (ucmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:	/* 3DPRIMITIVE */
			if ((ucmd & (1 << 23)) == 0)	/* inline vertices */
				return (ucmd & 0x1ffff) + 2;
			else if (ucmd & (1 << 17))	/* indirect random */
				if ((ucmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((ucmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;	/* clients 0x4..0x7 are disallowed */
	}

	return 0;
}
383
/* Thin tracing wrapper around do_validate_cmd(); re-enable the printk
 * below to log every command word as it is validated.
 */
static int validate_cmd(int cmd)
{
	int sz = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, sz); */

	return sz;
}
392
/* Validate and copy @dwords dwords of user commands from @buffer into
 * the ring.  Returns 0 on success, -EINVAL if the buffer is too large
 * or contains a disallowed/truncated command.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Reject buffers that can never fit; the +1 accounts for the
	 * possible alignment pad emitted at the end. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* Ring reservations must be an even number of dwords. */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		cmd = buffer[i];

		/* sz is the full length of this command in dwords; zero
		 * flags an illegal instruction and aborts the buffer.
		 * NOTE(review): bailing out here leaves the BEGIN_LP_RING
		 * reservation partially written -- confirm callers treat
		 * -EINVAL from this path as fatal for the ring. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Copy the command's operand dwords verbatim. */
		while (++i, --sz) {
			OUT_RING(buffer[i]);
		}
	}

	/* Pad to keep the tail qword aligned. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
426
/* Emit a GFX_OP_DRAWRECT_INFO command for cliprect @i of @boxes,
 * restricting subsequent rendering to that rectangle.  Rejects empty
 * or negative boxes with -EINVAL; returns 0 on success.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	RING_LOCALS;

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	/* 965 uses a shorter DRAWRECT_INFO command without the DR1 dword
	 * or the trailing pad. */
	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* x in the low 16 bits, y in the high 16; max is inclusive. */
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
462
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/* Bump the software frame counter and emit an MI_STORE_DWORD_INDEX so
 * the GPU writes the new value into the status page when it reaches
 * this point in the ring.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	/* Keep the counter in the positive signed 32-bit range. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
486
/* Dispatch a user command buffer, replaying it once per cliprect (or
 * once with no cliprect when none were supplied), then emit a
 * breadcrumb.  @cmdbuf is the kernel copy of the command data.
 * Returns 0 or a negative errno.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Command sizes must be a whole number of dwords. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects, still execute the buffer exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
520
/* Dispatch a hardware batch buffer once per cliprect (or once with no
 * cliprects), using MI_BATCH_BUFFER_START where the chipset supports
 * it and the older MI_BATCH_BUFFER command on i830/845G.  Returns 0
 * or a negative errno.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Batch start address and length must be qword aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			/* The non-secure flag moved into the command dword
			 * on 965; earlier parts encode it in the address. */
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			/* i830/845G: MI_BATCH_BUFFER takes explicit start
			 * and (inclusive) end addresses. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
571
/* Emit a page flip between the front and back buffers: flush, issue
 * the async display-buffer change, wait for the flip to complete,
 * then store a breadcrumb.  Requires a SAREA; returns 0 or -EINVAL.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	RING_LOCALS;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush outstanding rendering before changing the scanout base. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Toggle between the two buffers: page 0 is front, 1 is back. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall the ring until the flip actually happens on plane A. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Publish the new current page to userspace via the SAREA. */
	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
624
625 static int i915_quiescent(struct drm_device * dev)
626 {
627         drm_i915_private_t *dev_priv = dev->dev_private;
628
629         i915_kernel_lost_context(dev);
630         return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
631 }
632
633 static int i915_flush_ioctl(struct drm_device *dev, void *data,
634                             struct drm_file *file_priv)
635 {
636         int ret;
637
638         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
639
640         mutex_lock(&dev->struct_mutex);
641         ret = i915_quiescent(dev);
642         mutex_unlock(&dev->struct_mutex);
643
644         return ret;
645 }
646
647 static int i915_batchbuffer(struct drm_device *dev, void *data,
648                             struct drm_file *file_priv)
649 {
650         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
651         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
652         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
653             master_priv->sarea_priv;
654         drm_i915_batchbuffer_t *batch = data;
655         int ret;
656         struct drm_clip_rect *cliprects = NULL;
657
658         if (!dev_priv->allow_batchbuffer) {
659                 DRM_ERROR("Batchbuffer ioctl disabled\n");
660                 return -EINVAL;
661         }
662
663         DRM_DEBUG_DRIVER(I915_DRV,
664                         "i915 batchbuffer, start %x used %d cliprects %d\n",
665                         batch->start, batch->used, batch->num_cliprects);
666
667         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668
669         if (batch->num_cliprects < 0)
670                 return -EINVAL;
671
672         if (batch->num_cliprects) {
673                 cliprects = kcalloc(batch->num_cliprects,
674                                     sizeof(struct drm_clip_rect),
675                                     GFP_KERNEL);
676                 if (cliprects == NULL)
677                         return -ENOMEM;
678
679                 ret = copy_from_user(cliprects, batch->cliprects,
680                                      batch->num_cliprects *
681                                      sizeof(struct drm_clip_rect));
682                 if (ret != 0)
683                         goto fail_free;
684         }
685
686         mutex_lock(&dev->struct_mutex);
687         ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
688         mutex_unlock(&dev->struct_mutex);
689
690         if (sarea_priv)
691                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
692
693 fail_free:
694         kfree(cliprects);
695
696         return ret;
697 }
698
699 static int i915_cmdbuffer(struct drm_device *dev, void *data,
700                           struct drm_file *file_priv)
701 {
702         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
703         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
704         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
705             master_priv->sarea_priv;
706         drm_i915_cmdbuffer_t *cmdbuf = data;
707         struct drm_clip_rect *cliprects = NULL;
708         void *batch_data;
709         int ret;
710
711         DRM_DEBUG_DRIVER(I915_DRV,
712                         "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
713                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
714
715         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
716
717         if (cmdbuf->num_cliprects < 0)
718                 return -EINVAL;
719
720         batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
721         if (batch_data == NULL)
722                 return -ENOMEM;
723
724         ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
725         if (ret != 0)
726                 goto fail_batch_free;
727
728         if (cmdbuf->num_cliprects) {
729                 cliprects = kcalloc(cmdbuf->num_cliprects,
730                                     sizeof(struct drm_clip_rect), GFP_KERNEL);
731                 if (cliprects == NULL)
732                         goto fail_batch_free;
733
734                 ret = copy_from_user(cliprects, cmdbuf->cliprects,
735                                      cmdbuf->num_cliprects *
736                                      sizeof(struct drm_clip_rect));
737                 if (ret != 0)
738                         goto fail_clip_free;
739         }
740
741         mutex_lock(&dev->struct_mutex);
742         ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
743         mutex_unlock(&dev->struct_mutex);
744         if (ret) {
745                 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
746                 goto fail_clip_free;
747         }
748
749         if (sarea_priv)
750                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
751
752 fail_clip_free:
753         kfree(cliprects);
754 fail_batch_free:
755         kfree(batch_data);
756
757         return ret;
758 }
759
760 static int i915_flip_bufs(struct drm_device *dev, void *data,
761                           struct drm_file *file_priv)
762 {
763         int ret;
764
765         DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
766
767         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
768
769         mutex_lock(&dev->struct_mutex);
770         ret = i915_dispatch_flip(dev);
771         mutex_unlock(&dev->struct_mutex);
772
773         return ret;
774 }
775
776 static int i915_getparam(struct drm_device *dev, void *data,
777                          struct drm_file *file_priv)
778 {
779         drm_i915_private_t *dev_priv = dev->dev_private;
780         drm_i915_getparam_t *param = data;
781         int value;
782
783         if (!dev_priv) {
784                 DRM_ERROR("called with no initialization\n");
785                 return -EINVAL;
786         }
787
788         switch (param->param) {
789         case I915_PARAM_IRQ_ACTIVE:
790                 value = dev->pdev->irq ? 1 : 0;
791                 break;
792         case I915_PARAM_ALLOW_BATCHBUFFER:
793                 value = dev_priv->allow_batchbuffer ? 1 : 0;
794                 break;
795         case I915_PARAM_LAST_DISPATCH:
796                 value = READ_BREADCRUMB(dev_priv);
797                 break;
798         case I915_PARAM_CHIPSET_ID:
799                 value = dev->pci_device;
800                 break;
801         case I915_PARAM_HAS_GEM:
802                 value = dev_priv->has_gem;
803                 break;
804         case I915_PARAM_NUM_FENCES_AVAIL:
805                 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
806                 break;
807         default:
808                 DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
809                                         param->param);
810                 return -EINVAL;
811         }
812
813         if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
814                 DRM_ERROR("DRM_COPY_TO_USER failed\n");
815                 return -EFAULT;
816         }
817
818         return 0;
819 }
820
821 static int i915_setparam(struct drm_device *dev, void *data,
822                          struct drm_file *file_priv)
823 {
824         drm_i915_private_t *dev_priv = dev->dev_private;
825         drm_i915_setparam_t *param = data;
826
827         if (!dev_priv) {
828                 DRM_ERROR("called with no initialization\n");
829                 return -EINVAL;
830         }
831
832         switch (param->param) {
833         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
834                 break;
835         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
836                 dev_priv->tex_lru_log_granularity = param->value;
837                 break;
838         case I915_SETPARAM_ALLOW_BATCHBUFFER:
839                 dev_priv->allow_batchbuffer = param->value;
840                 break;
841         case I915_SETPARAM_NUM_USED_FENCES:
842                 if (param->value > dev_priv->num_fence_regs ||
843                     param->value < 0)
844                         return -EINVAL;
845                 /* Userspace can use first N regs */
846                 dev_priv->fence_reg_start = param->value;
847                 break;
848         default:
849                 DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
850                                         param->param);
851                 return -EINVAL;
852         }
853
854         return 0;
855 }
856
/* DRM_IOCTL_I915_HWS_ADDR: on chipsets that need a GTT-based hardware
 * status page (e.g. G33), map the page at the userspace-supplied GTT
 * offset and program it into HWS_PGA.  Returns 0 or a negative errno.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Under KMS the kernel owns the status page; silently ignore. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only the page-aligned GTT offset bits. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		/* Mapping failed: tear down DMA state entirely. */
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
				dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
				dev_priv->hw_status_page);
	return 0;
}
904
905 /**
906  * i915_probe_agp - get AGP bootup configuration
907  * @pdev: PCI device
908  * @aperture_size: returns AGP aperture configured size
909  * @preallocated_size: returns size of BIOS preallocated AGP space
910  *
911  * Since Intel integrated graphics are UMA, the BIOS has to set aside
912  * some RAM for the framebuffer at early boot.  This code figures out
913  * how much was set aside so we can use it for our own purposes.
914  */
915 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
916                           uint32_t *preallocated_size)
917 {
918         struct pci_dev *bridge_dev;
919         u16 tmp = 0;
920         unsigned long overhead;
921         unsigned long stolen;
922
923         bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
924         if (!bridge_dev) {
925                 DRM_ERROR("bridge device not found\n");
926                 return -1;
927         }
928
929         /* Get the fb aperture size and "stolen" memory amount. */
930         pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
931         pci_dev_put(bridge_dev);
932
933         *aperture_size = 1024 * 1024;
934         *preallocated_size = 1024 * 1024;
935
936         switch (dev->pdev->device) {
937         case PCI_DEVICE_ID_INTEL_82830_CGC:
938         case PCI_DEVICE_ID_INTEL_82845G_IG:
939         case PCI_DEVICE_ID_INTEL_82855GM_IG:
940         case PCI_DEVICE_ID_INTEL_82865_IG:
941                 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
942                         *aperture_size *= 64;
943                 else
944                         *aperture_size *= 128;
945                 break;
946         default:
947                 /* 9xx supports large sizes, just look at the length */
948                 *aperture_size = pci_resource_len(dev->pdev, 2);
949                 break;
950         }
951
952         /*
953          * Some of the preallocated space is taken by the GTT
954          * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
955          */
956         if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
957                 overhead = 4096;
958         else
959                 overhead = (*aperture_size / 1024) + 4096;
960
961         switch (tmp & INTEL_GMCH_GMS_MASK) {
962         case INTEL_855_GMCH_GMS_DISABLED:
963                 DRM_ERROR("video memory is disabled\n");
964                 return -1;
965         case INTEL_855_GMCH_GMS_STOLEN_1M:
966                 stolen = 1 * 1024 * 1024;
967                 break;
968         case INTEL_855_GMCH_GMS_STOLEN_4M:
969                 stolen = 4 * 1024 * 1024;
970                 break;
971         case INTEL_855_GMCH_GMS_STOLEN_8M:
972                 stolen = 8 * 1024 * 1024;
973                 break;
974         case INTEL_855_GMCH_GMS_STOLEN_16M:
975                 stolen = 16 * 1024 * 1024;
976                 break;
977         case INTEL_855_GMCH_GMS_STOLEN_32M:
978                 stolen = 32 * 1024 * 1024;
979                 break;
980         case INTEL_915G_GMCH_GMS_STOLEN_48M:
981                 stolen = 48 * 1024 * 1024;
982                 break;
983         case INTEL_915G_GMCH_GMS_STOLEN_64M:
984                 stolen = 64 * 1024 * 1024;
985                 break;
986         case INTEL_GMCH_GMS_STOLEN_128M:
987                 stolen = 128 * 1024 * 1024;
988                 break;
989         case INTEL_GMCH_GMS_STOLEN_256M:
990                 stolen = 256 * 1024 * 1024;
991                 break;
992         case INTEL_GMCH_GMS_STOLEN_96M:
993                 stolen = 96 * 1024 * 1024;
994                 break;
995         case INTEL_GMCH_GMS_STOLEN_160M:
996                 stolen = 160 * 1024 * 1024;
997                 break;
998         case INTEL_GMCH_GMS_STOLEN_224M:
999                 stolen = 224 * 1024 * 1024;
1000                 break;
1001         case INTEL_GMCH_GMS_STOLEN_352M:
1002                 stolen = 352 * 1024 * 1024;
1003                 break;
1004         default:
1005                 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1006                         tmp & INTEL_GMCH_GMS_MASK);
1007                 return -1;
1008         }
1009         *preallocated_size = stolen - overhead;
1010
1011         return 0;
1012 }
1013
/*
 * KMS-only half of driver load: carve up the aperture between stolen
 * "vram" and GEM, bring up the ringbuffer, IRQs and mode setting.
 * Called from i915_driver_load() only when DRIVER_MODESET is set.
 */
static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	/* The framebuffer aperture is BAR 2 on 9xx, BAR 0 on older parts;
	 * mask off the low bits to get the aligned base. */
	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	/* Decide whether the cursor image must sit at a physical address
	 * (per-generation quirk; 965/G33 can use a GTT address again). */
	if (IS_MOBILE(dev) || IS_I9XX(dev))
		dev_priv->cursor_needs_physical = true;
	else
		dev_priv->cursor_needs_physical = false;

	if (IS_I965G(dev) || IS_G33(dev))
		dev_priv->cursor_needs_physical = false;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	ret = i915_gem_init_ringbuffer(dev);
	if (ret)
		goto out;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Failing to find the VBIOS tables is non-fatal. */
	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	intel_modeset_init(dev);

	drm_helper_initial_config(dev);

	return 0;

destroy_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
out:
	return ret;
}
1084
1085 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1086 {
1087         struct drm_i915_master_private *master_priv;
1088
1089         master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1090         if (!master_priv)
1091                 return -ENOMEM;
1092
1093         master->driver_priv = master_priv;
1094         return 0;
1095 }
1096
1097 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1098 {
1099         struct drm_i915_master_private *master_priv = master->driver_priv;
1100
1101         if (!master_priv)
1102                 return;
1103
1104         kfree(master_priv);
1105
1106         master->driver_priv = NULL;
1107 }
1108
1109 static void i915_get_mem_freq(struct drm_device *dev)
1110 {
1111         drm_i915_private_t *dev_priv = dev->dev_private;
1112         u32 tmp;
1113
1114         if (!IS_IGD(dev))
1115                 return;
1116
1117         tmp = I915_READ(CLKCFG);
1118
1119         switch (tmp & CLKCFG_FSB_MASK) {
1120         case CLKCFG_FSB_533:
1121                 dev_priv->fsb_freq = 533; /* 133*4 */
1122                 break;
1123         case CLKCFG_FSB_800:
1124                 dev_priv->fsb_freq = 800; /* 200*4 */
1125                 break;
1126         case CLKCFG_FSB_667:
1127                 dev_priv->fsb_freq =  667; /* 167*4 */
1128                 break;
1129         case CLKCFG_FSB_400:
1130                 dev_priv->fsb_freq = 400; /* 100*4 */
1131                 break;
1132         }
1133
1134         switch (tmp & CLKCFG_MEM_MASK) {
1135         case CLKCFG_MEM_533:
1136                 dev_priv->mem_freq = 533;
1137                 break;
1138         case CLKCFG_MEM_667:
1139                 dev_priv->mem_freq = 667;
1140                 break;
1141         case CLKCFG_MEM_800:
1142                 dev_priv->mem_freq = 800;
1143                 break;
1144         }
1145 }
1146
1147 /**
1148  * i915_driver_load - setup chip and create an initial config
1149  * @dev: DRM device
1150  * @flags: startup flags
1151  *
1152  * The driver load routine has to do several things:
1153  *   - drive output discovery via intel_modeset_init()
1154  *   - initialize the memory manager
1155  *   - allocate initial config memory
1156  *   - setup the DRM framebuffer with the allocated memory
1157  */
1158 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1159 {
1160         struct drm_i915_private *dev_priv = dev->dev_private;
1161         resource_size_t base, size;
1162         int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
1163         uint32_t agp_size, prealloc_size;
1164
1165         /* i915 has 4 more counters */
1166         dev->counters += 4;
1167         dev->types[6] = _DRM_STAT_IRQ;
1168         dev->types[7] = _DRM_STAT_PRIMARY;
1169         dev->types[8] = _DRM_STAT_SECONDARY;
1170         dev->types[9] = _DRM_STAT_DMA;
1171
1172         dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1173         if (dev_priv == NULL)
1174                 return -ENOMEM;
1175
1176         dev->dev_private = (void *)dev_priv;
1177         dev_priv->dev = dev;
1178
1179         /* Add register map (needed for suspend/resume) */
1180         base = drm_get_resource_start(dev, mmio_bar);
1181         size = drm_get_resource_len(dev, mmio_bar);
1182
1183         dev_priv->regs = ioremap(base, size);
1184         if (!dev_priv->regs) {
1185                 DRM_ERROR("failed to map registers\n");
1186                 ret = -EIO;
1187                 goto free_priv;
1188         }
1189
1190         dev_priv->mm.gtt_mapping =
1191                 io_mapping_create_wc(dev->agp->base,
1192                                      dev->agp->agp_info.aper_size * 1024*1024);
1193         if (dev_priv->mm.gtt_mapping == NULL) {
1194                 ret = -EIO;
1195                 goto out_rmmap;
1196         }
1197
1198         /* Set up a WC MTRR for non-PAT systems.  This is more common than
1199          * one would think, because the kernel disables PAT on first
1200          * generation Core chips because WC PAT gets overridden by a UC
1201          * MTRR if present.  Even if a UC MTRR isn't present.
1202          */
1203         dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1204                                          dev->agp->agp_info.aper_size *
1205                                          1024 * 1024,
1206                                          MTRR_TYPE_WRCOMB, 1);
1207         if (dev_priv->mm.gtt_mtrr < 0) {
1208                 DRM_INFO("MTRR allocation failed.  Graphics "
1209                          "performance may suffer.\n");
1210         }
1211
1212         ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1213         if (ret)
1214                 goto out_iomapfree;
1215
1216         dev_priv->wq = create_workqueue("i915");
1217         if (dev_priv->wq == NULL) {
1218                 DRM_ERROR("Failed to create our workqueue.\n");
1219                 ret = -ENOMEM;
1220                 goto out_iomapfree;
1221         }
1222
1223         /* enable GEM by default */
1224         dev_priv->has_gem = 1;
1225
1226         if (prealloc_size > agp_size * 3 / 4) {
1227                 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
1228                           "memory stolen.\n",
1229                           prealloc_size / 1024, agp_size / 1024);
1230                 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
1231                           "updating the BIOS to fix).\n");
1232                 dev_priv->has_gem = 0;
1233         }
1234
1235         dev->driver->get_vblank_counter = i915_get_vblank_counter;
1236         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1237         if (IS_G4X(dev) || IS_IGDNG(dev)) {
1238                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1239                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1240         }
1241
1242         i915_gem_load(dev);
1243
1244         /* Init HWS */
1245         if (!I915_NEED_GFX_HWS(dev)) {
1246                 ret = i915_init_phys_hws(dev);
1247                 if (ret != 0)
1248                         goto out_workqueue_free;
1249         }
1250
1251         i915_get_mem_freq(dev);
1252
1253         /* On the 945G/GM, the chipset reports the MSI capability on the
1254          * integrated graphics even though the support isn't actually there
1255          * according to the published specs.  It doesn't appear to function
1256          * correctly in testing on 945G.
1257          * This may be a side effect of MSI having been made available for PEG
1258          * and the registers being closely associated.
1259          *
1260          * According to chipset errata, on the 965GM, MSI interrupts may
1261          * be lost or delayed, but we use them anyways to avoid
1262          * stuck interrupts on some machines.
1263          */
1264         if (!IS_I945G(dev) && !IS_I945GM(dev))
1265                 pci_enable_msi(dev->pdev);
1266
1267         spin_lock_init(&dev_priv->user_irq_lock);
1268         spin_lock_init(&dev_priv->error_lock);
1269         dev_priv->user_irq_refcount = 0;
1270
1271         ret = drm_vblank_init(dev, I915_NUM_PIPE);
1272
1273         if (ret) {
1274                 (void) i915_driver_unload(dev);
1275                 return ret;
1276         }
1277
1278         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1279                 ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
1280                 if (ret < 0) {
1281                         DRM_ERROR("failed to init modeset\n");
1282                         goto out_workqueue_free;
1283                 }
1284         }
1285
1286         /* Must be done after probing outputs */
1287         /* FIXME: verify on IGDNG */
1288         if (!IS_IGDNG(dev))
1289                 intel_opregion_init(dev, 0);
1290
1291         return 0;
1292
1293 out_workqueue_free:
1294         destroy_workqueue(dev_priv->wq);
1295 out_iomapfree:
1296         io_mapping_free(dev_priv->mm.gtt_mapping);
1297 out_rmmap:
1298         iounmap(dev_priv->regs);
1299 free_priv:
1300         kfree(dev_priv);
1301         return ret;
1302 }
1303
/*
 * Tear down everything i915_driver_load() set up, roughly in reverse
 * order: workqueue, GTT mapping + MTRR, IRQs, MSI, the MMIO map, the
 * ACPI opregion, and (under KMS) modeset and GEM state.  Always
 * returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	destroy_workqueue(dev_priv->wq);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	/* Release the WC MTRR if one was successfully added at load. */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	/* Opregion was only set up on non-IGDNG parts; mirror that here. */
	if (!IS_IGDNG(dev))
		intel_opregion_free(dev, 0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	kfree(dev->dev_private);

	return 0;
}
1346
1347 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1348 {
1349         struct drm_i915_file_private *i915_file_priv;
1350
1351         DRM_DEBUG_DRIVER(I915_DRV, "\n");
1352         i915_file_priv = (struct drm_i915_file_private *)
1353             kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
1354
1355         if (!i915_file_priv)
1356                 return -ENOMEM;
1357
1358         file_priv->driver_priv = i915_file_priv;
1359
1360         INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1361
1362         return 0;
1363 }
1364
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* KMS: just put the console framebuffer back; keep driver state. */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intelfb_restore();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
1393
/*
 * Per-file pre-close hook: release this client's GEM requests and, in
 * the legacy (non-KMS) case, any AGP heap regions it allocated.
 */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
1401
1402 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1403 {
1404         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1405
1406         kfree(i915_file_priv);
1407 }
1408
/*
 * Ioctl dispatch table; entries must line up with the DRM_I915_* ioctl
 * numbers.  Access flags: DRM_AUTH = authenticated fd required,
 * DRM_MASTER = DRM master only, DRM_ROOT_ONLY = privileged; 0 = open to
 * any client (the GEM object ioctls).
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
};

/* Number of entries above; exported for the DRM core's dispatcher. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1449
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}