]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/gpu/drm/i915/i915_dma.c
drm/r128: Add test for initialisation to all ioctls that require it
[net-next-2.6.git] / drivers / gpu / drm / i915 / i915_dma.c
CommitLineData
1da177e4
LT
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
0d6aa60b 3/*
1da177e4
LT
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
bc54fd1a
DA
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
0d6aa60b 27 */
1da177e4
LT
28
29#include "drmP.h"
30#include "drm.h"
79e53945
JB
31#include "drm_crtc_helper.h"
32#include "intel_drv.h"
1da177e4
LT
33#include "i915_drm.h"
34#include "i915_drv.h"
35
1da177e4
LT
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	/* ACTHD (current GPU execution address) lives at a different
	 * register offset on 965-class hardware. */
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* Free space, accounting for wrap-around; the 8-byte slack
		 * keeps head != tail when the ring is completely full. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Report the stall to userspace via the sarea perf boxes. */
		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		/* Restart the timeout whenever the GPU shows any progress,
		 * either in the ring head or in ACTHD. */
		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);
	}

	return -EBUSY;
}
80
398c9cb2
KP
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page.  The 0xffffffff mask constrains the
	 * DMA allocation to 32-bit addressable memory. */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* Keep both the CPU virtual address and the bus address around:
	 * the CPU reads breadcrumbs from it, HWS_PGA wants the bus address. */
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}
105
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Physically-allocated status page (i915_init_phys_hws). */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-based status page (i915_set_status_page). */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
126
/*
 * Resynchronize the driver's software copy of the ring head/tail/space
 * with the hardware registers (userspace may have touched the ring).
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (!dev->primary->master)
		return;

	/* Let userspace know the ring went idle. */
	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
153
/*
 * Tear down the legacy DMA state set up by i915_initialize(): disable
 * IRQs, unmap the ring buffer and release the hardware status page.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
177
/*
 * Set up legacy (non-GEM) DMA: locate the sarea private area and, when
 * userspace supplied a ring (init->ring_size != 0), map it.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A ring already managed by GEM must not be re-initialized
		 * through this legacy path. */
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;
		/* NOTE(review): tail_mask assumes ring_size is a power of
		 * two — verify against userspace contract. */
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		drm_core_ioremap_wc(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	/* Note: executed even when ring_size == 0 (DRI2/GEM path), in
	 * which case map.handle is whatever was previously set up. */
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
233
/*
 * Resume legacy DMA: the ring mapping and status page set up earlier
 * must still exist; this only re-programs the HWS_PGA register.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 dev_priv->hw_status_page);

	/* A GTT-based status page (status_gfx_addr set) takes precedence
	 * over the physically-allocated one. */
	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
262
c153f45f
EA
/*
 * DRM_I915_INIT ioctl: multiplex legacy DMA setup, teardown and resume
 * on init->func.
 */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
286
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Returns the size in dwords of the command at @cmd, or 0 if the
 * command is not allowed.  A zero return is the caller's signal to
 * abort the rest of the buffer, since the position of the following
 * command can no longer be determined.
 */
static int do_validate_cmd(int cmd)
{
	int opcode;

	switch ((cmd >> 29) & 0x7) {
	case 0x0:
		/* MI commands: only NOOP (0x00) and FLUSH (0x04) pass. */
		opcode = (cmd >> 23) & 0x3f;
		if (opcode == 0x0 || opcode == 0x4)
			return 1;
		return 0;
	case 0x1:
		return 0;			/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		opcode = (cmd >> 24) & 0x1f;
		/* Single-dword 3D state commands, plus opcode 0x1c. */
		if (opcode <= 0x18 || opcode == 0x1c)
			return 1;
		if (opcode == 0x1d) {
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		}
		if (opcode == 0x1e)
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		if (opcode == 0x1f) {
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;			/* indirect sequential */
		}
		return 0;
	default:
		return 0;
	}
}
353
/*
 * Thin wrapper around do_validate_cmd(); kept as a separate function as
 * a convenient hook for tracing command validation.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
362
/*
 * Copy a user-supplied buffer of @dwords dwords into the ring, checking
 * each command with validate_cmd().  A disallowed or truncated command
 * aborts the whole buffer with -EINVAL.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* The buffer (plus one pad dword) must fit inside the ring. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* Reserve an even number of dwords (qword alignment). */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		cmd = buffer[i];

		/* sz is the full command length; it must not run past the
		 * end of the supplied buffer. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Emit the command's remaining operand dwords. */
		while (++i, --sz) {
			OUT_RING(buffer[i]);
		}
	}

	/* Pad with a NOOP so the tail stays qword-aligned. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
396
673a394b
EA
/*
 * Emit a GFX_OP_DRAWRECT_INFO command restricting rendering to cliprect
 * @i of @boxes.  Returns -EINVAL for a degenerate box.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	RING_LOCALS;

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		/* 965 uses a shorter command without the DR1 dword. */
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
432
c29b669c
AH
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	/* Advance the breadcrumb counter, wrapping so it stays within
	 * the positive 32-bit range. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Have the GPU write the counter into the status page at
	 * I915_BREADCRUMB_INDEX so progress can be observed. */
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
456
/*
 * Emit a validated user command buffer once per cliprect (or exactly
 * once when there are none), each pass preceded by its drawing
 * rectangle.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Command buffer size must be dword-aligned. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
490
/*
 * Emit a batchbuffer call once per cliprect (or exactly once when there
 * are none).  830/845 lack MI_BATCH_BUFFER_START and use the older
 * MI_BATCH_BUFFER start/end form instead.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Start address and length must be qword-aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				/* 965 carries the non-secure bit in the
				 * command dword, not the address. */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			/* Older form: explicit start and (inclusive) end. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
541
/*
 * Queue an asynchronous page flip between the front and back buffers
 * and emit a breadcrumb so completion can be observed via the status
 * page.  Requires a sarea (legacy DRI userspace).
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	RING_LOCALS;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush outstanding rendering before the flip. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Point the display at the other buffer and toggle current_page. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall command parsing until the plane A flip has completed. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Breadcrumb, as in i915_emit_breadcrumb(). */
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
594
/* Wait until the ring is fully drained (all but the 8 slack bytes free). */
static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
602
c153f45f
EA
/* DRM_I915_FLUSH ioctl: wait for the ring to drain completely. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	/* Caller must hold the HW lock. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
616
c153f45f
EA
617static int i915_batchbuffer(struct drm_device *dev, void *data,
618 struct drm_file *file_priv)
1da177e4 619{
1da177e4 620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
7c1c2871 621 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1da177e4 622 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
7c1c2871 623 master_priv->sarea_priv;
c153f45f 624 drm_i915_batchbuffer_t *batch = data;
1da177e4 625 int ret;
201361a5 626 struct drm_clip_rect *cliprects = NULL;
1da177e4
LT
627
628 if (!dev_priv->allow_batchbuffer) {
629 DRM_ERROR("Batchbuffer ioctl disabled\n");
20caafa6 630 return -EINVAL;
1da177e4
LT
631 }
632
8a4c47f3 633 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
be25ed9c 634 batch->start, batch->used, batch->num_cliprects);
1da177e4 635
546b0974 636 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 637
201361a5
EA
638 if (batch->num_cliprects < 0)
639 return -EINVAL;
640
641 if (batch->num_cliprects) {
9a298b2a
EA
642 cliprects = kcalloc(batch->num_cliprects,
643 sizeof(struct drm_clip_rect),
644 GFP_KERNEL);
201361a5
EA
645 if (cliprects == NULL)
646 return -ENOMEM;
647
648 ret = copy_from_user(cliprects, batch->cliprects,
649 batch->num_cliprects *
650 sizeof(struct drm_clip_rect));
651 if (ret != 0)
652 goto fail_free;
653 }
1da177e4 654
546b0974 655 mutex_lock(&dev->struct_mutex);
201361a5 656 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
546b0974 657 mutex_unlock(&dev->struct_mutex);
1da177e4 658
c99b058f 659 if (sarea_priv)
0baf823a 660 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
201361a5
EA
661
662fail_free:
9a298b2a 663 kfree(cliprects);
201361a5 664
1da177e4
LT
665 return ret;
666}
667
c153f45f
EA
668static int i915_cmdbuffer(struct drm_device *dev, void *data,
669 struct drm_file *file_priv)
1da177e4 670{
1da177e4 671 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
7c1c2871 672 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1da177e4 673 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
7c1c2871 674 master_priv->sarea_priv;
c153f45f 675 drm_i915_cmdbuffer_t *cmdbuf = data;
201361a5
EA
676 struct drm_clip_rect *cliprects = NULL;
677 void *batch_data;
1da177e4
LT
678 int ret;
679
8a4c47f3 680 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
be25ed9c 681 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
1da177e4 682
546b0974 683 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1da177e4 684
201361a5
EA
685 if (cmdbuf->num_cliprects < 0)
686 return -EINVAL;
687
9a298b2a 688 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
201361a5
EA
689 if (batch_data == NULL)
690 return -ENOMEM;
691
692 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
693 if (ret != 0)
694 goto fail_batch_free;
695
696 if (cmdbuf->num_cliprects) {
9a298b2a
EA
697 cliprects = kcalloc(cmdbuf->num_cliprects,
698 sizeof(struct drm_clip_rect), GFP_KERNEL);
201361a5
EA
699 if (cliprects == NULL)
700 goto fail_batch_free;
701
702 ret = copy_from_user(cliprects, cmdbuf->cliprects,
703 cmdbuf->num_cliprects *
704 sizeof(struct drm_clip_rect));
705 if (ret != 0)
706 goto fail_clip_free;
1da177e4
LT
707 }
708
546b0974 709 mutex_lock(&dev->struct_mutex);
201361a5 710 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
546b0974 711 mutex_unlock(&dev->struct_mutex);
1da177e4
LT
712 if (ret) {
713 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
355d7f37 714 goto fail_clip_free;
1da177e4
LT
715 }
716
c99b058f 717 if (sarea_priv)
0baf823a 718 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
201361a5 719
201361a5 720fail_clip_free:
9a298b2a 721 kfree(cliprects);
355d7f37 722fail_batch_free:
9a298b2a 723 kfree(batch_data);
201361a5
EA
724
725 return ret;
1da177e4
LT
726}
727
c153f45f
EA
/* DRM_I915_FLIP ioctl: dispatch a front/back page flip. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	/* Caller must hold the HW lock. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
743
c153f45f
EA
/* DRM_I915_GETPARAM ioctl: report driver and hardware capabilities. */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		/* Fences not reserved for userspace via SETPARAM. */
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
788
c153f45f
EA
/* DRM_I915_SETPARAM ioctl: adjust driver behaviour from userspace. */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		/* Accepted but has no effect (no-op). */
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
824
c153f45f
EA
/*
 * DRM_I915_HWS_ADDR ioctl: let userspace place the hardware status page
 * at a given offset inside the GTT aperture, for chips that require a
 * GTT-based status page (I915_NEED_GFX_HWS).
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Under KMS the kernel owns the status page; warn and ignore. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	/* Page-aligned offset within the (up to 512MB) aperture. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 dev_priv->hw_status_page);
	return 0;
}
872
79e53945
JB
/**
 * i915_probe_agp - get AGP bootup configuration
 * @pdev: PCI device
 * @aperture_size: returns AGP aperture configured size
 * @preallocated_size: returns size of BIOS preallocated AGP space
 *
 * Since Intel integrated graphics are UMA, the BIOS has to set aside
 * some RAM for the framebuffer at early boot. This code figures out
 * how much was set aside so we can use it for our own purposes.
 */
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
			  uint32_t *preallocated_size)
{
	struct pci_dev *bridge_dev;
	u16 tmp = 0;
	unsigned long overhead;
	unsigned long stolen;

	/* The GMCH control register lives on the host bridge (0:0.0). */
	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}

	/* Get the fb aperture size and "stolen" memory amount. */
	pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
	pci_dev_put(bridge_dev);

	*aperture_size = 1024 * 1024;
	*preallocated_size = 1024 * 1024;

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		/* Pre-9xx chips report 64MB or 128MB via the GMCH bits. */
		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
			*aperture_size *= 64;
		else
			*aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		*aperture_size = pci_resource_len(dev->pdev, 2);
		break;
	}

	/*
	 * Some of the preallocated space is taken by the GTT
	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;

	/* Decode the BIOS "stolen memory" field. */
	switch (tmp & INTEL_GMCH_GMS_MASK) {
	case INTEL_855_GMCH_GMS_DISABLED:
		DRM_ERROR("video memory is disabled\n");
		return -1;
	case INTEL_855_GMCH_GMS_STOLEN_1M:
		stolen = 1 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_4M:
		stolen = 4 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_8M:
		stolen = 8 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_16M:
		stolen = 16 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_32M:
		stolen = 32 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_48M:
		stolen = 48 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_64M:
		stolen = 64 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_128M:
		stolen = 128 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_256M:
		stolen = 256 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen = 96 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen = 160 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen = 224 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen = 352 * 1024 * 1024;
		break;
	default:
		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
			  tmp & INTEL_GMCH_GMS_MASK);
		return -1;
	}
	*preallocated_size = stolen - overhead;

	return 0;
}
981
2a34f5e6
EA
/*
 * One-time setup for kernel modesetting: carve the aperture up between
 * the stolen-memory allocator and GEM, bring up the ring, IRQs, BIOS
 * tables and the modeset core.
 */
static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	/* NOTE(review): cursor placement policy — mobile/9xx need a
	 * physical address, except 965/G33 which override below; confirm
	 * against hardware docs. */
	if (IS_MOBILE(dev) || IS_I9XX(dev))
		dev_priv->cursor_needs_physical = true;
	else
		dev_priv->cursor_needs_physical = false;

	if (IS_I965G(dev) || IS_G33(dev))
		dev_priv->cursor_needs_physical = false;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture. One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	ret = i915_gem_init_ringbuffer(dev);
	if (ret)
		goto out;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	intel_modeset_init(dev);

	drm_helper_initial_config(dev);

	return 0;

destroy_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
out:
	return ret;
}
1052
7c1c2871
DA
1053int i915_master_create(struct drm_device *dev, struct drm_master *master)
1054{
1055 struct drm_i915_master_private *master_priv;
1056
9a298b2a 1057 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
7c1c2871
DA
1058 if (!master_priv)
1059 return -ENOMEM;
1060
1061 master->driver_priv = master_priv;
1062 return 0;
1063}
1064
1065void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1066{
1067 struct drm_i915_master_private *master_priv = master->driver_priv;
1068
1069 if (!master_priv)
1070 return;
1071
9a298b2a 1072 kfree(master_priv);
7c1c2871
DA
1073
1074 master->driver_priv = NULL;
1075}
1076
7662c8bd
SL
1077static void i915_get_mem_freq(struct drm_device *dev)
1078{
1079 drm_i915_private_t *dev_priv = dev->dev_private;
1080 u32 tmp;
1081
1082 if (!IS_IGD(dev))
1083 return;
1084
1085 tmp = I915_READ(CLKCFG);
1086
1087 switch (tmp & CLKCFG_FSB_MASK) {
1088 case CLKCFG_FSB_533:
1089 dev_priv->fsb_freq = 533; /* 133*4 */
1090 break;
1091 case CLKCFG_FSB_800:
1092 dev_priv->fsb_freq = 800; /* 200*4 */
1093 break;
1094 case CLKCFG_FSB_667:
1095 dev_priv->fsb_freq = 667; /* 167*4 */
1096 break;
1097 case CLKCFG_FSB_400:
1098 dev_priv->fsb_freq = 400; /* 100*4 */
1099 break;
1100 }
1101
1102 switch (tmp & CLKCFG_MEM_MASK) {
1103 case CLKCFG_MEM_533:
1104 dev_priv->mem_freq = 533;
1105 break;
1106 case CLKCFG_MEM_667:
1107 dev_priv->mem_freq = 667;
1108 break;
1109 case CLKCFG_MEM_800:
1110 dev_priv->mem_freq = 800;
1111 break;
1112 }
1113}
1114
79e53945
JB
1115/**
1116 * i915_driver_load - setup chip and create an initial config
1117 * @dev: DRM device
1118 * @flags: startup flags
1119 *
1120 * The driver load routine has to do several things:
1121 * - drive output discovery via intel_modeset_init()
1122 * - initialize the memory manager
1123 * - allocate initial config memory
1124 * - setup the DRM framebuffer with the allocated memory
1125 */
84b1fd10 1126int i915_driver_load(struct drm_device *dev, unsigned long flags)
22eae947 1127{
ba8bbcf6 1128 struct drm_i915_private *dev_priv = dev->dev_private;
d883f7f1 1129 resource_size_t base, size;
ba8bbcf6 1130 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
2a34f5e6 1131 uint32_t agp_size, prealloc_size;
ba8bbcf6 1132
22eae947
DA
1133 /* i915 has 4 more counters */
1134 dev->counters += 4;
1135 dev->types[6] = _DRM_STAT_IRQ;
1136 dev->types[7] = _DRM_STAT_PRIMARY;
1137 dev->types[8] = _DRM_STAT_SECONDARY;
1138 dev->types[9] = _DRM_STAT_DMA;
1139
9a298b2a 1140 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
ba8bbcf6
JB
1141 if (dev_priv == NULL)
1142 return -ENOMEM;
1143
ba8bbcf6 1144 dev->dev_private = (void *)dev_priv;
673a394b 1145 dev_priv->dev = dev;
ba8bbcf6
JB
1146
1147 /* Add register map (needed for suspend/resume) */
1148 base = drm_get_resource_start(dev, mmio_bar);
1149 size = drm_get_resource_len(dev, mmio_bar);
1150
3043c60c 1151 dev_priv->regs = ioremap(base, size);
79e53945
JB
1152 if (!dev_priv->regs) {
1153 DRM_ERROR("failed to map registers\n");
1154 ret = -EIO;
1155 goto free_priv;
1156 }
ed4cb414 1157
ab657db1
EA
1158 dev_priv->mm.gtt_mapping =
1159 io_mapping_create_wc(dev->agp->base,
1160 dev->agp->agp_info.aper_size * 1024*1024);
6644107d
VP
1161 if (dev_priv->mm.gtt_mapping == NULL) {
1162 ret = -EIO;
1163 goto out_rmmap;
1164 }
1165
ab657db1
EA
1166 /* Set up a WC MTRR for non-PAT systems. This is more common than
1167 * one would think, because the kernel disables PAT on first
1168 * generation Core chips because WC PAT gets overridden by a UC
1169 * MTRR if present. Even if a UC MTRR isn't present.
1170 */
1171 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1172 dev->agp->agp_info.aper_size *
1173 1024 * 1024,
1174 MTRR_TYPE_WRCOMB, 1);
1175 if (dev_priv->mm.gtt_mtrr < 0) {
040aefa2 1176 DRM_INFO("MTRR allocation failed. Graphics "
ab657db1
EA
1177 "performance may suffer.\n");
1178 }
1179
2a34f5e6
EA
1180 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1181 if (ret)
1182 goto out_iomapfree;
1183
9c9fe1f8
EA
1184 dev_priv->wq = create_workqueue("i915");
1185 if (dev_priv->wq == NULL) {
1186 DRM_ERROR("Failed to create our workqueue.\n");
1187 ret = -ENOMEM;
1188 goto out_iomapfree;
1189 }
1190
ac5c4e76
DA
1191 /* enable GEM by default */
1192 dev_priv->has_gem = 1;
ac5c4e76 1193
2a34f5e6
EA
1194 if (prealloc_size > agp_size * 3 / 4) {
1195 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
1196 "memory stolen.\n",
1197 prealloc_size / 1024, agp_size / 1024);
1198 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
1199 "updating the BIOS to fix).\n");
1200 dev_priv->has_gem = 0;
1201 }
1202
9880b7a5 1203 dev->driver->get_vblank_counter = i915_get_vblank_counter;
42c2798b 1204 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
036a4a7d 1205 if (IS_G4X(dev) || IS_IGDNG(dev)) {
42c2798b 1206 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
9880b7a5 1207 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
42c2798b 1208 }
9880b7a5 1209
673a394b
EA
1210 i915_gem_load(dev);
1211
398c9cb2
KP
1212 /* Init HWS */
1213 if (!I915_NEED_GFX_HWS(dev)) {
1214 ret = i915_init_phys_hws(dev);
1215 if (ret != 0)
9c9fe1f8 1216 goto out_workqueue_free;
398c9cb2 1217 }
ed4cb414 1218
7662c8bd
SL
1219 i915_get_mem_freq(dev);
1220
ed4cb414
EA
1221 /* On the 945G/GM, the chipset reports the MSI capability on the
1222 * integrated graphics even though the support isn't actually there
1223 * according to the published specs. It doesn't appear to function
1224 * correctly in testing on 945G.
1225 * This may be a side effect of MSI having been made available for PEG
1226 * and the registers being closely associated.
d1ed629f
KP
1227 *
1228 * According to chipset errata, on the 965GM, MSI interrupts may
b60678a7
KP
1229 * be lost or delayed, but we use them anyways to avoid
1230 * stuck interrupts on some machines.
ed4cb414 1231 */
b60678a7 1232 if (!IS_I945G(dev) && !IS_I945GM(dev))
d3e74d02 1233 pci_enable_msi(dev->pdev);
ed4cb414
EA
1234
1235 spin_lock_init(&dev_priv->user_irq_lock);
63eeaf38 1236 spin_lock_init(&dev_priv->error_lock);
79e53945 1237 dev_priv->user_irq_refcount = 0;
ed4cb414 1238
52440211
KP
1239 ret = drm_vblank_init(dev, I915_NUM_PIPE);
1240
1241 if (ret) {
1242 (void) i915_driver_unload(dev);
1243 return ret;
1244 }
1245
79e53945 1246 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2a34f5e6 1247 ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
79e53945
JB
1248 if (ret < 0) {
1249 DRM_ERROR("failed to init modeset\n");
9c9fe1f8 1250 goto out_workqueue_free;
79e53945
JB
1251 }
1252 }
1253
74a365b3 1254 /* Must be done after probing outputs */
e170b030
ZW
1255 /* FIXME: verify on IGDNG */
1256 if (!IS_IGDNG(dev))
1257 intel_opregion_init(dev, 0);
74a365b3 1258
79e53945
JB
1259 return 0;
1260
9c9fe1f8
EA
1261out_workqueue_free:
1262 destroy_workqueue(dev_priv->wq);
6644107d
VP
1263out_iomapfree:
1264 io_mapping_free(dev_priv->mm.gtt_mapping);
79e53945
JB
1265out_rmmap:
1266 iounmap(dev_priv->regs);
1267free_priv:
9a298b2a 1268 kfree(dev_priv);
ba8bbcf6
JB
1269 return ret;
1270}
1271
/*
 * i915_driver_unload - tear down the driver state built by i915_driver_load
 * @dev: DRM device
 *
 * Releases resources in roughly the reverse order of acquisition:
 * workqueue, GTT io_mapping and WC MTRR, IRQ handler (KMS), MSI,
 * register mapping, opregion, modeset/GEM state, and finally the
 * private structure itself.  Always returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	destroy_workqueue(dev_priv->wq);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	/* Only registered when mtrr_add() succeeded during load. */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	/* IRQs were installed by i915_load_modeset_init() in the KMS case. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	/* Opregion was only initialized on non-IGDNG parts during load. */
	if (!IS_IGDNG(dev))
		intel_opregion_free(dev, 0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		/* Ringbuffer teardown touches GEM state and therefore
		 * needs the struct mutex. */
		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	kfree(dev->dev_private);

	return 0;
}
1314
673a394b
EA
1315int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1316{
1317 struct drm_i915_file_private *i915_file_priv;
1318
8a4c47f3 1319 DRM_DEBUG_DRIVER("\n");
673a394b 1320 i915_file_priv = (struct drm_i915_file_private *)
9a298b2a 1321 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
673a394b
EA
1322
1323 if (!i915_file_priv)
1324 return -ENOMEM;
1325
1326 file_priv->driver_priv = i915_file_priv;
1327
b962442e 1328 INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
673a394b
EA
1329
1330 return 0;
1331}
1332
79e53945
JB
1333/**
1334 * i915_driver_lastclose - clean up after all DRM clients have exited
1335 * @dev: DRM device
1336 *
1337 * Take care of cleaning up after all DRM clients have exited. In the
1338 * mode setting case, we want to restore the kernel's initial mode (just
1339 * in case the last client left us in a bad state).
1340 *
1341 * Additionally, in the non-mode setting case, we'll tear down the AGP
1342 * and DMA structures, since the kernel won't be using them, and clea
1343 * up any GEM state.
1344 */
84b1fd10 1345void i915_driver_lastclose(struct drm_device * dev)
1da177e4 1346{
ba8bbcf6
JB
1347 drm_i915_private_t *dev_priv = dev->dev_private;
1348
79e53945
JB
1349 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1350 intelfb_restore();
144a75fa 1351 return;
79e53945 1352 }
144a75fa 1353
673a394b
EA
1354 i915_gem_lastclose(dev);
1355
ba8bbcf6 1356 if (dev_priv->agp_heap)
b5e89ed5 1357 i915_mem_takedown(&(dev_priv->agp_heap));
ba8bbcf6 1358
b5e89ed5 1359 i915_dma_cleanup(dev);
1da177e4
LT
1360}
1361
6c340eac 1362void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1da177e4 1363{
ba8bbcf6 1364 drm_i915_private_t *dev_priv = dev->dev_private;
b962442e 1365 i915_gem_release(dev, file_priv);
79e53945
JB
1366 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1367 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1da177e4
LT
1368}
1369
673a394b
EA
1370void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1371{
1372 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1373
9a298b2a 1374 kfree(i915_file_priv);
673a394b
EA
1375}
1376
c153f45f
EA
/*
 * Master ioctl dispatch table for the i915 driver.  Entry order must
 * match the DRM_I915_* ioctl numbers in i915_drm.h.  Flags:
 * DRM_AUTH = caller must be authenticated against the master,
 * DRM_MASTER = caller must be the current master,
 * DRM_ROOT_ONLY = caller must have CAP_SYS_ADMIN.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	/* Legacy DMA/DRI1 ioctls. */
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	/* Legacy AGP heap management. */
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	/* GEM memory-management ioctls. */
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
};

/* Number of entries in i915_ioctls, consumed by the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
cda17380
DA
1417
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}