]>
Commit | Line | Data |
---|---|---|
1 | /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- | |
2 | */ | |
3 | /* | |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | |
5 | * All Rights Reserved. | |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
27 | */ | |
28 | ||
29 | #include "drmP.h" | |
30 | #include "drm.h" | |
31 | #include "i915_drm.h" | |
32 | #include "i915_drv.h" | |
33 | ||
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/**
 * Busy-wait until at least @n bytes of space are free in the ring buffer.
 *
 * Polls PRB0_HEAD (and the active-head register, whose location differs on
 * 965) and recomputes the free space each iteration.  The iteration counter
 * is reset to zero whenever either head value moves, so the -EBUSY timeout
 * only fires on a genuine stall, not on slow-but-progressing hardware.
 *
 * @caller is unused here; presumably kept for debug instrumentation at call
 * sites — TODO confirm before removing.
 *
 * Returns 0 once space is available, -EBUSY on a sustained stall.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;	/* head wrapped past tail */
		if (ring->space >= n)
			return 0;

		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Any forward progress resets the timeout. */
		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);

	}

	return -EBUSY;
}
74 | ||
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 *
 * Allocates one DMA-coherent page with a 32-bit address mask, zeroes it,
 * and programs HWS_PGA with its bus address so the chip can write status
 * (e.g. breadcrumb) data into it.
 *
 * Returns 0 on success or -ENOMEM if the page could not be allocated.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	/* Cache both CPU and bus views of the page. */
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");
	return 0;
}
99 | ||
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 *
 * Handles both variants: the DMA page allocated by i915_init_phys_hws()
 * and the GTT-mapped page installed via the I915_HWS_ADDR ioctl.  Finally
 * points HWS_PGA at a dummy address so the chip no longer writes into the
 * freed page.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
120 | ||
/**
 * Resynchronize the software ring bookkeeping with the hardware.
 *
 * Called before the kernel emits commands, since another context (e.g. the
 * X server) may have advanced the ring behind the kernel's back: re-reads
 * head/tail from the ring registers and recomputes the free space.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;	/* head wrapped past tail */

	/* head == tail means the ring has drained completely. */
	if (ring->head == ring->tail && dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
135 | ||
/**
 * Tear down the legacy DMA state: IRQ, ring mapping, HWS page, sarea
 * pointers.
 *
 * Each resource is released only if present, so repeated calls are safe.
 * Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	dev_priv->sarea = NULL;
	dev_priv->sarea_priv = NULL;

	return 0;
}
162 | ||
/**
 * Initialize legacy DMA: locate the sarea and, when the client supplies a
 * ring (non-GEM mode), map the ring buffer.
 *
 * init->ring_size == 0 indicates a GEM-managed ring; a nonzero size with a
 * GEM ring object already present is rejected.  On any failure the partial
 * state is unwound via i915_dma_cleanup().
 *
 * Returns 0 on success, -EINVAL for a missing sarea or GEM conflict,
 * -ENOMEM if the ring cannot be mapped.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	/* The per-driver sarea lives at a client-chosen offset inside the
	 * shared sarea mapping. */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)
		((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	if (init->ring_size != 0) {
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;
		/* tail_mask assumes ring_size is a power of two —
		 * presumably guaranteed by the caller; TODO confirm. */
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		drm_core_ioremap(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	/* NOTE(review): assigned unconditionally — in GEM mode (ring_size
	 * == 0) this copies whatever handle is already in ring.map. */
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
218 | ||
/**
 * Re-enable DMA after suspend: verify the sarea, ring mapping, and status
 * page survived, then reprogram HWS_PGA.
 *
 * Does not reallocate anything — it only validates state set up earlier by
 * i915_initialize()/i915_init_phys_hws() and rewrites the status-page
 * register, choosing the GTT address if one was set, else the physical
 * DMA address.
 *
 * Returns 0 on success, -EINVAL for missing sarea/status page, -ENOMEM
 * for a missing ring mapping.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
251 | ||
252 | static int i915_dma_init(struct drm_device *dev, void *data, | |
253 | struct drm_file *file_priv) | |
254 | { | |
255 | drm_i915_init_t *init = data; | |
256 | int retcode = 0; | |
257 | ||
258 | switch (init->func) { | |
259 | case I915_INIT_DMA: | |
260 | retcode = i915_initialize(dev, init); | |
261 | break; | |
262 | case I915_CLEANUP_DMA: | |
263 | retcode = i915_dma_cleanup(dev); | |
264 | break; | |
265 | case I915_RESUME_DMA: | |
266 | retcode = i915_dma_resume(dev); | |
267 | break; | |
268 | default: | |
269 | retcode = -EINVAL; | |
270 | break; | |
271 | } | |
272 | ||
273 | return retcode; | |
274 | } | |
275 | ||
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Returns the length in dwords of the command whose header dword is
 * 'cmd', since that tells the caller where the next command to check
 * begins.  Any disallowed or unparseable command yields 0, which is the
 * signal to abort validation of the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;
	int opcode;

	if (client == 0x0) {
		/* Memory-interface client: only two commands allowed. */
		opcode = (cmd >> 23) & 0x3f;
		if (opcode == 0x0)
			return 1;	/* MI_NOOP */
		if (opcode == 0x4)
			return 1;	/* MI_FLUSH */
		return 0;		/* disallow everything else */
	}

	if (client == 0x1)
		return 0;		/* reserved */

	if (client == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */

	if (client != 0x3)
		return 0;

	/* 3d client: length encoding depends on the opcode. */
	opcode = (cmd >> 24) & 0x1f;
	if (opcode <= 0x18)
		return 1;

	switch (opcode) {
	case 0x1c:
		return 1;
	case 0x1d:
		switch ((cmd >> 16) & 0xff) {
		case 0x3:
			return (cmd & 0x1f) + 2;
		case 0x4:
			return (cmd & 0xf) + 2;
		default:
			return (cmd & 0xffff) + 2;
		}
	case 0x1e:
		return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
	case 0x1f:
		if ((cmd & (1 << 23)) == 0)	/* inline vertices */
			return (cmd & 0x1ffff) + 2;
		if (cmd & (1 << 17)) {		/* indirect random */
			if ((cmd & 0xffff) == 0)
				return 0;	/* unknown length, too hard */
			return (((cmd & 0xffff) + 1) / 2) + 1;
		}
		return 2;			/* indirect sequential */
	default:
		return 0;
	}
}
342 | ||
/* Thin wrapper around do_validate_cmd(), kept as a convenient hook
 * point for tracing validation results during debugging. */
static int validate_cmd(int cmd)
{
	/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
	return do_validate_cmd(cmd);
}
351 | ||
/**
 * Validate and copy a user-supplied command stream into the ring.
 *
 * Each command header is checked with validate_cmd(), which also yields
 * the command's dword length; payload dwords are copied through without
 * inspection.  An odd dword count is padded with a 0 (MI_NOOP) to keep
 * the ring tail qword-aligned.
 *
 * NOTE(review): the error returns after BEGIN_LP_RING happen without a
 * matching ADVANCE_LP_RING, leaving partially written commands and ring
 * state unsynced — confirm whether callers recover via
 * i915_kernel_lost_context before relying on this path.
 * NOTE(review): DRM_COPY_FROM_USER_UNCHECKED assumes the buffer range was
 * already access-verified by the caller — verify against the ioctl path.
 *
 * Returns 0 on success, -EINVAL on oversize buffers, copy faults, or
 * invalid commands.
 */
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Must fit in the ring with room to spare. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		/* sz == 0 marks an illegal command; also reject commands
		 * whose declared length runs past the buffer end. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Copy the command's payload dwords unmodified. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	/* Pad to an even dword count. */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
390 | ||
/**
 * Emit a DRAWRECT (cliprect) command for box @i of the user-supplied
 * @boxes array.
 *
 * The box is copied from user space, sanity-checked (non-empty, positive
 * extent), and emitted with the generation-appropriate command: the 965
 * variant takes 4 dwords without DR1, older chips take the 6-dword form.
 *
 * Returns 0 on success, -EFAULT on a copy fault, -EINVAL for a
 * degenerate box.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect __user *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	/* Reject empty or inverted rectangles. */
	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
430 | ||
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */

/**
 * Emit a breadcrumb: store the incremented frame counter into the
 * hardware status page so completion can be detected by polling
 * READ_BREADCRUMB.  The counter wraps at 0x7FFFFFFF and is mirrored
 * into the sarea for user space.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;	/* keep it a positive signed value */
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);	/* padding dword */
	ADVANCE_LP_RING();
}
453 | ||
/**
 * Dispatch a validated command buffer once per cliprect (or once with no
 * cliprects), then emit a completion breadcrumb.
 *
 * cmd->sz must be dword-aligned.  Each iteration first emits the cliprect
 * (if any) and then replays the whole command stream through
 * i915_emit_cmds().
 *
 * Returns 0 on success or the first error from box/command emission.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;	/* always dispatch at least once */

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
485 | ||
/**
 * Dispatch a hardware batch buffer once per cliprect (or once with no
 * cliprects), then emit a completion breadcrumb.
 *
 * batch->start and batch->used must be 8-byte aligned.  Batches run
 * non-secure so the hardware command checker applies.  On 830/845G,
 * which lack MI_BATCH_BUFFER_START, the explicit start/end form
 * (MI_BATCH_BUFFER) is used instead.
 *
 * Returns 0 on success, -EINVAL on misalignment, or the first error from
 * cliprect emission.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;	/* always dispatch at least once */

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			/* 830/845: spell out start and last-dword address. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
536 | ||
/**
 * Emit a front/back page flip: flush, program the new display base via
 * CMD_OP_DISPLAYBUFFER_INFO with ASYNC_FLIP, wait for the flip to occur,
 * and store a breadcrumb.  Toggles dev_priv->current_page and mirrors it
 * into the sarea so user space tracks which buffer is front.
 *
 * Returns 0, or -EINVAL if the sarea has not been set up.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (!dev_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush caches before reprogramming the display base. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Alternate between the two scanout buffers. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall until the flip has actually happened. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Breadcrumb, emitted inline rather than via i915_emit_breadcrumb
	 * since the counter was already advanced above. */
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
587 | ||
/**
 * Wait for the ring to drain completely (all but the mandatory 8 unused
 * bytes free), resyncing software state first.
 *
 * Returns 0 once idle, -EBUSY if the hardware stalls.
 */
static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
595 | ||
/**
 * DRM_I915_FLUSH ioctl: block until the ring is idle.
 *
 * Requires the caller to hold the DRM hardware lock (checked by
 * RING_LOCK_TEST_WITH_RETURN); serializes against other ring users via
 * struct_mutex.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
609 | ||
/**
 * DRM_I915_BATCHBUFFER ioctl: verify the cliprect array is readable and
 * dispatch the batch under struct_mutex, then publish the latest
 * breadcrumb to the sarea.
 *
 * Rejected with -EINVAL when batchbuffers were disabled via setparam,
 * -EFAULT if the cliprect array fails verification.
 */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Verify the cliprect range up front; the batch contents themselves
	 * are validated by the hardware (non-secure dispatch). */
	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
642 | ||
/**
 * DRM_I915_CMDBUFFER ioctl: verify the cliprect array, then validate and
 * dispatch the command buffer under struct_mutex, publishing the latest
 * breadcrumb to the sarea on success.
 *
 * Returns 0 on success, -EFAULT on cliprect verification failure, or the
 * dispatch error.
 */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
677 | ||
/**
 * DRM_I915_FLIP ioctl: perform a page flip under the hardware lock and
 * struct_mutex.
 *
 * Returns the result of i915_dispatch_flip().
 */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
693 | ||
/**
 * DRM_I915_GETPARAM ioctl: report a driver/hardware capability value.
 *
 * Copies the requested integer to the user pointer in param->value.
 * Returns 0 on success, -EINVAL for an unknown parameter or missing
 * initialization, -EFAULT if the copy to user space fails.
 */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;	/* GEM support is always built in here */
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
734 | ||
735 | static int i915_setparam(struct drm_device *dev, void *data, | |
736 | struct drm_file *file_priv) | |
737 | { | |
738 | drm_i915_private_t *dev_priv = dev->dev_private; | |
739 | drm_i915_setparam_t *param = data; | |
740 | ||
741 | if (!dev_priv) { | |
742 | DRM_ERROR("called with no initialization\n"); | |
743 | return -EINVAL; | |
744 | } | |
745 | ||
746 | switch (param->param) { | |
747 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: | |
748 | break; | |
749 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: | |
750 | dev_priv->tex_lru_log_granularity = param->value; | |
751 | break; | |
752 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | |
753 | dev_priv->allow_batchbuffer = param->value; | |
754 | break; | |
755 | default: | |
756 | DRM_ERROR("unknown parameter %d\n", param->param); | |
757 | return -EINVAL; | |
758 | } | |
759 | ||
760 | return 0; | |
761 | } | |
762 | ||
/**
 * DRM_I915_HWS_ADDR ioctl: install a status page living in graphics
 * memory (for chipsets that require it, e.g. G33-class hardware).
 *
 * Maps the page at hws->addr relative to the AGP aperture, zeroes it,
 * and programs HWS_PGA with the graphics address.
 *
 * NOTE(review): the failure path calls i915_dma_cleanup(), which tears
 * down far more than this function set up — confirm this is intentional.
 *
 * Returns 0 on success, -EINVAL if the device does not use a GFX status
 * page or is uninitialized, -ENOMEM if the mapping fails.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only the page-aligned graphics address bits. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
804 | ||
805 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | |
806 | { | |
807 | struct drm_i915_private *dev_priv = dev->dev_private; | |
808 | unsigned long base, size; | |
809 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; | |
810 | ||
811 | /* i915 has 4 more counters */ | |
812 | dev->counters += 4; | |
813 | dev->types[6] = _DRM_STAT_IRQ; | |
814 | dev->types[7] = _DRM_STAT_PRIMARY; | |
815 | dev->types[8] = _DRM_STAT_SECONDARY; | |
816 | dev->types[9] = _DRM_STAT_DMA; | |
817 | ||
818 | dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); | |
819 | if (dev_priv == NULL) | |
820 | return -ENOMEM; | |
821 | ||
822 | memset(dev_priv, 0, sizeof(drm_i915_private_t)); | |
823 | ||
824 | dev->dev_private = (void *)dev_priv; | |
825 | dev_priv->dev = dev; | |
826 | ||
827 | /* Add register map (needed for suspend/resume) */ | |
828 | base = drm_get_resource_start(dev, mmio_bar); | |
829 | size = drm_get_resource_len(dev, mmio_bar); | |
830 | ||
831 | dev_priv->regs = ioremap(base, size); | |
832 | ||
833 | i915_gem_load(dev); | |
834 | ||
835 | /* Init HWS */ | |
836 | if (!I915_NEED_GFX_HWS(dev)) { | |
837 | ret = i915_init_phys_hws(dev); | |
838 | if (ret != 0) | |
839 | return ret; | |
840 | } | |
841 | ||
842 | /* On the 945G/GM, the chipset reports the MSI capability on the | |
843 | * integrated graphics even though the support isn't actually there | |
844 | * according to the published specs. It doesn't appear to function | |
845 | * correctly in testing on 945G. | |
846 | * This may be a side effect of MSI having been made available for PEG | |
847 | * and the registers being closely associated. | |
848 | * | |
849 | * According to chipset errata, on the 965GM, MSI interrupts may | |
850 | * be lost or delayed, but we use them anyways to avoid | |
851 | * stuck interrupts on some machines. | |
852 | */ | |
853 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | |
854 | pci_enable_msi(dev->pdev); | |
855 | ||
856 | intel_opregion_init(dev); | |
857 | ||
858 | spin_lock_init(&dev_priv->user_irq_lock); | |
859 | ||
860 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | |
861 | ||
862 | if (ret) { | |
863 | (void) i915_driver_unload(dev); | |
864 | return ret; | |
865 | } | |
866 | ||
867 | return ret; | |
868 | } | |
869 | ||
/**
 * Driver unload callback: release everything i915_driver_load() set up —
 * MSI, the hardware status page, the MMIO mapping, opregion state, and
 * finally the private structure itself.
 *
 * Always returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	i915_free_hws(dev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	intel_opregion_free(dev);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}
889 | ||
890 | int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |
891 | { | |
892 | struct drm_i915_file_private *i915_file_priv; | |
893 | ||
894 | DRM_DEBUG("\n"); | |
895 | i915_file_priv = (struct drm_i915_file_private *) | |
896 | drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); | |
897 | ||
898 | if (!i915_file_priv) | |
899 | return -ENOMEM; | |
900 | ||
901 | file_priv->driver_priv = i915_file_priv; | |
902 | ||
903 | i915_file_priv->mm.last_gem_seqno = 0; | |
904 | i915_file_priv->mm.last_gem_throttle_seqno = 0; | |
905 | ||
906 | return 0; | |
907 | } | |
908 | ||
/**
 * Last-close hook: when the final file handle goes away, drop GEM state,
 * tear down the AGP heap if one was created, and run full DMA cleanup.
 *
 * Bails out early if the driver never finished loading (no dev_private).
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
923 | ||
924 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | |
925 | { | |
926 | drm_i915_private_t *dev_priv = dev->dev_private; | |
927 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | |
928 | } | |
929 | ||
930 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | |
931 | { | |
932 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | |
933 | ||
934 | drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); | |
935 | } | |
936 | ||
/* Ioctl dispatch table.  Flags: DRM_AUTH requires an authenticated
 * client, DRM_MASTER restricts to the DRM master, DRM_ROOT_ONLY to
 * CAP_SYS_ADMIN; a 0 flag means the ioctl is available to any client
 * (the render-safe GEM entry points). */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
};

/* Number of entries in i915_ioctls, exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
975 | ||
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e, so this unconditionally reports AGP.
 *
 * \param dev The device to be tested (unused).
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}