/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

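/*
 * Management of the per-engine command ring buffers: the render ring and
 * the BSD bit-stream decoder ring.  Each ring is a GEM object mapped
 * write-combined through the GTT aperture, paired with a hardware status
 * page that the GPU writes sequence numbers into.
 */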
#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

static void
render_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, ring->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

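		/*
		 * For example, flushing just I915_GEM_DOMAIN_RENDER ends up
		 * as a plain MI_FLUSH with the write flush enabled, i.e. the
		 * MI_NO_WRITE_FLUSH bit cleared again below.
		 */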
		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}

static unsigned int render_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

	return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(PRB0_TAIL, ring->tail);
}

static int init_ring_common(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE(ring->regs.ctl, 0);
	I915_WRITE(ring->regs.head, 0);
	I915_WRITE(ring->regs.tail, 0);

	/* Initialize the ring. */
	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
	head = ring->get_head(dev, ring);

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));

		I915_WRITE(ring->regs.head, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
	}

	I915_WRITE(ring->regs.ctl,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);

	head = I915_READ(ring->regs.head) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
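		/*
		 * Free space is the gap from the tail back round to the
		 * head, less eight bytes so that a completely full ring is
		 * never mistaken for an empty one (head == tail).
		 */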
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}

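/*
 * Per-ring initialisation for the render ring.  On gen4 and later the
 * VS_TIMER_DISPATCH bit is set through MI_MODE, a masked register whose
 * high 16 bits select which bits the write actually updates.
 */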
static int init_render_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
		I915_WRITE(MI_MODE,
				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
	}
	return ret;
}

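/*
 * Emit a stalling PIPE_CONTROL with a qword write to @addr; used in
 * render_ring_add_request() below to flush the PIPE_NOTIFY write out to
 * memory before the interrupt is raised.
 */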
#define PIPE_CONTROL_FLUSH(addr) \
do { \
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL | 2); \
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
	OUT_RING(0); \
	OUT_RING(0); \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	u32 seqno;
	drm_i915_private_t *dev_priv = dev->dev_private;
	seqno = intel_ring_get_seqno(dev, ring);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

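/*
 * When PIPE_CONTROL is used the current seqno lives in the dedicated
 * seqno page rather than in the hardware status page.
 */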
static u32
render_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

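/*
 * The user interrupt is reference counted: it is enabled in hardware on
 * the first get and disabled again when the last reference is dropped.
 */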
static void
render_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}
}

void
bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	u32 seqno;
	seqno = intel_ring_get_seqno(dev, ring);
	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}

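/*
 * Dispatch on the render ring: the batch is (re)emitted once per
 * cliprect.  830/845 use the two-address MI_BATCH_BUFFER form, 965 uses
 * MI_BATCH_BUFFER_START with the 965 non-secure bit, and older chips
 * carry the non-secure bit in the start address itself.
 */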
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 4);
			if (IS_I965G(dev)) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	/* XXX breadcrumb */
	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

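/*
 * The hardware status page is a single cached page the GPU writes
 * sequence numbers and other status into.  It is allocated as a GEM
 * object, pinned, kmapped for the CPU and its GTT address handed to the
 * hardware through the ring's ->setup_status_page() hook.
 */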
static int init_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

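/*
 * Common ring construction: set up the (optional) hardware status page,
 * allocate and pin the ring object, map it write-combined through the
 * GTT aperture and then run the ring's own ->init() hook.
 */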
int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	int ret;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	ring->dev = dev;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto cleanup;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, ring->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		goto cleanup;
	}

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		ret = -EINVAL;
		goto cleanup;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret != 0) {
		intel_cleanup_ring_buffer(dev, ring);
		return ret;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;

cleanup:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}

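/*
 * A request must be contiguous in the ring.  If it would straddle the
 * end, pad the remainder with MI_NOOPs and restart at offset zero; the
 * padding itself consumes ring space, so the free space is recomputed
 * against the current head once the tail has wrapped.
 */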
int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 4;
	while (rem--)
		*virt++ = MI_NOOP;

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

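/*
 * Wait for at least @n bytes of free space, polling the hardware head
 * pointer and yielding in between; gives up with -EBUSY after roughly
 * three seconds.
 */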
int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n)
{
	unsigned long end;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = ring->get_head(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

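/*
 * Commands are emitted with a begin/emit/advance sequence, e.g. (as in
 * bsd_ring_flush() above):
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_FLUSH);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 *
 * begin reserves space for the given number of dwords (wrapping and/or
 * waiting as needed), emit writes one dword at the tail, and advance
 * writes the new tail out to the hardware register.
 */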
void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int num_dwords)
{
	int n = 4 * num_dwords;
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);
}

void intel_ring_emit(struct drm_device *dev,
		struct intel_ring_buffer *ring, unsigned int data)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	*virt = data;
	ring->tail += 4;
	ring->tail &= ring->size - 1;
	ring->space -= 4;
}

void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	ring->advance_ring(dev, ring);
}

void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len)
{
	unsigned int *virt;

	BUG_ON((len & (4 - 1)) != 0);	/* must be a whole number of dwords */
	intel_ring_begin(dev, ring, len/4);
	/* take the pointer only after begin, which may have wrapped the tail */
	virt = ring->virtual_start + ring->tail;
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}

u32 intel_ring_get_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	u32 seqno;
	seqno = ring->next_seqno;

	/* reserve 0 for non-seqno */
	if (++ring->next_seqno == 0)
		ring->next_seqno = 1;
	return seqno;
}

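/* Render command ring, living at the PRB0 register block. */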
struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.regs = {
		.ctl = PRB0_CTL,
		.head = PRB0_HEAD,
		.tail = PRB0_TAIL,
		.start = PRB0_START
	},
	.ring_flag = I915_EXEC_RENDER,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.get_head = render_ring_get_head,
	.get_tail = render_ring_get_tail,
	.get_active_head = render_ring_get_active_head,
	.advance_ring = render_ring_advance_ring,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.regs = {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	.ring_flag = I915_EXEC_BSD,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.get_head = bsd_ring_get_head,
	.get_tail = bsd_ring_get_tail,
	.get_active_head = bsd_ring_get_active_head,
	.advance_ring = bsd_ring_advance_ring,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};