/*
 * intel_ringbuffer.h - ring buffer definitions for the Intel i915 DRM driver.
 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Per-ring hardware status page: a page of memory the GPU can write to
 * and the CPU reads (see intel_read_status_page() below).
 */
struct intel_hw_status_page {
	void *page_addr;		/* CPU (kernel virtual) address of the page */
	unsigned int gfx_addr;		/* GPU-visible address of the page */
	struct drm_gem_object *obj;	/* backing GEM object -- NOTE(review):
					 * presumably NULL when the page is not
					 * GEM-backed; confirm with users. */
};

/*
 * MMIO accessors for the per-ring control registers.  Each ring's register
 * block sits at a fixed offset from ring->mmio_base, so one set of
 * accessors serves every ring.
 *
 * Fix: the macro arguments are parenthesized so that a non-trivial
 * argument expression (e.g. I915_READ_TAIL(&dev_priv->rings[i])) expands
 * correctly instead of mis-binding the -> operator.
 */
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), (val))
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), (val))
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), (val))
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), (val))

struct drm_i915_gem_execbuffer2;

/*
 * State for one hardware command ring (render, BSD video decode, or
 * blitter): its MMIO location, mapped backing storage, software
 * head/tail bookkeeping, per-ring vfuncs, and request tracking lists.
 */
struct intel_ring_buffer {
	const char	*name;
	/* Ring identifiers; note the values are distinct bits. */
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;	/* base of this ring's register block */
	unsigned long	size;		/* size of the ring buffer in bytes */
	void		*virtual_start;	/* CPU mapping of the ring contents */
	struct drm_device *dev;
	struct drm_gem_object *gem_object;	/* backing storage for the ring */

	/* Software head/tail offsets into the ring, in bytes. */
	unsigned int	head;
	unsigned int	tail;
	int		space;		/* free bytes left in the ring */

	struct intel_hw_status_page status_page;

	u32		irq_gem_seqno;		/* last seqno seen at irq time */
	u32		waiting_gem_seqno;	/* seqno currently being waited for */
	int		user_irq_refcount;	/* nesting count for irq get/put */
	void		(*user_irq_get)(struct drm_device *dev,
					struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct drm_device *dev,
					struct intel_ring_buffer *ring);

	int		(*init)(struct drm_device *dev,
				struct intel_ring_buffer *ring);

	/* Publish a new tail value to the hardware. */
	void		(*write_tail)(struct drm_device *dev,
				      struct intel_ring_buffer *ring,
				      u32 value);
	void		(*flush)(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	/* Emit a request breadcrumb; returns the seqno used. */
	u32		(*add_request)(struct drm_device *dev,
				       struct intel_ring_buffer *ring,
				       u32 flush_domains);
	u32		(*get_seqno)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
						   struct intel_ring_buffer *ring,
						   struct drm_i915_gem_execbuffer2 *exec,
						   struct drm_clip_rect *cliprects,
						   uint64_t exec_offset);
	/* Optional; release whatever 'private' below points at. */
	void		(*cleanup)(struct intel_ring_buffer *ring);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list, last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;	/* waiters on this ring's interrupts */
	drm_local_map_t map;

	void *private;			/* per-ring-type private state */
};

106 | static inline u32 | |
107 | intel_read_status_page(struct intel_ring_buffer *ring, | |
108 | int reg) | |
109 | { | |
110 | u32 *regs = ring->status_page.page_addr; | |
111 | return regs[reg]; | |
112 | } | |
113 | ||
/* Common setup/teardown shared by all ring types. */
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
/* Wait for ring space -- presumably until @n bytes are free; confirm. */
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n);
/* Prepare to emit @n bytes via intel_ring_emit() below. */
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring, int n);
e898cd22 CW |
122 | |
123 | static inline void intel_ring_emit(struct drm_device *dev, | |
124 | struct intel_ring_buffer *ring, | |
125 | unsigned int data) | |
126 | { | |
127 | unsigned int *virt = ring->virtual_start + ring->tail; | |
128 | *virt = data; | |
129 | ring->tail += 4; | |
130 | } | |
131 | ||
/* Publish the software tail -- presumably via ring->write_tail; confirm. */
void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct drm_device *dev,
			 struct intel_ring_buffer *ring);

/* Constructors for the individual rings. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */