/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

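/*
 * Return the oldest object on the render and BSD active lists, i.e. the
 * one with the lowest last_rendering_seqno, and advance the corresponding
 * iterator past it. Returns NULL once both active lists are exhausted.
 */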
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
                            struct list_head **render_iter,
                            struct list_head **bsd_iter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

        if (*render_iter != &dev_priv->render_ring.active_list)
                render_obj = list_entry(*render_iter,
                                        struct drm_i915_gem_object,
                                        list);

        if (HAS_BSD(dev)) {
                if (*bsd_iter != &dev_priv->bsd_ring.active_list)
                        bsd_obj = list_entry(*bsd_iter,
                                             struct drm_i915_gem_object,
                                             list);

                if (render_obj == NULL) {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }

                if (bsd_obj == NULL) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                }

                /* XXX can we handle seqno wrapping? */
                if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
                        *render_iter = (*render_iter)->next;
                        return render_obj;
                } else {
                        *bsd_iter = (*bsd_iter)->next;
                        return bsd_obj;
                }
        } else {
                *render_iter = (*render_iter)->next;
                return render_obj;
        }
}

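/*
 * Add the object to the unwind list, take a reference so it cannot vanish
 * while the scan is in progress, and register its GTT space with the
 * drm_mm eviction scanner. Returns true once the scanner has found a hole
 * large enough to satisfy the request.
 */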
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
          struct list_head *unwind)
{
        list_add(&obj_priv->evict_list, unwind);
        drm_gem_object_reference(&obj_priv->base);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
}

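/*
 * Walk every object on the render and BSD active lists in order of
 * increasing last_rendering_seqno (oldest first). Relies on dev and
 * dev_priv being in scope at the call site.
 */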
#define i915_for_each_active_object(OBJ, R, B) \
        *(R) = dev_priv->render_ring.active_list.next; \
        *(B) = dev_priv->bsd_ring.active_list.next; \
        while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)

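/*
 * Evict just enough objects, in LRU order, to open up a contiguous hole in
 * the GTT of at least min_size bytes at the requested alignment. Returns 0
 * on success, -ENOSPC if no suitable set of objects could be found, or the
 * error returned while unbinding.
 */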
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;

        i915_gem_retire_requests(dev);

        /* Re-check for free space after retiring requests */
        if (drm_mm_search_free(&dev_priv->mm.gtt_space,
                               min_size, alignment, 0))
                return 0;

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the
         * (per-ring) active list that do not have an outstanding flush.
         * Once the hardware reports completion (the seqno is updated after
         * the batchbuffer has been finished) the clean buffer objects are
         * retired to the inactive list. Any dirty objects are added to the
         * tail of the flushing list. So after processing the clean active
         * objects we need to emit an MI_FLUSH to retire the flushing list,
         * hence the flushing list retires ahead of the dirty objects on the
         * active lists.
         *
         * The retirement sequence is thus:
         * 1. Inactive objects (already retired)
         * 2. Clean active objects
         * 3. Flushing list
         * 4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Now merge in the soon-to-be-expired objects... */
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                /* Does the object require an outstanding flush? */
                if (obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Next, add anything with a pending flush (in order of retirement) */
        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
                if (obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
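        /* Finally, fall back to the dirty objects still on the active lists */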
        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
                if (!obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;

                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);
                drm_gem_object_unreference(&obj_priv->base);
        }

        /* We expect the caller to unpin, evict all and try again, or give up.
         * So calling i915_gem_evict_everything() is unnecessary.
         */
        return -ENOSPC;

found:
        INIT_LIST_HEAD(&eviction_list);
        list_for_each_entry_safe(obj_priv, tmp_obj_priv,
                                 &unwind_list, evict_list) {
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
                        /* drm_mm doesn't allow any other operations while
                         * scanning, therefore store the objects to be
                         * evicted on a temporary list. */
                        list_move(&obj_priv->evict_list, &eviction_list);
                } else
                        drm_gem_object_unreference(&obj_priv->base);
        }

        /* Unbinding will emit any required flushes */
        list_for_each_entry_safe(obj_priv, tmp_obj_priv,
                                 &eviction_list, evict_list) {
#if WATCH_LRU
                DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
#endif
                ret = i915_gem_object_unbind(&obj_priv->base);
                if (ret)
                        return ret;

                drm_gem_object_unreference(&obj_priv->base);
        }

        /* The just-created free hole should be at the top of the free stack
         * maintained by drm_mm, so this BUG_ON actually executes in O(1).
         * Furthermore all accessed data has just recently been used, so it
         * should be really fast, too. */
        BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
                                   alignment, 0));

        return 0;
}

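/*
 * Idle the GPU so that every object retires to the inactive list, then
 * unbind them all. Returns -ENOSPC if there was nothing to evict in the
 * first place, 0 on success, or the error from idling/unbinding.
 */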
int
i915_gem_evict_everything(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        bool lists_empty;

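        /* If every list is already empty, there is nothing to evict. */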
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
                return -ENOSPC;

        /* Flush everything (on to the inactive lists) and evict */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

        ret = i915_gem_evict_inactive(dev);
        if (ret)
                return ret;

        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev)
                        || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);

        return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        while (!list_empty(&dev_priv->mm.inactive_list)) {
                struct drm_gem_object *obj;
                int ret;

                obj = &list_first_entry(&dev_priv->mm.inactive_list,
                                        struct drm_i915_gem_object,
                                        list)->base;

                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
                        DRM_ERROR("Error unbinding object: %d\n", ret);
                        return ret;
                }
        }

        return 0;
}