/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

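/*
 * Return the next object to consider from the render and BSD active lists,
 * merged in order of increasing last_rendering_seqno, and advance the
 * corresponding iterator. Returns NULL once both iterators have walked
 * back round to their list heads. Without BSD support only the render
 * list is consulted.
 */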
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
			    struct list_head **render_iter,
			    struct list_head **bsd_iter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

	if (*render_iter != &dev_priv->render_ring.active_list)
		render_obj = list_entry(*render_iter,
					struct drm_i915_gem_object,
					list);

	if (HAS_BSD(dev)) {
		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
			bsd_obj = list_entry(*bsd_iter,
					     struct drm_i915_gem_object,
					     list);

		if (render_obj == NULL) {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}

		if (bsd_obj == NULL) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		}

		/* XXX can we handle seqno wrapping? */
		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		} else {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}
	} else {
		*render_iter = (*render_iter)->next;
		return render_obj;
	}
}

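/*
 * Add the object to the unwind list, hold a reference so it cannot
 * disappear during the scan, and feed its GTT space to the drm_mm
 * scanner. Returns true once the scanner has accumulated a large enough
 * hole.
 */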
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	  struct list_head *unwind)
{
	list_add(&obj_priv->evict_list, unwind);
	drm_gem_object_reference(&obj_priv->base);
	return drm_mm_scan_add_block(obj_priv->gtt_space);
}

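/*
 * Iterate over both rings' active lists in a single pass, visiting objects
 * in order of increasing last_rendering_seqno. R and B are the per-ring
 * list cursors, initialised here to the first entry of each active list.
 */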
#define i915_for_each_active_object(OBJ, R, B) \
	*(R) = dev_priv->render_ring.active_list.next; \
	*(B) = dev_priv->bsd_ring.active_list.next; \
	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)

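/*
 * Search the GTT for a free hole of at least min_size bytes at the given
 * alignment, evicting just enough objects (in the LRU order described by
 * the comment below) to create one if necessary. Returns 0 on success,
 * -ENOSPC when no suitable set of objects could be found, or the first
 * error encountered while unbinding; on -ENOSPC the caller is expected to
 * unpin, evict everything and retry, or give up.
 */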
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv;
	struct list_head *render_iter, *bsd_iter;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
			       min_size, alignment, 0))
		return 0;

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects will
	 * be retired to the inactive list. Any dirty objects will be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit an MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
		if (obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		if (!obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
		BUG_ON(ret);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary list.
	 */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj_priv = list_first_entry(&unwind_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
			list_move(&obj_priv->evict_list, &eviction_list);
			continue;
		}
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj_priv = list_first_entry(&eviction_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(&obj_priv->base);
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	return ret;
}

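/*
 * Evict every object from the GTT: idle the GPU so that outstanding
 * rendering is flushed and retired onto the inactive list, then unbind all
 * inactive objects. Returns -ENOSPC if the lists were already empty
 * (nothing to evict), or the first error from idling or unbinding.
 */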
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_inactive(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}