]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/gpu/drm/radeon/radeon_gem.c
drm/radeon/kms: fix regression rendering issue on R6XX/R7XX
[net-next-2.6.git] / drivers / gpu / drm / radeon / radeon_gem.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "radeon_drm.h"
31#include "radeon.h"
32
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* nothing to do here; all per-BO setup happens in
	 * radeon_gem_object_create() */
	return 0;
}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{
4c788679 41 struct radeon_bo *robj = gobj->driver_private;
771fe6b9
JG
42
43 gobj->driver_private = NULL;
44 if (robj) {
4c788679 45 radeon_bo_unref(&robj);
771fe6b9
JG
46 }
47}
48
49int radeon_gem_object_create(struct radeon_device *rdev, int size,
4c788679
JG
50 int alignment, int initial_domain,
51 bool discardable, bool kernel,
52 struct drm_gem_object **obj)
771fe6b9
JG
53{
54 struct drm_gem_object *gobj;
4c788679 55 struct radeon_bo *robj;
771fe6b9
JG
56 int r;
57
58 *obj = NULL;
59 gobj = drm_gem_object_alloc(rdev->ddev, size);
60 if (!gobj) {
61 return -ENOMEM;
62 }
63 /* At least align on page size */
64 if (alignment < PAGE_SIZE) {
65 alignment = PAGE_SIZE;
66 }
4c788679 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
771fe6b9 68 if (r) {
ecabd32a
DA
69 if (r != -ERESTARTSYS)
70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r);
771fe6b9
JG
72 mutex_lock(&rdev->ddev->struct_mutex);
73 drm_gem_object_unreference(gobj);
74 mutex_unlock(&rdev->ddev->struct_mutex);
75 return r;
76 }
77 gobj->driver_private = robj;
78 *obj = gobj;
79 return 0;
80}
81
82int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
83 uint64_t *gpu_addr)
84{
4c788679
JG
85 struct radeon_bo *robj = obj->driver_private;
86 int r;
771fe6b9 87
4c788679
JG
88 r = radeon_bo_reserve(robj, false);
89 if (unlikely(r != 0))
90 return r;
91 r = radeon_bo_pin(robj, pin_domain, gpu_addr);
92 radeon_bo_unreserve(robj);
93 return r;
771fe6b9
JG
94}
95
96void radeon_gem_object_unpin(struct drm_gem_object *obj)
97{
4c788679
JG
98 struct radeon_bo *robj = obj->driver_private;
99 int r;
100
101 r = radeon_bo_reserve(robj, false);
102 if (likely(r == 0)) {
103 radeon_bo_unpin(robj);
104 radeon_bo_unreserve(robj);
105 }
771fe6b9
JG
106}
107
108int radeon_gem_set_domain(struct drm_gem_object *gobj,
109 uint32_t rdomain, uint32_t wdomain)
110{
4c788679 111 struct radeon_bo *robj;
771fe6b9
JG
112 uint32_t domain;
113 int r;
114
115 /* FIXME: reeimplement */
116 robj = gobj->driver_private;
117 /* work out where to validate the buffer to */
118 domain = wdomain;
119 if (!domain) {
120 domain = rdomain;
121 }
122 if (!domain) {
123 /* Do nothings */
124 printk(KERN_WARNING "Set domain withou domain !\n");
125 return 0;
126 }
127 if (domain == RADEON_GEM_DOMAIN_CPU) {
128 /* Asking for cpu access wait for object idle */
4c788679 129 r = radeon_bo_wait(robj, NULL, false);
771fe6b9
JG
130 if (r) {
131 printk(KERN_ERR "Failed to wait for object !\n");
132 return r;
133 }
134 }
135 return 0;
136}
137
138int radeon_gem_init(struct radeon_device *rdev)
139{
140 INIT_LIST_HEAD(&rdev->gem.objects);
141 return 0;
142}
143
/* Tear down GEM state, force-deleting any BOs still alive. */
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
148
149
150/*
151 * GEM ioctls.
152 */
153int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
154 struct drm_file *filp)
155{
156 struct radeon_device *rdev = dev->dev_private;
157 struct drm_radeon_gem_info *args = data;
158
7a50f01a 159 args->vram_size = rdev->mc.real_vram_size;
38e14921
MD
160 args->vram_visible = rdev->mc.real_vram_size;
161 if (rdev->stollen_vga_memory)
4c788679
JG
162 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
163 if (rdev->fbdev_rbo)
164 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
38e14921
MD
165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
166 RADEON_IB_POOL_SIZE*64*1024;
771fe6b9
JG
167 return 0;
168}
169
170int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
171 struct drm_file *filp)
172{
173 /* TODO: implement */
174 DRM_ERROR("unimplemented %s\n", __func__);
175 return -ENOSYS;
176}
177
178int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
179 struct drm_file *filp)
180{
181 /* TODO: implement */
182 DRM_ERROR("unimplemented %s\n", __func__);
183 return -ENOSYS;
184}
185
186int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
187 struct drm_file *filp)
188{
189 struct radeon_device *rdev = dev->dev_private;
190 struct drm_radeon_gem_create *args = data;
191 struct drm_gem_object *gobj;
192 uint32_t handle;
193 int r;
194
195 /* create a gem object to contain this object in */
196 args->size = roundup(args->size, PAGE_SIZE);
197 r = radeon_gem_object_create(rdev, args->size, args->alignment,
4c788679
JG
198 args->initial_domain, false,
199 false, &gobj);
771fe6b9
JG
200 if (r) {
201 return r;
202 }
203 r = drm_gem_handle_create(filp, gobj, &handle);
204 if (r) {
205 mutex_lock(&dev->struct_mutex);
206 drm_gem_object_unreference(gobj);
207 mutex_unlock(&dev->struct_mutex);
208 return r;
209 }
210 mutex_lock(&dev->struct_mutex);
211 drm_gem_object_handle_unreference(gobj);
212 mutex_unlock(&dev->struct_mutex);
213 args->handle = handle;
214 return 0;
215}
216
217int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
218 struct drm_file *filp)
219{
220 /* transition the BO to a domain -
221 * just validate the BO into a certain domain */
222 struct drm_radeon_gem_set_domain *args = data;
223 struct drm_gem_object *gobj;
4c788679 224 struct radeon_bo *robj;
771fe6b9
JG
225 int r;
226
227 /* for now if someone requests domain CPU -
228 * just make sure the buffer is finished with */
229
230 /* just do a BO wait for now */
231 gobj = drm_gem_object_lookup(dev, filp, args->handle);
232 if (gobj == NULL) {
233 return -EINVAL;
234 }
235 robj = gobj->driver_private;
236
237 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
238
239 mutex_lock(&dev->struct_mutex);
240 drm_gem_object_unreference(gobj);
241 mutex_unlock(&dev->struct_mutex);
242 return r;
243}
244
245int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *filp)
247{
248 struct drm_radeon_gem_mmap *args = data;
249 struct drm_gem_object *gobj;
4c788679 250 struct radeon_bo *robj;
771fe6b9
JG
251
252 gobj = drm_gem_object_lookup(dev, filp, args->handle);
253 if (gobj == NULL) {
254 return -EINVAL;
255 }
256 robj = gobj->driver_private;
4c788679 257 args->addr_ptr = radeon_bo_mmap_offset(robj);
771fe6b9
JG
258 mutex_lock(&dev->struct_mutex);
259 drm_gem_object_unreference(gobj);
260 mutex_unlock(&dev->struct_mutex);
4c788679 261 return 0;
771fe6b9
JG
262}
263
264int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
265 struct drm_file *filp)
266{
cefb87ef
DA
267 struct drm_radeon_gem_busy *args = data;
268 struct drm_gem_object *gobj;
4c788679 269 struct radeon_bo *robj;
cefb87ef 270 int r;
4361e52a 271 uint32_t cur_placement = 0;
cefb87ef
DA
272
273 gobj = drm_gem_object_lookup(dev, filp, args->handle);
274 if (gobj == NULL) {
275 return -EINVAL;
276 }
277 robj = gobj->driver_private;
4c788679 278 r = radeon_bo_wait(robj, &cur_placement, true);
9f844e51
MD
279 switch (cur_placement) {
280 case TTM_PL_VRAM:
cefb87ef 281 args->domain = RADEON_GEM_DOMAIN_VRAM;
9f844e51
MD
282 break;
283 case TTM_PL_TT:
cefb87ef 284 args->domain = RADEON_GEM_DOMAIN_GTT;
9f844e51
MD
285 break;
286 case TTM_PL_SYSTEM:
cefb87ef 287 args->domain = RADEON_GEM_DOMAIN_CPU;
9f844e51
MD
288 default:
289 break;
290 }
cefb87ef
DA
291 mutex_lock(&dev->struct_mutex);
292 drm_gem_object_unreference(gobj);
293 mutex_unlock(&dev->struct_mutex);
e3b2415e 294 return r;
771fe6b9
JG
295}
296
297int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
298 struct drm_file *filp)
299{
300 struct drm_radeon_gem_wait_idle *args = data;
301 struct drm_gem_object *gobj;
4c788679 302 struct radeon_bo *robj;
771fe6b9
JG
303 int r;
304
305 gobj = drm_gem_object_lookup(dev, filp, args->handle);
306 if (gobj == NULL) {
307 return -EINVAL;
308 }
309 robj = gobj->driver_private;
4c788679 310 r = radeon_bo_wait(robj, NULL, false);
062b389c
JG
311 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
771fe6b9
JG
314 mutex_lock(&dev->struct_mutex);
315 drm_gem_object_unreference(gobj);
316 mutex_unlock(&dev->struct_mutex);
317 return r;
318}
e024e110
DA
319
320int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
321 struct drm_file *filp)
322{
323 struct drm_radeon_gem_set_tiling *args = data;
324 struct drm_gem_object *gobj;
4c788679 325 struct radeon_bo *robj;
e024e110
DA
326 int r = 0;
327
328 DRM_DEBUG("%d \n", args->handle);
329 gobj = drm_gem_object_lookup(dev, filp, args->handle);
330 if (gobj == NULL)
331 return -EINVAL;
332 robj = gobj->driver_private;
4c788679 333 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
e024e110
DA
334 mutex_lock(&dev->struct_mutex);
335 drm_gem_object_unreference(gobj);
336 mutex_unlock(&dev->struct_mutex);
337 return r;
338}
339
340int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
341 struct drm_file *filp)
342{
343 struct drm_radeon_gem_get_tiling *args = data;
344 struct drm_gem_object *gobj;
4c788679 345 struct radeon_bo *rbo;
e024e110
DA
346 int r = 0;
347
348 DRM_DEBUG("\n");
349 gobj = drm_gem_object_lookup(dev, filp, args->handle);
350 if (gobj == NULL)
351 return -EINVAL;
4c788679
JG
352 rbo = gobj->driver_private;
353 r = radeon_bo_reserve(rbo, false);
354 if (unlikely(r != 0))
51f07b7e 355 goto out;
4c788679
JG
356 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
357 radeon_bo_unreserve(rbo);
51f07b7e 358out:
e024e110
DA
359 mutex_lock(&dev->struct_mutex);
360 drm_gem_object_unreference(gobj);
361 mutex_unlock(&dev->struct_mutex);
362 return r;
363}