]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/gpu/drm/radeon/radeon_gem.c
drm/radeon/kms: Rework radeon object handling
[net-next-2.6.git] / drivers / gpu / drm / radeon / radeon_gem.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "radeon_drm.h"
31#include "radeon.h"
32
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* Nothing to do here. */
	return 0;
}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{
4c788679 41 struct radeon_bo *robj = gobj->driver_private;
771fe6b9
JG
42
43 gobj->driver_private = NULL;
44 if (robj) {
4c788679 45 radeon_bo_unref(&robj);
771fe6b9
JG
46 }
47}
48
49int radeon_gem_object_create(struct radeon_device *rdev, int size,
4c788679
JG
50 int alignment, int initial_domain,
51 bool discardable, bool kernel,
52 struct drm_gem_object **obj)
771fe6b9
JG
53{
54 struct drm_gem_object *gobj;
4c788679 55 struct radeon_bo *robj;
771fe6b9
JG
56 int r;
57
58 *obj = NULL;
59 gobj = drm_gem_object_alloc(rdev->ddev, size);
60 if (!gobj) {
61 return -ENOMEM;
62 }
63 /* At least align on page size */
64 if (alignment < PAGE_SIZE) {
65 alignment = PAGE_SIZE;
66 }
4c788679 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
771fe6b9
JG
68 if (r) {
69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
70 size, initial_domain, alignment);
71 mutex_lock(&rdev->ddev->struct_mutex);
72 drm_gem_object_unreference(gobj);
73 mutex_unlock(&rdev->ddev->struct_mutex);
74 return r;
75 }
76 gobj->driver_private = robj;
77 *obj = gobj;
78 return 0;
79}
80
81int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
82 uint64_t *gpu_addr)
83{
4c788679
JG
84 struct radeon_bo *robj = obj->driver_private;
85 int r;
771fe6b9 86
4c788679
JG
87 r = radeon_bo_reserve(robj, false);
88 if (unlikely(r != 0))
89 return r;
90 r = radeon_bo_pin(robj, pin_domain, gpu_addr);
91 radeon_bo_unreserve(robj);
92 return r;
771fe6b9
JG
93}
94
95void radeon_gem_object_unpin(struct drm_gem_object *obj)
96{
4c788679
JG
97 struct radeon_bo *robj = obj->driver_private;
98 int r;
99
100 r = radeon_bo_reserve(robj, false);
101 if (likely(r == 0)) {
102 radeon_bo_unpin(robj);
103 radeon_bo_unreserve(robj);
104 }
771fe6b9
JG
105}
106
107int radeon_gem_set_domain(struct drm_gem_object *gobj,
108 uint32_t rdomain, uint32_t wdomain)
109{
4c788679 110 struct radeon_bo *robj;
771fe6b9
JG
111 uint32_t domain;
112 int r;
113
114 /* FIXME: reeimplement */
115 robj = gobj->driver_private;
116 /* work out where to validate the buffer to */
117 domain = wdomain;
118 if (!domain) {
119 domain = rdomain;
120 }
121 if (!domain) {
122 /* Do nothings */
123 printk(KERN_WARNING "Set domain withou domain !\n");
124 return 0;
125 }
126 if (domain == RADEON_GEM_DOMAIN_CPU) {
127 /* Asking for cpu access wait for object idle */
4c788679 128 r = radeon_bo_wait(robj, NULL, false);
771fe6b9
JG
129 if (r) {
130 printk(KERN_ERR "Failed to wait for object !\n");
131 return r;
132 }
4c788679 133 radeon_hdp_flush(robj->rdev);
771fe6b9
JG
134 }
135 return 0;
136}
137
138int radeon_gem_init(struct radeon_device *rdev)
139{
140 INIT_LIST_HEAD(&rdev->gem.objects);
141 return 0;
142}
143
void radeon_gem_fini(struct radeon_device *rdev)
{
	/* Force-delete any BOs still alive at teardown. */
	radeon_bo_force_delete(rdev);
}
148
149
150/*
151 * GEM ioctls.
152 */
153int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
154 struct drm_file *filp)
155{
156 struct radeon_device *rdev = dev->dev_private;
157 struct drm_radeon_gem_info *args = data;
158
7a50f01a 159 args->vram_size = rdev->mc.real_vram_size;
38e14921
MD
160 args->vram_visible = rdev->mc.real_vram_size;
161 if (rdev->stollen_vga_memory)
4c788679
JG
162 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
163 if (rdev->fbdev_rbo)
164 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
38e14921
MD
165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
166 RADEON_IB_POOL_SIZE*64*1024;
771fe6b9
JG
167 return 0;
168}
169
170int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
171 struct drm_file *filp)
172{
173 /* TODO: implement */
174 DRM_ERROR("unimplemented %s\n", __func__);
175 return -ENOSYS;
176}
177
178int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
179 struct drm_file *filp)
180{
181 /* TODO: implement */
182 DRM_ERROR("unimplemented %s\n", __func__);
183 return -ENOSYS;
184}
185
186int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
187 struct drm_file *filp)
188{
189 struct radeon_device *rdev = dev->dev_private;
190 struct drm_radeon_gem_create *args = data;
191 struct drm_gem_object *gobj;
192 uint32_t handle;
193 int r;
194
195 /* create a gem object to contain this object in */
196 args->size = roundup(args->size, PAGE_SIZE);
197 r = radeon_gem_object_create(rdev, args->size, args->alignment,
4c788679
JG
198 args->initial_domain, false,
199 false, &gobj);
771fe6b9
JG
200 if (r) {
201 return r;
202 }
203 r = drm_gem_handle_create(filp, gobj, &handle);
204 if (r) {
205 mutex_lock(&dev->struct_mutex);
206 drm_gem_object_unreference(gobj);
207 mutex_unlock(&dev->struct_mutex);
208 return r;
209 }
210 mutex_lock(&dev->struct_mutex);
211 drm_gem_object_handle_unreference(gobj);
212 mutex_unlock(&dev->struct_mutex);
213 args->handle = handle;
214 return 0;
215}
216
217int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
218 struct drm_file *filp)
219{
220 /* transition the BO to a domain -
221 * just validate the BO into a certain domain */
222 struct drm_radeon_gem_set_domain *args = data;
223 struct drm_gem_object *gobj;
4c788679 224 struct radeon_bo *robj;
771fe6b9
JG
225 int r;
226
227 /* for now if someone requests domain CPU -
228 * just make sure the buffer is finished with */
229
230 /* just do a BO wait for now */
231 gobj = drm_gem_object_lookup(dev, filp, args->handle);
232 if (gobj == NULL) {
233 return -EINVAL;
234 }
235 robj = gobj->driver_private;
236
237 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
238
239 mutex_lock(&dev->struct_mutex);
240 drm_gem_object_unreference(gobj);
241 mutex_unlock(&dev->struct_mutex);
242 return r;
243}
244
245int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *filp)
247{
248 struct drm_radeon_gem_mmap *args = data;
249 struct drm_gem_object *gobj;
4c788679 250 struct radeon_bo *robj;
771fe6b9
JG
251
252 gobj = drm_gem_object_lookup(dev, filp, args->handle);
253 if (gobj == NULL) {
254 return -EINVAL;
255 }
256 robj = gobj->driver_private;
4c788679 257 args->addr_ptr = radeon_bo_mmap_offset(robj);
771fe6b9
JG
258 mutex_lock(&dev->struct_mutex);
259 drm_gem_object_unreference(gobj);
260 mutex_unlock(&dev->struct_mutex);
4c788679 261 return 0;
771fe6b9
JG
262}
263
264int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
265 struct drm_file *filp)
266{
cefb87ef
DA
267 struct drm_radeon_gem_busy *args = data;
268 struct drm_gem_object *gobj;
4c788679 269 struct radeon_bo *robj;
cefb87ef
DA
270 int r;
271 uint32_t cur_placement;
272
273 gobj = drm_gem_object_lookup(dev, filp, args->handle);
274 if (gobj == NULL) {
275 return -EINVAL;
276 }
277 robj = gobj->driver_private;
4c788679 278 r = radeon_bo_wait(robj, &cur_placement, true);
9f844e51
MD
279 switch (cur_placement) {
280 case TTM_PL_VRAM:
cefb87ef 281 args->domain = RADEON_GEM_DOMAIN_VRAM;
9f844e51
MD
282 break;
283 case TTM_PL_TT:
cefb87ef 284 args->domain = RADEON_GEM_DOMAIN_GTT;
9f844e51
MD
285 break;
286 case TTM_PL_SYSTEM:
cefb87ef 287 args->domain = RADEON_GEM_DOMAIN_CPU;
9f844e51
MD
288 default:
289 break;
290 }
cefb87ef
DA
291 mutex_lock(&dev->struct_mutex);
292 drm_gem_object_unreference(gobj);
293 mutex_unlock(&dev->struct_mutex);
e3b2415e 294 return r;
771fe6b9
JG
295}
296
297int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
298 struct drm_file *filp)
299{
300 struct drm_radeon_gem_wait_idle *args = data;
301 struct drm_gem_object *gobj;
4c788679 302 struct radeon_bo *robj;
771fe6b9
JG
303 int r;
304
305 gobj = drm_gem_object_lookup(dev, filp, args->handle);
306 if (gobj == NULL) {
307 return -EINVAL;
308 }
309 robj = gobj->driver_private;
4c788679 310 r = radeon_bo_wait(robj, NULL, false);
771fe6b9
JG
311 mutex_lock(&dev->struct_mutex);
312 drm_gem_object_unreference(gobj);
313 mutex_unlock(&dev->struct_mutex);
4c788679 314 radeon_hdp_flush(robj->rdev);
771fe6b9
JG
315 return r;
316}
e024e110
DA
317
318int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
319 struct drm_file *filp)
320{
321 struct drm_radeon_gem_set_tiling *args = data;
322 struct drm_gem_object *gobj;
4c788679 323 struct radeon_bo *robj;
e024e110
DA
324 int r = 0;
325
326 DRM_DEBUG("%d \n", args->handle);
327 gobj = drm_gem_object_lookup(dev, filp, args->handle);
328 if (gobj == NULL)
329 return -EINVAL;
330 robj = gobj->driver_private;
4c788679 331 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
e024e110
DA
332 mutex_lock(&dev->struct_mutex);
333 drm_gem_object_unreference(gobj);
334 mutex_unlock(&dev->struct_mutex);
335 return r;
336}
337
338int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
339 struct drm_file *filp)
340{
341 struct drm_radeon_gem_get_tiling *args = data;
342 struct drm_gem_object *gobj;
4c788679 343 struct radeon_bo *rbo;
e024e110
DA
344 int r = 0;
345
346 DRM_DEBUG("\n");
347 gobj = drm_gem_object_lookup(dev, filp, args->handle);
348 if (gobj == NULL)
349 return -EINVAL;
4c788679
JG
350 rbo = gobj->driver_private;
351 r = radeon_bo_reserve(rbo, false);
352 if (unlikely(r != 0))
353 return r;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo);
e024e110
DA
356 mutex_lock(&dev->struct_mutex);
357 drm_gem_object_unreference(gobj);
358 mutex_unlock(&dev->struct_mutex);
359 return r;
360}