drm/vmwgfx: Fix an error path causing an oops.
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

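/*
 * vmwgfx resource classes are registered with the TTM object system
 * using the driver-private object-type slots (ttm_driver_type0 and
 * up) from ttm/ttm_object.h.
 */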
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

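/*
 * Each of the user-space-visible wrappers above embeds both a
 * ttm_base_object, used for handle lookup and reference counting
 * against a ttm_object_file, and the underlying resource or buffer.
 * Given a pointer to an embedded member, container_of() recovers the
 * wrapper, as the helpers below do for buffer objects.
 */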
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;

        /*
         * Called from kref_put() with the resource lock write-held;
         * see vmw_resource_unreference(). The lock is dropped around
         * the destroy callbacks and retaken before returning to the
         * caller.
         */
        idr_remove(res->idr, res->id);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

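/**
 * vmw_resource_init
 *
 * Initialize a resource and allocate its device-unique id from @idr.
 * idr_get_new_above() may race with other allocators and return
 * -EAGAIN, in which case the loop below retries after another
 * idr_pre_get(). On success the resource is not yet visible to
 * lookups; callers publish it with vmw_resource_activate().
 */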
static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             void (*res_free) (struct vmw_resource *res))
{
        int ret;

        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;

        do {
                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation basically means that vmw_resource_lookup will
 * find the resource.
 */

static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        return res;
}

/**
 * Context management:
 */

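/*
 * The hardware destroy functions below all follow the same FIFO
 * pattern: reserve space for a header/body command pair, fill in the
 * little-endian command id, body size and resource id, and commit.
 * If the FIFO reservation fails, little can be done beyond logging
 * the error, so the command is simply never sent.
 */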
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(res);
                else
                        res_free(res);
                return ret;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);

        kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(ctx == NULL))
                return -ENOMEM;

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}
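/*
 * Note the reference dance in vmw_context_define_ioctl() above, which
 * the surface and stream define ioctls repeat: vmw_resource_reference()
 * takes an extra reference that, on successful ttm_base_object_init(),
 * is dropped later through the base-object release function, while the
 * initial reference from vmw_context_init() is always dropped before
 * returning. On failure both references are thus released and the
 * resource is destroyed.
 */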

int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                        container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroySurface body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.sid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

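/**
 * vmw_surface_init
 *
 * Register @srf with the resource manager and emit an
 * SVGA_3D_CMD_SURFACE_DEFINE command to the device. The command is a
 * header and body followed by one SVGA3dSize per mip level of every
 * face, which is why submit_size and cmd_len below add
 * num_sizes * sizeof(SVGA3dSize) to the fixed-size part.
 */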
int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineSurface body;
        } *cmd;
        SVGA3dSize *cmd_size;
        struct vmw_resource *res = &srf->res;
        struct drm_vmw_size *src_size;
        size_t submit_size;
        uint32_t cmd_len;
        int i;

        BUG_ON(res_free == NULL);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, res_free);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed for create surface.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
        cmd->header.size = cpu_to_le32(cmd_len);
        cmd->body.sid = cpu_to_le32(res->id);
        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                cmd->body.face[i].numMipLevels =
                    cpu_to_le32(srf->mip_levels[i]);
        }

        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = cpu_to_le32(src_size->width);
                cmd_size->height = cpu_to_le32(src_size->height);
                cmd_size->depth = cpu_to_le32(src_size->depth);
        }

        vmw_fifo_commit(dev_priv, submit_size);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf =
            kmalloc(sizeof(*user_srf), GFP_KERNEL);
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i;

        if (unlikely(user_srf == NULL))
                return -ENOMEM;

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                srf->num_sizes += srf->mip_levels[i];

        if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS) {
                ret = -EINVAL;
                goto out_err0;
        }

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                /* copy_from_user() returns the number of bytes left
                 * uncopied, which must not leak out as the ioctl
                 * return value. */
                ret = -EFAULT;
                goto out_err1;
        }

        /*
         * Heuristically detect a cursor surface: bit 9 of the surface
         * flags (presumably a cursor hint) and a single 64x64
         * A8R8G8B8 mip level. For such surfaces, allocate an image
         * buffer for cursor command snooping.
         */
        if (srf->flags & (1 << 9) &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
                /* clear the image */
                if (srf->snooper.image) {
                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
                } else {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_err1;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /**
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                return ret;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);
        return 0;
out_err1:
        kfree(srf->sizes);
out_err0:
        kfree(user_srf);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                /* Return -EFAULT rather than the uncopied byte count. */
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;
        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */

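/*
 * vmw_dmabuf_acc_size() estimates the TTM bookkeeping memory charged
 * against the memory accounting global for a buffer object: a fixed
 * per-object overhead, computed once and cached in bo_user_size, plus
 * the page pointer array rounded up to whole pages.
 */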
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
{
        static size_t bo_user_size = ~0;

        size_t page_array_size =
            (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

        if (unlikely(bo_user_size == ~0)) {
                bo_user_size = glob->ttm_bo_extra_size +
                    ttm_round_pot(sizeof(struct vmw_dma_buffer));
        }

        return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
        struct vmw_private *dev_priv =
                container_of(bo->bdev, struct vmw_private, bdev);

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        if (vmw_bo->gmr_bound) {
                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                spin_unlock(&glob->lru_lock);
        }
        kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size =
            vmw_dmabuf_acc_size(bdev->glob,
                                (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0)) {
                /*
                 * ttm_bo_init() calls bo_free itself on failure, but
                 * since we fail before ever calling it, the buffer
                 * object must be freed here.
                 */
                bo_free(&vmw_bo->base);
                return ret;
        }

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->gmr_lru);
        INIT_LIST_HEAD(&vmw_bo->validate_list);
        vmw_bo->gmr_id = 0;
        vmw_bo->gmr_bound = false;

        return ttm_bo_init(bdev, &vmw_bo->base, size,
                           ttm_bo_type_device, placement,
                           0, 0, interruptible,
                           NULL, acc_size, bo_free);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
        struct ttm_bo_global *glob = bo->glob;
        struct vmw_private *dev_priv =
                container_of(bo->bdev, struct vmw_private, bdev);

        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        if (vmw_bo->gmr_bound) {
                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                spin_unlock(&glob->lru_lock);
        }
        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0)) {
                /* vmw_dmabuf_init() has already freed vmw_user_bo. */
                ttm_read_unlock(&vmaster->lock);
                return ret;
        }

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                struct ttm_buffer_object *bo = &vmw_user_bo->dma.base;

                /*
                 * The base object was never set up, so its release
                 * function will not drop the initial reference for us.
                 * Drop both references here. Note that ttm_bo_unref()
                 * clears its argument, so unreffing tmp twice, as the
                 * old code did, would oops on a NULL pointer.
                 */
                ttm_bo_unref(&tmp);
                ttm_bo_unref(&bo);
                ttm_read_unlock(&vmaster->lock);
                return ret;
        }

        rep->handle = vmw_user_bo->base.hash.key;
        rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
        rep->cur_gmr_id = vmw_user_bo->base.hash.key;
        rep->cur_gmr_offset = 0;

        ttm_bo_unref(&tmp);
        ttm_read_unlock(&vmaster->lock);

        return 0;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo;

        if (bo->mem.mem_type == TTM_PL_VRAM)
                return SVGA_GMR_FRAMEBUFFER;

        vmw_bo = vmw_dma_buffer(bo);

        return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->gmr_bound = true;
        vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

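/*
 * GMR id allocation uses the same pre-get/retry idiom as the resource
 * id allocation in vmw_resource_init(), with the ida protected by the
 * TTM global lru_lock spinlock. An id at or above max_gmr_ids exceeds
 * what the device supports, so it is released again and -EBUSY is
 * returned.
 */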
int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
        struct ttm_bo_global *glob = dev_priv->bdev.glob;
        int id;
        int ret;

        do {
                if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
                        return -ENOMEM;

                spin_lock(&glob->lru_lock);
                ret = ida_get_new(&dev_priv->gmr_ida, &id);
                spin_unlock(&glob->lru_lock);
        } while (ret == -EAGAIN);

        if (unlikely(ret != 0))
                return ret;

        if (unlikely(id >= dev_priv->max_gmr_ids)) {
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, id);
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        *p_id = (uint32_t) id;
        return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);

        kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
                                  arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(stream == NULL))
                return -ENOMEM;

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}