/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

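/*
 * vmw_fb_setcolreg - set one entry of the software pseudo palette.
 *
 * Only truecolor (depth 24/32) is supported; the top eight bits of each
 * 16-bit fbdev color component are packed into XRGB8888 form.
 */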
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

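/*
 * vmw_fb_check_var - validate a requested mode against the device.
 *
 * Only 32 bpp is accepted (depth 24 without alpha, 32 with). The
 * requested geometry must fit in the framebuffer, and without multimon
 * support it must match the current size exactly, since we cannot
 * resize.
 */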
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* Without multimon it's hard to resize. */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

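/*
 * vmw_fb_set_par - program the device for the current mode.
 *
 * With multimon the single guest display is torn down and re-created at
 * the new size and position; otherwise only the legacy width/height
 * registers are updated.
 */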
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO: check whether pitch and offset change */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO: check whether pitch and offset change */
	}

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

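/*
 * vmw_fb_dirty_flush - copy the dirty region from the vmalloc'd shadow
 * buffer into VRAM and queue an SVGA_CMD_UPDATE through the FIFO so the
 * host refreshes the screen.
 */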
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	/* Snapshot and reset the dirty rectangle under the lock. */
	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/* Copy only the dirty rows, clamped to the framebuffer size. */
	for (i = y * stride; i < (y + h) * stride && i < info->fix.smem_len / 4;
	     i += stride) {
		for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

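/*
 * vmw_fb_dirty_mark - extend the pending dirty rectangle to cover the
 * given area and, if this is the first dirty region since the last
 * flush, kick off the deferred work after VMW_DIRTY_DELAY.
 */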
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work; the work item
		 * is shared with the deferred io system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

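/*
 * vmw_deferred_io - fbdev deferred io callback.
 *
 * Converts the list of touched pages into a span of full scanlines,
 * records it as the dirty rectangle, and flushes it to the device.
 */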
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 */

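/*
 * The fbcon drawing hooks defer to the generic cfb_* implementations,
 * which draw into the shadow buffer, and then mark the touched
 * rectangle dirty so it gets flushed to VRAM.
 */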
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

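/*
 * vmw_fb_create_bo - allocate a no-evict DMA buffer for the fbdev
 * framebuffer, constrained to the first @size bytes of VRAM, taking the
 * fbdev master's ttm lock around the allocation.
 */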
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* TODO: should this lock be taken interruptibly? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

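/*
 * vmw_fb_init - create and register the fbdev device.
 *
 * Programs an initial mode, allocates a vmalloc'd shadow buffer plus a
 * VRAM buffer object of the same size, maps the buffer object, fills in
 * the fb_info fixed and variable data, and hooks up deferred io before
 * registering the framebuffer.
 */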
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("max width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("max height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* depth 24 by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* A pixmap size of 0 lets the fb core allocate its default pixmap. */
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	info->aperture_base = vmw_priv->vram_start;
	info->aperture_size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

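/*
 * vmw_fb_close - unregister the fbdev device and free everything
 * allocated by vmw_fb_init().
 */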
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* TODO: is this teardown order correct? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

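/*
 * vmw_dmabuf_from_vram - move a buffer out of VRAM into system memory
 * so the VRAM it occupied can be reused.
 */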
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

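/*
 * vmw_dmabuf_to_start_of_vram - place a buffer at the start of VRAM
 * with a no-evict placement, taking the active master's ttm lock
 * around the move.
 */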
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* TODO: should this lock be taken interruptibly? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

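/*
 * vmw_fb_off - stop fbdev access to the device: disable dirty flushing,
 * wait for pending work, unmap the buffer object and evict it from
 * VRAM.
 */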
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

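/*
 * vmw_fb_on - give the framebuffer back to fbdev: stop all overlays,
 * move the buffer object back to the start of VRAM, remap it, re-enable
 * dirty flushing and force a full-screen refresh.
 */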
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* We are already active. */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over. */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty we won't schedule new work,
	 * so kick it off now. */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}