Merge branch 'drm-ttm-unmappable' into drm-core-next
author Dave Airlie <airlied@redhat.com>
Tue, 20 Apr 2010 04:15:09 +0000 (14:15 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 20 Apr 2010 04:15:09 +0000 (14:15 +1000)
* drm-ttm-unmappable:
  drm/radeon/kms: enable use of unmappable VRAM V2
  drm/ttm: remove io_ field from TTM V6
  drm/vmwgfx: add support for new TTM fault callback V5
  drm/nouveau/kms: add support for new TTM fault callback V5
  drm/radeon/kms: add support for new fault callback V7
  drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
  drm/ttm: split no_wait argument in 2 GPU or reserve wait

Conflicts:
drivers/gpu/drm/nouveau/nouveau_bo.c

18 files changed:
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h

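In short: the series removes the static io_offset/io_size/io_addr fields from
struct ttm_mem_type_manager (along with TTM_MEMTYPE_FLAG_NEEDS_IOREMAP) and
instead attaches bus-placement information to each struct ttm_mem_reg, filled
in on demand through new driver callbacks; it also splits the single no_wait
flag into no_wait_reserve and no_wait_gpu throughout the validate/move paths.
A condensed sketch of the new driver-facing surface as it appears in the hunks
below (paraphrased, not a verbatim header excerpt):

	/* Per-memory-region bus placement (include/drm/ttm/ttm_bo_api.h) */
	struct ttm_bus_placement {
		void		*addr;		/* mapped virtual address, or NULL */
		unsigned long	base;		/* bus base address */
		unsigned long	size;		/* size in bytes */
		unsigned long	offset;		/* offset from the base address */
		bool		is_iomem;	/* is this io memory ? */
		bool		io_reserved;	/* io_mem_reserve() has been called */
	};

	/* New struct ttm_bo_driver hooks (see the driver implementations below) */
	int  (*fault_reserve_notify)(struct ttm_buffer_object *bo);
	int  (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
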
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840254c1ccf5ad13139856cea232525..fb164efada3b29aecc9578c86c9f5aef53effdac 100644
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
        nouveau_bo_placement_set(nvbo, memtype, 0);
 
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_MAPPABLE |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-
-               man->io_addr = NULL;
-               man->io_offset = drm_get_resource_start(dev, 1);
-               man->io_size = drm_get_resource_len(dev, 1);
-               if (man->io_size > dev_priv->vram_size)
-                       man->io_size = dev_priv->vram_size;
-
                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
-                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-                                    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
-
-               man->io_offset  = dev_priv->gart_info.aper_base;
-               man->io_size    = dev_priv->gart_info.aper_size;
-               man->io_addr   = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-                             struct nouveau_bo *nvbo, bool evict, bool no_wait,
+                             struct nouveau_bo *nvbo, bool evict,
+                             bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                return ret;
 
        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-                                       evict, no_wait, new_mem);
+                                       evict, no_wait_reserve, no_wait_gpu, new_mem);
        if (nvbo->channel && nvbo->channel != chan)
                ret = nouveau_fence_wait(fence, NULL, false, false);
        nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    int no_wait, struct ttm_mem_reg *new_mem)
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                dst_offset += (PAGE_SIZE * line_count);
        }
 
-       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-               bool no_wait, struct ttm_mem_reg *new_mem)
+               bool no_wait_reserve, bool no_wait_gpu,
+               struct ttm_mem_reg *new_mem)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        /* Software copy if the card isn't up and running yet. */
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
            !dev_priv->channel) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }
 
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
        if (!ret)
                goto out;
 
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
        if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+       struct drm_device *dev = dev_priv->dev;
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               return 0;
+       case TTM_PL_TT:
+#if __OS_HAS_AGP
+               if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = dev_priv->gart_info.aper_base;
+                       mem->bus.is_iomem = true;
+               }
+#endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.base = drm_get_resource_start(dev, 1);
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
+       .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+       .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+       .io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6d1aa89ec870b4c9a2b0955e1868fc4f0e3d927a..69c76cf934074b9ef962dc3f1e4c78f40b4bdf28 100644
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
                nvbo->channel = chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-                                     false, false);
+                                     false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3295154e59345227e90404c88e8d0c7929aa62d6..b3d168fb89e565b3894249ab86c5fb68665c1944 100644
@@ -1266,11 +1266,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9d3b47deecb38f163cad064295d7bec284c47ec8..9bdccb9649999d9d27b64774b9e038f5aa2e395e 100644
@@ -2036,11 +2036,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
                else
                        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        }
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
 }
 
 void r100_vga_set_state(struct radeon_device *rdev, bool state)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9b08c5743c8662ae33ec80f752fc59ed614a15bf..c325cb121059e2ff4006ce8a4388e63ba2f7b21d 100644
@@ -730,11 +730,6 @@ int r600_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
 
        if (rdev->flags & RADEON_IS_IGP)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd551ebecdd6b5509f904ff24ebe660..6a8617bac1429400105b626d796f86029f7c132d 100644
@@ -192,7 +192,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -216,7 +216,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
@@ -331,7 +331,7 @@ int radeon_bo_list_validate(struct list_head *head)
                                                                lobj->rdomain);
                        }
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false);
+                                               true, false, false);
                        if (unlikely(r))
                                return r;
                }
@@ -499,11 +499,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
        radeon_bo_check_tiling(rbo, 0, 1);
 }
 
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+       struct radeon_device *rdev;
        struct radeon_bo *rbo;
+       unsigned long offset, size;
+       int r;
+
        if (!radeon_ttm_bo_is_radeon_bo(bo))
-               return;
+               return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
+       rdev = rbo->rdev;
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               size = bo->mem.num_pages << PAGE_SHIFT;
+               offset = bo->mem.mm_node->start << PAGE_SHIFT;
+               if ((offset + size) > rdev->mc.visible_vram_size) {
+                       /* hurrah the memory is not visible ! */
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       if (unlikely(r != 0))
+                               return r;
+                       offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                       /* this should not happen */
+                       if ((offset + size) > rdev->mc.visible_vram_size)
+                               return -EINVAL;
+               }
+       }
+       return 0;
 }
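
The radeon hunk above is the first real user of the fault-time hook: when a
faulting BO lives past the CPU-visible part of VRAM, the callback revalidates
it with an upper pin limit so the fault can be serviced. A worked example with
hypothetical numbers (256 MiB visible aperture, 1 MiB BO placed at 255.5 MiB):

	offset = bo->mem.mm_node->start << PAGE_SHIFT;	/* 0x0ff80000 (255.5 MiB) */
	size   = bo->mem.num_pages << PAGE_SHIFT;	/* 0x00100000 (1 MiB) */
	/* offset + size = 0x10080000 > visible_vram_size (0x10000000), so
	 * placement.lpfn = 0x10000000 >> PAGE_SHIFT = 0x10000 and the BO is
	 * revalidated into the mappable range before any PTE is written. */
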
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244cd66745a9c524edfc208ae625516..353998dc2c03b12992cd244ff116d01e45db2b96 100644
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                        struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f06533676e7dbf6d40662d21dd4f63dc2ad3f9c4..af98f45954b31884e748a1d922dee33964949424 100644
@@ -163,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                          (unsigned)type);
                                return -EINVAL;
                        }
-                       man->io_offset = rdev->mc.agp_base;
-                       man->io_size = rdev->mc.gtt_size;
-                       man->io_addr = NULL;
                        if (!rdev->ddev->agp->cant_use_aperture)
-                               man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-                                            TTM_MEMTYPE_FLAG_MAPPABLE;
+                               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
-               } else
-#endif
-               {
-                       man->io_offset = 0;
-                       man->io_size = 0;
-                       man->io_addr = NULL;
                }
+#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-               man->io_addr = NULL;
-               man->io_offset = rdev->mc.aper_base;
-               man->io_size = rdev->mc.aper_size;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -245,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-                           bool evict, int no_wait,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+                       bool evict, int no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem,
+                       struct ttm_mem_reg *old_mem)
 {
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
@@ -291,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-                                     evict, no_wait, new_mem);
+                                     evict, no_wait_reserve, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -318,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                            interruptible, no_wait);
+                            interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
@@ -332,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -350,7 +338,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -370,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -395,8 +384,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-                         bool evict, bool interruptible, bool no_wait,
-                         struct ttm_mem_reg *new_mem)
+                       bool evict, bool interruptible,
+                       bool no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
@@ -423,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                       no_wait_reserve, no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                           no_wait_reserve, no_wait_gpu, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+               r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        }
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        }
-
        return r;
 }
 
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_TT:
+#if __OS_HAS_AGP
+               if (rdev->flags & RADEON_IS_AGP) {
+                       /* RADEON_IS_AGP is set only if AGP is active */
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = rdev->mc.agp_base;
+                       mem->bus.is_iomem = true;
+               }
+#endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               /* check if it's visible */
+               if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+                       return -EINVAL;
+               mem->bus.base = rdev->mc.aper_base;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
 {
@@ -480,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+       .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+       .io_mem_free = &radeon_ttm_io_mem_free,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c14f3be25b4b2e5596079f4849c364a09a629e8f..a74683e186128eca769ae61f435743b038746f93 100644
@@ -910,11 +910,6 @@ int rv770_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a791fc2f539c0df6cf797661570c47dc..3b5b094b1397ab75e01ee97ca538ff47076beed3 100644
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-       printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-       printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
-                                 bool evict, bool interruptible, bool no_wait)
+                                 bool evict, bool interruptible,
+                                 bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
-                                        no_wait, mem);
+                                        no_wait_reserve, no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
        if (ret)
                goto out_err;
@@ -606,7 +605,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                       bool no_wait)
+                       bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +614,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        int ret = 0;
 
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
 
        if (unlikely(ret != 0)) {
@@ -631,6 +630,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
+       evict_mem.bus.io_reserved = false;
 
        placement.fpfn = 0;
        placement.lpfn = 0;
@@ -638,7 +638,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait);
+                               no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
@@ -650,7 +650,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        }
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                    no_wait);
+                                    no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +670,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
-                               bool interruptible, bool no_wait)
+                               bool interruptible, bool no_wait_reserve,
+                               bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +688,11 @@ retry:
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
 
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+       ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait))
+               if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);
 
                kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +714,7 @@ retry:
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-       ret = ttm_bo_evict(bo, interruptible, no_wait);
+       ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);
 
        kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +765,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
-                                       bool interruptible, bool no_wait)
+                                       bool interruptible,
+                                       bool no_wait_reserve,
+                                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +788,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-                                               no_wait);
+                                               no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@ -855,7 +858,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
@@ -952,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                }
 
                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-                                               interruptible, no_wait);
+                                               interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        mem->mm_node->private = bo;
@@ -978,7 +982,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
@@ -992,20 +997,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
+       mem.bus.io_reserved = false;
        /*
         * Determine where to move the buffer.
         */
-       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        int ret;
 
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@ -1153,6 +1160,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
+       bo->mem.bus.io_reserved = false;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1183,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;
 
@@ -1249,7 +1257,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
@@ -1553,26 +1561,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
        return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-                     struct ttm_mem_reg *mem,
-                     unsigned long *bus_base,
-                     unsigned long *bus_offset, unsigned long *bus_size)
-{
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-       *bus_size = 0;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
-
-       if (ttm_mem_reg_is_pci(bdev, mem)) {
-               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
-               *bus_size = mem->num_pages << PAGE_SHIFT;
-               *bus_base = man->io_offset;
-       }
-
-       return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1569,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
        if (!bdev->dev_mapping)
                return;
-
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1839,7 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                evict_mem.mem_type = TTM_PL_SYSTEM;
 
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-                                            false, false);
+                                            false, false, false);
                if (unlikely(ret != 0))
                        goto out;
        }
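
The flag split gives the two historically conflated waits independent control:
no_wait_reserve short-circuits waiting for another thread's buffer reservation,
while no_wait_gpu short-circuits waiting on fences. The eviction path above now
routes each flag to its own primitive; schematically (a sketch of
ttm_mem_evict_first()/ttm_bo_evict(), not a verbatim excerpt):

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); /* reservation */
	...
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);	/* GPU fences */

Most existing call sites simply pass false for both, preserving the old
blocking behaviour.
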
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b4a264618a723ad66d46aa3e9b217..a37a94872a14c888001356ca45ea5832881397a0 100644
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                   bool evict, bool no_wait_reserve,
+                   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       int ret;
+
+       if (!mem->bus.io_reserved) {
+               mem->bus.io_reserved = true;
+               ret = bdev->driver->io_mem_reserve(bdev, mem);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+       return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       if (bdev->driver->io_mem_reserve) {
+               if (mem->bus.io_reserved) {
+                       mem->bus.io_reserved = false;
+                       bdev->driver->io_mem_free(bdev, mem);
+               }
+       }
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
 {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
        int ret;
        void *addr;
 
        *virtual = NULL;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-       if (ret || bus_size == 0)
+       ret = ttm_mem_io_reserve(bdev, mem);
+       if (ret)
                return ret;
 
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-               addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-       else {
+       if (mem->bus.addr) {
+               addr = mem->bus.addr;
+       } else {
                if (mem->placement & TTM_PL_FLAG_WC)
-                       addr = ioremap_wc(bus_base + bus_offset, bus_size);
+                       addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
-                       addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-               if (!addr)
+                       addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+               if (!addr) {
+                       ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
+               }
        }
        *virtual = addr;
        return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
        man = &bdev->man[mem->mem_type];
 
-       if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+       if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
+       ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                      bool evict, bool no_wait_reserve, bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-                         unsigned long bus_base,
-                         unsigned long bus_offset,
-                         unsigned long bus_size,
+                         unsigned long offset,
+                         unsigned long size,
                          struct ttm_bo_kmap_obj *map)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+       if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
-               map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+               map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
-                       map->virtual = ioremap_wc(bus_base + bus_offset,
-                                                 bus_size);
+                       map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                 size);
                else
-                       map->virtual = ioremap_nocache(bus_base + bus_offset,
-                                                      bus_size);
+                       map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                      size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
 {
+       unsigned long offset, size;
        int ret;
-       unsigned long bus_base;
-       unsigned long bus_offset;
-       unsigned long bus_size;
 
        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
+       map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
 #endif
-       ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-                               &bus_offset, &bus_size);
+       ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
-       if (bus_size == 0) {
+       if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
-               bus_offset += start_page << PAGE_SHIFT;
-               bus_size = num_pages << PAGE_SHIFT;
-               return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+               offset = start_page << PAGE_SHIFT;
+               size = num_pages << PAGE_SHIFT;
+               return ttm_bo_ioremap(bo, offset, size, map);
        }
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
+               ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-                   unsigned long dst_offset,
-                   unsigned long *pfn, pgprot_t *prot)
-{
-       struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
-       int ret;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-                       &bus_size);
-       if (ret)
-               return -EINVAL;
-       if (bus_size != 0)
-               *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-       else
-               if (!bo->ttm)
-                       return -EINVAL;
-               else
-                       *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-                                                          dst_offset >>
-                                                          PAGE_SHIFT));
-       *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-               PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-       return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
-                             bool evict, bool no_wait,
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3c9e486ebd483942c827dcd8d40a6..fe6cb77899f4b22ffc388190fb08e909fc4a298a 100644
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long bus_base;
-       unsigned long bus_offset;
-       unsigned long bus_size;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct page *page;
        int ret;
        int i;
-       bool is_iomem;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
 
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
        }
 
-       if (bdev->driver->fault_reserve_notify)
-               bdev->driver->fault_reserve_notify(bo);
+       if (bdev->driver->fault_reserve_notify) {
+               ret = bdev->driver->fault_reserve_notify(bo);
+               switch (ret) {
+               case 0:
+                       break;
+               case -EBUSY:
+                       set_need_resched();
+               case -ERESTARTSYS:
+                       retval = VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               default:
+                       retval = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
 
        /*
         * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                spin_unlock(&bo->lock);
 
 
-       ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-                               &bus_size);
-       if (unlikely(ret != 0)) {
+       ret = ttm_mem_io_reserve(bdev, &bo->mem);
+       if (ret) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
 
-       is_iomem = (bus_size != 0);
-
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
-
-       if (is_iomem) {
+       if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         */
 
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-               if (is_iomem)
-                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-                           page_offset;
+               if (bo->mem.bus.is_iomem)
+                       pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm_tt_get_page(ttm, page_offset);
                        if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_unlock;
-
                }
 
                address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-       struct ttm_buffer_object *bo =
-           (struct ttm_buffer_object *)vma->vm_private_data;
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
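
Taken together, the hunks above give the fault handler this flow (a condensed
sketch of the modified ttm_bo_vm_fault(), not a verbatim excerpt):

	ret = bdev->driver->fault_reserve_notify(bo);	/* may migrate the BO */
	/* -EBUSY/-ERESTARTSYS => VM_FAULT_NOPAGE (retry); others => SIGBUS */
	ret = ttm_mem_io_reserve(bdev, &bo->mem);	/* fills bo->mem.bus.* */
	if (bo->mem.bus.is_iomem)
		pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		      + page_offset;
	else
		pfn = page_to_pfn(ttm_tt_get_page(ttm, page_offset));
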
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d573c384dc3181e56b988e7bfeea55..c4f5114aee7c8bf956867dd45da9ddb1f4236b00 100644
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
 {
-       struct vmw_private *dev_priv =
-           container_of(bdev, struct vmw_private, bdev);
-
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = 0;
-               man->io_offset = dev_priv->vram_start;
-               man->io_size = dev_priv->vram_size;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                   TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->io_addr = NULL;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
        vmw_dmabuf_gmr_unbind(bo);
 }
 
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.is_iomem = false;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.base = dev_priv->vram_start;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       return 0;
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = vmw_move_notify,
-       .swap_notify = vmw_swap_notify
+       .swap_notify = vmw_swap_notify,
+       .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+       .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+       .io_mem_free = &vmw_ttm_io_mem_free,
 };
index 0897359b3e4e27550dbe2d767e2ca3a50be49c14..dbd36b8910cf5cc65f22879c61017f8a0de83dc3 100644 (file)
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * Put BO in VRAM, only if there is space.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
        if (unlikely(ret == -ERESTARTSYS))
                return ret;
 
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * previous contents.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
 }
 
index a93367041cdcfa66393a7e043da0bb75d1c08d31..80125ffc4e28c8fc072fb90a2eae1bd972376a8e 100644 (file)
@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+       ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
        ttm_bo_unreserve(bo);
 
        return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
-       ret = ttm_bo_validate(bo, &ne_placement, false, false);
+       ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
        ttm_bo_unreserve(bo);
 err_unlock:
        ttm_write_unlock(&vmw_priv->active_master->lock);
index 5b6eabeb7f51f6f290885426c2f93422f84d7a04..ad566c85b075da13305ecef856b254ff4dc30a6b 100644 (file)
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
        if (pin)
                overlay_placement = &vmw_vram_ne_placement;
 
-       ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+       ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
        ttm_bo_unreserve(bo);
 
index 81eb9f45883c4b2111b549f600fd720f6e5a0511..3e273e0b9417eed6f430963cfd679669a4f6daf1 100644 (file)
@@ -66,6 +66,27 @@ struct ttm_placement {
        const uint32_t  *busy_placement;
 };
 
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr:              mapped virtual address
+ * @base:              bus base address
+ * @is_iomem:          is this io memory?
+ * @size:              size in bytes
+ * @offset:            offset from the base address
+ * @io_reserved:       has the io resource been reserved?
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+       void            *addr;
+       unsigned long   base;
+       unsigned long   size;
+       unsigned long   offset;
+       bool            is_iomem;
+       bool            io_reserved;
+};
+
 
 /**
  * struct ttm_mem_reg
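Consumers of struct ttm_bus_placement fall into two cases: the driver may pre-map the region and publish the kernel address in bus.addr, or leave it NULL and let TTM set up a mapping on demand. A hedged sketch of the on-demand case, with ttm_bus_placement_ioremap as an illustrative name rather than an exported helper:

static void __iomem *ttm_bus_placement_ioremap(struct ttm_mem_reg *mem)
{
        if (mem->bus.addr)                      /* pre-mapped by the driver */
                return (void __iomem *)mem->bus.addr;
        if (mem->placement & TTM_PL_FLAG_WC)    /* write-combined aperture */
                return ioremap_wc(mem->bus.base + mem->bus.offset,
                                  mem->bus.size);
        return ioremap_nocache(mem->bus.base + mem->bus.offset,
                               mem->bus.size);
}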
@@ -75,6 +95,7 @@ struct ttm_placement {
  * @num_pages: Actual size of memory region in pages.
  * @page_alignment: Page alignment.
  * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
  *
  * Structure indicating the placement and space resources used by a
  * buffer object.
@@ -87,6 +108,7 @@ struct ttm_mem_reg {
        uint32_t page_alignment;
        uint32_t mem_type;
        uint32_t placement;
+       struct ttm_bus_placement bus;
 };
 
 /**
@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
                ttm_bo_map_kmap         = 3,
                ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
        } bo_kmap_type;
+       struct ttm_buffer_object *bo;
 };
 
 /**
@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
  * according to the proposed placement.
@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
-                               bool interruptible, bool no_wait);
+                               bool interruptible, bool no_wait_reserve,
+                               bool no_wait_gpu);
 
 /**
  * ttm_bo_unref
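A hedged usage sketch of the split arguments: the caller below sleeps interruptibly and waits for conflicting reservations, but returns -EBUSY instead of stalling until the GPU is idle. The helper name is illustrative; the placement argument is whatever the driver would normally pass:

static int pin_without_gpu_stall(struct ttm_buffer_object *bo,
                                 struct ttm_placement *placement)
{
        /* interruptible = true, no_wait_reserve = false (wait for other
         * reservations), no_wait_gpu = true (-EBUSY instead of GPU idle) */
        return ttm_bo_validate(bo, placement, true, false, true);
}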
index e929c27ede2229b2e3f656619464e4cb7cc0d47e..7720b1787e23282de2866ecb8d3eecc203a13ba1 100644 (file)
@@ -176,8 +176,6 @@ struct ttm_tt {
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)        /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)        /* Fixed memory needs ioremap
-                                                  before kernel access. */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)        /* Can't map aperture */
 
 /**
@@ -189,13 +187,6 @@ struct ttm_tt {
  * managed by this memory type.
  * @gpu_offset: If used, the GPU offset of the first managed page of
  * fixed memory or the first managed location in an aperture.
- * @io_offset: The io_offset of the first managed page of IO memory or
- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
- * memory, this should be set to NULL.
- * @io_size: The size of a managed IO region (fixed memory or aperture).
- * @io_addr: Virtual kernel address if the io region is pre-mapped. For
- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
- * @io_addr should be set to NULL.
  * @size: Size of the managed region.
  * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
  * as defined in ttm_placement_common.h
@@ -221,9 +212,6 @@ struct ttm_mem_type_manager {
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
-       unsigned long io_offset;
-       unsigned long io_size;
-       void *io_addr;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
@@ -311,7 +299,8 @@ struct ttm_bo_driver {
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
-                    bool no_wait, struct ttm_mem_reg *new_mem);
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem);
 
        /**
         * struct ttm_bo_driver_member verify_access
@@ -351,12 +340,21 @@ struct ttm_bo_driver {
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
-       void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+       int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 
        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);
+
+       /**
+        * Driver callback invoked when io memory is about to be mapped
+        * (by bo_move_memcpy, for instance). TTM takes care of calling
+        * io_mem_free once the mapping is no longer in use; calls to
+        * io_mem_reserve and io_mem_free are balanced.
+        */
+       int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+       void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
 
 /**
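A hedged sketch of how the core-side helpers (declared further down as ttm_mem_io_reserve and ttm_mem_io_free) might pair the two hooks; the actual ttm_bo_util.c implementation may differ, and both hooks are optional for drivers whose memory types need no setup:

int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        if (!bdev->driver->io_mem_reserve)      /* hook is optional */
                return 0;
        return bdev->driver->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        if (bdev->driver->io_mem_free)          /* balances io_mem_reserve */
                bdev->driver->io_mem_free(bdev, mem);
}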
@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptibly while waiting.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
  * the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
-                               bool interruptible, bool no_wait);
+                               bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu);
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);
 
+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
+
 extern void ttm_bo_global_release(struct ttm_global_reference *ref);
 extern int ttm_bo_global_init(struct ttm_global_reference *ref);
 
@@ -826,7 +831,8 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Optimized move function for a buffer object with both old and
@@ -840,15 +846,16 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait,
-                          struct ttm_mem_reg *new_mem);
+                          bool evict, bool no_wait_reserve,
+                          bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Fallback move function for a mappable buffer object in mappable memory.
@@ -862,8 +869,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict,
-                             bool no_wait, struct ttm_mem_reg *new_mem);
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -882,7 +889,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @sync_obj_arg: An argument to pass to the sync object idle / wait
  * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -896,7 +904,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
-                                    bool evict, bool no_wait,
+                                    bool evict, bool no_wait_reserve,
+                                    bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
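A hedged sketch of a driver move hook built on ttm_bo_move_accel_cleanup under the split-wait API; the mydrv_* names and the copy helper are illustrative, not part of this series:

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                         bool interruptible, bool no_wait_reserve,
                         bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        void *fence;

        /* assumed helper: queues a GPU blit and returns a sync object */
        fence = mydrv_copy_buffer(bo, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* hand the fence to TTM, which frees the old node once idle */
        return ttm_bo_move_accel_cleanup(bo, fence, NULL, evict,
                                         no_wait_reserve, no_wait_gpu,
                                         new_mem);
}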