drm/i915: fix hibernation since i915 self-reclaim fixes
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5ca9599b232f86ec4b96b48889e52ca594dcad7..074385882ccfe721ff5630b9e825950c6479af7e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
                obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
        char *dst_vaddr, *src_vaddr;
 
-       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-       if (dst_vaddr == NULL)
-               return -ENOMEM;
-
-       src_vaddr = kmap_atomic(src_page, KM_USER1);
-       if (src_vaddr == NULL) {
-               kunmap_atomic(dst_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       dst_vaddr = kmap(dst_page);
+       src_vaddr = kmap(src_page);
 
        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-       kunmap_atomic(src_vaddr, KM_USER1);
-       kunmap_atomic(dst_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(src_page);
+       kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                                               cpu_page, cpu_offset, length);
        }
 
-       gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-       if (gpu_vaddr == NULL)
-               return -ENOMEM;
-
-       cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-       if (cpu_vaddr == NULL) {
-               kunmap_atomic(gpu_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       gpu_vaddr = kmap(gpu_page);
+       cpu_vaddr = kmap(cpu_page);
 
        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                length -= this_length;
        }
 
-       kunmap_atomic(cpu_vaddr, KM_USER1);
-       kunmap_atomic(gpu_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(cpu_page);
+       kunmap(gpu_page);
 }
 
 /**
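The three hunks above are why the copy helpers can become void: kmap(), unlike the kmap_atomic(..., KM_USERn) calls it replaces, never returns NULL, and the sleeping it may do is safe here because these are the slow paths, running in process context with the pages already pinned. A minimal sketch of the resulting pattern, with a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy between two (possibly highmem) pages; may sleep, cannot fail. */
static void copy_between_pages(struct page *dst, int dst_off,
			       struct page *src, int src_off, int len)
{
	char *d = kmap(dst);	/* sleeps if the kmap pool is exhausted */
	char *s = kmap(src);	/* nesting is fine; no per-CPU KM_* slots */

	memcpy(d + dst_off, s + src_off, len);

	kunmap(src);		/* kunmap() takes the page, not the vaddr */
	kunmap(dst);
}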
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   1);
-               } else {
-                       ret = slow_shmem_copy(user_pages[data_page_index],
-                                             data_page_offset,
-                                             obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
-                                             page_length);
+                                             user_pages[data_page_index],
+                                             data_page_offset,
+                                             page_length,
+                                             1);
+               } else {
+                       slow_shmem_copy(user_pages[data_page_index],
+                                       data_page_offset,
+                                       obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -529,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
  * page faults
  */
 
-static inline int
+static inline void
 slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
 {
-       char *src_vaddr, *dst_vaddr;
-       unsigned long unwritten;
+       char __iomem *dst_vaddr;
+       char *src_vaddr;
 
-       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
-       src_vaddr = kmap_atomic(user_page, KM_USER1);
-       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
-                                                     src_vaddr + user_offset,
-                                                     length);
-       kunmap_atomic(src_vaddr, KM_USER1);
-       io_mapping_unmap_atomic(dst_vaddr);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+       src_vaddr = kmap(user_page);
+
+       memcpy_toio(dst_vaddr + gtt_offset,
+                   src_vaddr + user_offset,
+                   length);
+
+       kunmap(user_page);
+       io_mapping_unmap(dst_vaddr);
 }
 
 static inline int
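slow_kernel_write gets the same treatment: the atomic io_mapping_map_atomic_wc()/__copy_from_user_inatomic_nocache() pair becomes the sleepable io_mapping_map_wc() plus memcpy_toio(). The user page was pinned by get_user_pages() earlier in the slow path, so the copy cannot fault and there is no longer an error to propagate. A hedged sketch of the non-atomic write-combining path (helper name hypothetical):

#include <linux/io-mapping.h>
#include <linux/highmem.h>

/* Write one pinned user page into a WC aperture mapping; may sleep. */
static void write_user_page_to_aperture(struct io_mapping *aperture,
					unsigned long base, int offset,
					struct page *user_page,
					int user_off, int len)
{
	char __iomem *dst = io_mapping_map_wc(aperture, base);
	char *src = kmap(user_page);	/* pinned, so no fault possible */

	memcpy_toio(dst + offset, src + user_off, len);

	kunmap(user_page);
	io_mapping_unmap(dst);
}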
@@ -720,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
-                                       gtt_page_base, gtt_page_offset,
-                                       user_pages[data_page_index],
-                                       data_page_offset,
-                                       page_length);
-
-               /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available.  Return the error and we'll
-                * retry in the slow path.
-                */
-               if (ret)
-                       goto out_unpin_object;
+               slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                 gtt_page_base, gtt_page_offset,
+                                 user_pages[data_page_index],
+                                 data_page_offset,
+                                 page_length);
 
                remain -= page_length;
                offset += page_length;
@@ -900,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   0);
-               } else {
-                       ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
-                                             page_length);
+                                             page_length,
+                                             0);
+               } else {
+                       slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -971,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0) {
+                dev->gtt_total != 0 &&
+                obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -2268,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
-                                          mapping_gfp_mask (mapping) |
+                                          GFP_HIGHUSER |
                                           __GFP_COLD |
                                           gfpmask);
                if (IS_ERR(page))
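This hunk is the hibernation fix that gives the commit its subject. shmem mappings default to GFP_HIGHUSER_MOVABLE, so inheriting the mask via mapping_gfp_mask() allocated GEM pages as movable even though the GPU pins them; spelling the mask out as GFP_HIGHUSER (plus the caller's gfpmask) drops __GFP_MOVABLE, which per the subject is what broke hibernation. A sketch of the allocation pattern, with a hypothetical helper wrapped around the same read_cache_page_gfp() call:

#include <linux/pagemap.h>
#include <linux/gfp.h>

/* Populate a page array from a shmem-backed mapping, forcing an
 * unmovable allocation because the pages will be pinned for the GPU. */
static int read_object_pages(struct address_space *mapping,
			     struct page **pages, int page_count)
{
	int i;

	for (i = 0; i < page_count; i++) {
		struct page *page;

		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER | __GFP_COLD);
		if (IS_ERR(page))
			return PTR_ERR(page);	/* caller drops pages[0..i) */
		pages[i] = page;
	}
	return 0;
}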
@@ -2648,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                return -EINVAL;
        }
 
+       /* If the object is bigger than the entire aperture, reject it early
+        * before evicting everything in a vain attempt to find space.
+        */
+       if (obj->size > dev->gtt_total) {
+               DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+               return -E2BIG;
+       }
+
  search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
@@ -3327,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        if (need_fence) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
-                       if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to install fence: %d\n",
-                                         ret);
                        i915_gem_object_unpin(obj);
                        return ret;
                }
@@ -3815,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret != -ENOSPC || pin_tries >= 1) {
                        if (ret != -ERESTARTSYS) {
                                unsigned long long total_size = 0;
-                               for (i = 0; i < args->buffer_count; i++)
+                               int num_fences = 0;
+                               for (i = 0; i < args->buffer_count; i++) {
+                                       obj_priv = object_list[i]->driver_private;
+
                                        total_size += object_list[i]->size;
-                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+                                       num_fences +=
+                                               exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+                                               obj_priv->tiling_mode != I915_TILING_NONE;
+                               }
+                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
                                          pinned+1, args->buffer_count,
-                                         total_size, ret);
+                                         total_size, num_fences,
+                                         ret);
                                DRM_ERROR("%d objects [%d pinned], "
                                          "%d object bytes [%d pinned], "
                                          "%d/%d gtt bytes\n",
@@ -4619,23 +4603,40 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
+
        dev_priv->render_ring = render_ring;
+
        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->render_ring.status_page.page_addr
                        = dev_priv->status_page_dmah->vaddr;
                memset(dev_priv->render_ring.status_page.page_addr,
                                0, PAGE_SIZE);
        }
+
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
+
        ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
-       if (!ret && HAS_BSD(dev)) {
+       if (ret)
+               goto cleanup_pipe_control;
+
+       if (HAS_BSD(dev)) {
                dev_priv->bsd_ring = bsd_ring;
                ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               if (ret)
+                       goto cleanup_render_ring;
        }
+
+       return 0;
+
+cleanup_render_ring:
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
        return ret;
 }
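The final hunk replaces the single `if (!ret && HAS_BSD(dev))` chain with the standard kernel goto-unwind ladder: each stage that initializes successfully gains a cleanup label, and a later failure jumps to the label that tears down everything acquired so far, in reverse order. A generic sketch of the idiom (all stage functions hypothetical):

/* Hypothetical three-stage init using the goto-unwind idiom. */
static int init_everything(struct drm_device *dev)
{
	int ret;

	ret = init_stage_a(dev);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = init_stage_b(dev);
	if (ret)
		goto cleanup_a;

	ret = init_stage_c(dev);
	if (ret)
		goto cleanup_b;

	return 0;

cleanup_b:
	fini_stage_b(dev);		/* unwind in reverse order */
cleanup_a:
	fini_stage_a(dev);
	return ret;
}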