drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f84c8e982dcb398c810eb915bcd5c352686419a0..8757ecf6e96bd117527c18424c22b10e0576f94c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
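The change named in the subject line lands in i915_gem_object_get_pages(), in the hunk near the end of this diff: object backing pages are now allocated with an explicit GFP_HIGHUSER | __GFP_COLD | __GFP_RECLAIMABLE mask instead of whatever mapping_gfp_mask() reports for the shmem mapping, so these shrinker-reclaimable pages get grouped with other reclaimable allocations. A minimal sketch of the resulting allocation loop; only the read_cache_page_gfp() call and its gfp mask are taken from the hunk, the surrounding declarations are paraphrased:

	/*
	 * Sketch only: the read_cache_page_gfp() call and its gfp mask come
	 * from the hunk in this diff; the surrounding declarations are
	 * paraphrased from the kernel of that era, not copied verbatim.
	 */
	struct address_space *mapping = obj->filp->f_mapping;
	struct page *page;
	int i;

	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |	/* highmem is fine for these pages */
					   __GFP_COLD |		/* no need for cache-hot pages */
					   __GFP_RECLAIMABLE |	/* group with reclaimable allocations;
								 * the i915 shrinker can drop them */
					   gfpmask);		/* extra flags from the caller */
		if (IS_ERR(page))
			goto err_pages;		/* unwind the pages gathered so far */
		obj_priv->pages[i] = page;
	}
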
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
                obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
        char *dst_vaddr, *src_vaddr;
 
-       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-       if (dst_vaddr == NULL)
-               return -ENOMEM;
-
-       src_vaddr = kmap_atomic(src_page, KM_USER1);
-       if (src_vaddr == NULL) {
-               kunmap_atomic(dst_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       dst_vaddr = kmap(dst_page);
+       src_vaddr = kmap(src_page);
 
        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-       kunmap_atomic(src_vaddr, KM_USER1);
-       kunmap_atomic(dst_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(src_page);
+       kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                                               cpu_page, cpu_offset, length);
        }
 
-       gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-       if (gpu_vaddr == NULL)
-               return -ENOMEM;
-
-       cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-       if (cpu_vaddr == NULL) {
-               kunmap_atomic(gpu_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       gpu_vaddr = kmap(gpu_page);
+       cpu_vaddr = kmap(cpu_page);
 
        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
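The comment above refers to the bit-17 swizzle some memory controllers apply: bit 17 of the page's physical address is folded into bit 6, so for pages with bit 17 set each 64-byte cache line trades places with its neighbour in the 128-byte pair. The copy loop whose body falls between this hunk and the next therefore walks the page one cache line at a time and XORs 64 into the GPU-side offset. A rough sketch of that loop, paraphrased since its body is not shown in this diff:

	while (length > 0) {
		/* Never let one memcpy cross a 64-byte cache line. */
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		/* Flip bit 6 (64 bytes): that is where bit 17 was XORed in. */
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read)
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		else
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}
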
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                length -= this_length;
        }
 
-       kunmap_atomic(cpu_vaddr, KM_USER1);
-       kunmap_atomic(gpu_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(cpu_page);
+       kunmap(gpu_page);
 }
 
 /**
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   1);
-               } else {
-                       ret = slow_shmem_copy(user_pages[data_page_index],
-                                             data_page_offset,
-                                             obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
-                                             page_length);
+                                             user_pages[data_page_index],
+                                             data_page_offset,
+                                             page_length,
+                                             1);
+               } else {
+                       slow_shmem_copy(user_pages[data_page_index],
+                                       data_page_offset,
+                                       obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -529,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
  * page faults
  */
 
-static inline int
+static inline void
 slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
 {
-       char *src_vaddr, *dst_vaddr;
-       unsigned long unwritten;
+       char __iomem *dst_vaddr;
+       char *src_vaddr;
 
-       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
-       src_vaddr = kmap_atomic(user_page, KM_USER1);
-       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
-                                                     src_vaddr + user_offset,
-                                                     length);
-       kunmap_atomic(src_vaddr, KM_USER1);
-       io_mapping_unmap_atomic(dst_vaddr);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+       src_vaddr = kmap(user_page);
+
+       memcpy_toio(dst_vaddr + gtt_offset,
+                   src_vaddr + user_offset,
+                   length);
+
+       kunmap(user_page);
+       io_mapping_unmap(dst_vaddr);
 }
 
 static inline int
@@ -720,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
-                                       gtt_page_base, gtt_page_offset,
-                                       user_pages[data_page_index],
-                                       data_page_offset,
-                                       page_length);
-
-               /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available.  Return the error and we'll
-                * retry in the slow path.
-                */
-               if (ret)
-                       goto out_unpin_object;
+               slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                 gtt_page_base, gtt_page_offset,
+                                 user_pages[data_page_index],
+                                 data_page_offset,
+                                 page_length);
 
                remain -= page_length;
                offset += page_length;
@@ -900,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   0);
-               } else {
-                       ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
-                                             page_length);
+                                             page_length,
+                                             0);
+               } else {
+                       slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -971,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0) {
+                dev->gtt_total != 0 &&
+                obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -2268,8 +2239,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
-                                          mapping_gfp_mask (mapping) |
+                                          GFP_HIGHUSER |
                                           __GFP_COLD |
+                                          __GFP_RECLAIMABLE |
                                           gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
@@ -4632,23 +4604,40 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
+
        dev_priv->render_ring = render_ring;
+
        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->render_ring.status_page.page_addr
                        = dev_priv->status_page_dmah->vaddr;
                memset(dev_priv->render_ring.status_page.page_addr,
                                0, PAGE_SIZE);
        }
+
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
+
        ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
-       if (!ret && HAS_BSD(dev)) {
+       if (ret)
+               goto cleanup_pipe_control;
+
+       if (HAS_BSD(dev)) {
                dev_priv->bsd_ring = bsd_ring;
                ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               if (ret)
+                       goto cleanup_render_ring;
        }
+
+       return 0;
+
+cleanup_render_ring:
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
        return ret;
 }
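
The reworked init path above follows the common kernel error-unwind idiom: each resource that was set up successfully gets a cleanup label, and a later failure jumps to the label that tears down everything acquired so far, in reverse order. A generic, self-contained illustration of the pattern with hypothetical setup_x()/teardown_x() helpers (not i915 code):

	/*
	 * Generic illustration of the goto-unwind idiom; the setup_x() and
	 * teardown_x() helpers are hypothetical stand-ins, not i915 functions.
	 */
	int setup_a(void), setup_b(void), setup_c(void);
	void teardown_a(void), teardown_b(void);

	int setup_everything(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = setup_b();
		if (ret)
			goto cleanup_a;

		ret = setup_c();
		if (ret)
			goto cleanup_b;

		return 0;			/* success path skips the labels */

	cleanup_b:
		teardown_b();
	cleanup_a:
		teardown_a();
		return ret;
	}

Returning 0 before the labels keeps the success path free of cleanup work, which is why the new code adds an explicit "return 0;" ahead of cleanup_render_ring.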