drm/i915: Selectively enable self-reclaim
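The commit message body is not shown on this blobdiff page; summarising from the hunks below: i915_gem_object_get_pages() gains a gfp_t argument, and page reads switch from read_mapping_page() to read_cache_page_gfp() so the caller's gfp flags apply per call instead of being set by temporarily rewriting the mapping's gfp mask (the i915_gem_object_get/set_page_gfp_mask() helpers are removed). Callers that can recover from an allocation failure, i915_gem_object_get_pages_or_evict() and i915_gem_object_bind_to_gtt(), first try __GFP_NORETRY | __GFP_NOWARN (fail fast, no warning splat), evict buffers on -ENOMEM, and only then retry with the default mask. The patch also adds i915_gem_object_set_to_display_plane() and drops an error message on i915_gem_do_execbuffer() failure.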
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b386c4c314fa10810276e22e3aa888..dda787aafcc626ce064c3d59daf183c588fb5b69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
        return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-       return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-       mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
        int ret;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;
-               gfp_t gfp;
 
                ret = i915_gem_evict_something(dev, obj->size);
                if (ret)
                        return ret;
 
-               gfp = i915_gem_object_get_page_gfp_mask(obj);
-               i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-               ret = i915_gem_object_get_pages(obj);
-               i915_gem_object_set_page_gfp_mask (obj, gfp);
+               ret = i915_gem_object_get_pages(obj, 0);
        }
 
        return ret;
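
The same try-cheap, evict, then retry-with-the-default-mask flow reappears in i915_gem_object_bind_to_gtt() further down. A minimal userspace C sketch of that control flow, for illustration only: try_get_pages(), evict_something() and ALLOC_FAIL_FAST are hypothetical stand-ins for i915_gem_object_get_pages(), i915_gem_evict_something() and __GFP_NORETRY | __GFP_NOWARN, not kernel code.

#include <errno.h>
#include <stdio.h>

#define ALLOC_FAIL_FAST 0x1	/* stand-in for __GFP_NORETRY | __GFP_NOWARN */

/* Hypothetical stand-in for i915_gem_object_get_pages(obj, gfpmask):
 * pretend the fail-fast attempt cannot find memory, so the fallback runs. */
static int try_get_pages(unsigned int flags)
{
	return (flags & ALLOC_FAIL_FAST) ? -ENOMEM : 0;
}

/* Hypothetical stand-in for i915_gem_evict_something(): assume it freed space. */
static int evict_something(void)
{
	return 0;
}

/* Mirrors the control flow of i915_gem_object_get_pages_or_evict() above:
 * cheap attempt first, evict on -ENOMEM, then retry with the default mask. */
static int get_pages_or_evict(void)
{
	int ret = try_get_pages(ALLOC_FAIL_FAST);

	if (ret == -ENOMEM) {
		ret = evict_something();
		if (ret)
			return ret;
		ret = try_get_pages(0);
	}
	return ret;
}

int main(void)
{
	printf("get_pages_or_evict() = %d\n", get_pages_or_evict());
	return 0;
}
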
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;
 
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+                         gfp_t gfpmask)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
-               page = read_mapping_page(mapping, i, NULL);
+               page = read_cache_page_gfp(mapping, i,
+                                          mapping_gfp_mask (mapping) |
+                                          __GFP_COLD |
+                                          gfpmask);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        i915_gem_object_put_pages(obj);
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
-       bool retry_alloc = false;
+       gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
        int ret;
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
-       if (retry_alloc) {
-               i915_gem_object_set_page_gfp_mask (obj,
-                                                  i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-       }
-       ret = i915_gem_object_get_pages(obj);
-       if (retry_alloc) {
-               i915_gem_object_set_page_gfp_mask (obj,
-                                                  i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-       }
+       ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                        ret = i915_gem_evict_something(dev, obj->size);
                        if (ret) {
                                /* now try to shrink everyone else */
-                               if (! retry_alloc) {
-                                   retry_alloc = true;
-                                   goto search_free;
+                               if (gfpmask) {
+                                       gfpmask = 0;
+                                       goto search_free;
                                }
 
                                return ret;
@@ -2837,6 +2817,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       uint32_t old_write_domain, old_read_domains;
+       int ret;
+
+       /* Not valid to be called on unbound objects. */
+       if (obj_priv->gtt_space == NULL)
+               return -EINVAL;
+
+       i915_gem_object_flush_gpu_write_domain(obj);
+
+       /* Wait on any GPU rendering and flushing to occur. */
+       if (obj_priv->active) {
+#if WATCH_BUF
+               DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+               if (ret != 0)
+                       return ret;
+       }
+
+       old_write_domain = obj->write_domain;
+       old_read_domains = obj->read_domains;
+
+       obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+       i915_gem_object_flush_cpu_write_domain(obj);
+
+       /* It should now be out of any other write domains, and we can update
+        * the domain values for our changes.
+        */
+       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
+       obj->write_domain = I915_GEM_DOMAIN_GTT;
+       obj_priv->dirty = 1;
+
+       trace_i915_gem_object_change_domain(obj,
+                                           old_read_domains,
+                                           old_write_domain);
+
+       return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -4000,8 +4031,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
-       } else {
-               DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
        }
 
        drm_free_large(exec_list);
@@ -4897,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        if (!obj_priv->phys_obj)
                return;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_get_pages(obj, 0);
        if (ret)
                goto out;
 
@@ -4955,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
 
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_object_get_pages(obj, 0);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;