drm/i915: introduce intel_ring_buffer structure (V2)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71a9a8a0ce0e1ab61a9731afed1f5f8..58b6e814fae198d6cf74b5008d9dea98625d3bce 100644 (file)
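For orientation before the hunks: this patch replaces the GEM core's direct ring emission (RING_LOCALS, BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING) with calls through a per-ring method table. A minimal sketch of what struct intel_ring_buffer must provide, inferred purely from the call sites in this file; the real declaration lives in intel_ringbuffer.h and is more complete:

/* Sketch only: members inferred from usage below. The nested status-page
 * type is a guess; the function-pointer signatures match the calls made
 * in this file. */
struct intel_ring_buffer {
	struct drm_gem_object *gem_object;	/* backing ring object */
	struct {
		void *page_addr;		/* CPU address of the status page */
	} status_page;

	u32  (*add_request)(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    struct drm_file *file_priv,
			    u32 flush_domains);
	void (*flush)(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      u32 invalidate_domains,
		      u32 flush_domains);
	void (*user_irq_get)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	int  (*dispatch_gem_execbuffer)(struct drm_device *dev,
					struct intel_ring_buffer *ring,
					struct drm_i915_gem_execbuffer2 *exec,
					struct drm_clip_rect *cliprects,
					uint64_t exec_offset);
};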
@@ -35,8 +35,6 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-#define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -124,7 +122,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        args->size = roundup(args->size, PAGE_SIZE);
 
        /* Allocate the new object */
-       obj = drm_gem_object_alloc(dev, args->size);
+       obj = i915_gem_alloc_object(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;
 
@@ -1051,7 +1049,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                 * about to occur.
                 */
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-                       list_move_tail(&obj_priv->fence_list,
+                       struct drm_i915_fence_reg *reg =
+                               &dev_priv->fence_regs[obj_priv->fence_reg];
+                       list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                }
 
@@ -1566,7 +1566,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        list_for_each_entry_safe(obj_priv, next,
                                 &dev_priv->mm.gpu_write_list,
                                 gpu_write_list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
 
                if ((obj->write_domain & flush_domains) ==
                    obj->write_domain) {
@@ -1577,9 +1577,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                        i915_gem_object_move_to_active(obj, seqno);
 
                        /* update the fence lru list */
-                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-                               list_move_tail(&obj_priv->fence_list,
+                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                               struct drm_i915_fence_reg *reg =
+                                       &dev_priv->fence_regs[obj_priv->fence_reg];
+                               list_move_tail(&reg->lru_list,
                                                &dev_priv->mm.fence_list);
+                       }
 
                        trace_i915_gem_object_change_domain(obj,
                                                            obj->read_domains,
@@ -1588,21 +1591,6 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        }
 }
 
-#define PIPE_CONTROL_FLUSH(addr)                                       \
-       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
-                PIPE_CONTROL_DEPTH_STALL);                             \
-       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
-       OUT_RING(0);                                                    \
-       OUT_RING(0);                                                    \
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 uint32_t flush_domains)
@@ -1612,7 +1600,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
-       RING_LOCALS;
 
        if (file_priv != NULL)
                i915_file_priv = file_priv->driver_priv;
@@ -1621,55 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (request == NULL)
                return 0;
 
-       /* Grab the seqno we're going to make this request be, and bump the
-        * next (skipping 0 so it can be the reserved no-seqno value).
-        */
-       seqno = dev_priv->mm.next_gem_seqno;
-       dev_priv->mm.next_gem_seqno++;
-       if (dev_priv->mm.next_gem_seqno == 0)
-               dev_priv->mm.next_gem_seqno++;
-
-       if (HAS_PIPE_CONTROL(dev)) {
-               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
-
-               /*
-                * Workaround qword write incoherence by flushing the
-                * PIPE_NOTIFY buffers out to memory before requesting
-                * an interrupt.
-                */
-               BEGIN_LP_RING(32);
-               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-               OUT_RING(seqno);
-               OUT_RING(0);
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128; /* write to separate cachelines */
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               scratch_addr += 128;
-               PIPE_CONTROL_FLUSH(scratch_addr);
-               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-                        PIPE_CONTROL_NOTIFY);
-               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-               OUT_RING(seqno);
-               OUT_RING(0);
-               ADVANCE_LP_RING();
-       } else {
-               BEGIN_LP_RING(4);
-               OUT_RING(MI_STORE_DWORD_INDEX);
-               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(seqno);
-
-               OUT_RING(MI_USER_INTERRUPT);
-               ADVANCE_LP_RING();
-       }
+       seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
+                                                 file_priv, flush_domains);
 
        DRM_DEBUG_DRIVER("%d\n", seqno);
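The seqno bookkeeping and the PIPE_CONTROL/MI_STORE_DWORD_INDEX emission deleted above presumably reappear behind the ring's add_request hook in intel_ringbuffer.c. A skeleton of that hook under that assumption (function name hypothetical; emission elided):

static u32
render_ring_add_request(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_file *file_priv,
			u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	/* Same seqno allocation the removed hunk performed, skipping 0
	 * so it can remain the reserved no-seqno value. */
	seqno = dev_priv->mm.next_gem_seqno;
	if (++dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno = 1;

	/* ...followed by the HAS_PIPE_CONTROL(dev) workaround path or the
	 * MI_STORE_DWORD_INDEX + MI_USER_INTERRUPT emission deleted above... */

	return seqno;
}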
 
@@ -1707,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
-       RING_LOCALS;
 
        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
@@ -1745,7 +1683,7 @@ i915_gem_retire_request(struct drm_device *dev,
                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
-               obj = obj_priv->obj;
+               obj = &obj_priv->base;
 
                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
@@ -1808,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+       struct intel_ring_buffer *ring = &(dev_priv->render_ring);
+       if (!ring->status_page.page_addr
+                       || list_empty(&dev_priv->mm.request_list))
                return;
 
        seqno = i915_get_gem_seqno(dev);
@@ -1835,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               i915_user_irq_put(dev);
+
+               ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 }
@@ -1865,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
        u32 ier;
        int ret = 0;
 
+       struct intel_ring_buffer *ring = &dev_priv->render_ring;
        BUG_ON(seqno == 0);
 
        if (atomic_read(&dev_priv->mm.wedged))
@@ -1885,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
                trace_i915_gem_request_wait_begin(dev, seqno);
 
                dev_priv->mm.waiting_gem_seqno = seqno;
-               i915_user_irq_get(dev);
+               ring->user_irq_get(dev, ring);
                if (interruptible)
                        ret = wait_event_interruptible(dev_priv->irq_queue,
                                i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
@@ -1895,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
                                i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));
 
-               i915_user_irq_put(dev);
+               ring->user_irq_put(dev, ring);
                dev_priv->mm.waiting_gem_seqno = 0;
 
                trace_i915_gem_request_wait_end(dev, seqno);
@@ -1928,77 +1870,18 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
        return i915_do_wait_request(dev, seqno, 1);
 }
 
+
 static void
 i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd;
-       RING_LOCALS;
-
-#if WATCH_EXEC
-       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-                 invalidate_domains, flush_domains);
-#endif
-       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
-                                    invalidate_domains, flush_domains);
-
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
-
-       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
-               /*
-                * read/write caches:
-                *
-                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-                * also flushed at 2d versus 3d pipeline switches.
-                *
-                * read-only caches:
-                *
-                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-                * MI_READ_FLUSH is set, and is always flushed on 965.
-                *
-                * I915_GEM_DOMAIN_COMMAND may not exist?
-                *
-                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-                * invalidated when MI_EXE_FLUSH is set.
-                *
-                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-                * invalidated with every MI_FLUSH.
-                *
-                * TLBs:
-                *
-                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-                * are flushed at any MI_FLUSH.
-                */
-
-               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-               if ((invalidate_domains|flush_domains) &
-                   I915_GEM_DOMAIN_RENDER)
-                       cmd &= ~MI_NO_WRITE_FLUSH;
-               if (!IS_I965G(dev)) {
-                       /*
-                        * On the 965, the sampler cache always gets flushed
-                        * and this bit is reserved.
-                        */
-                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                               cmd |= MI_READ_FLUSH;
-               }
-               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-                       cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
-               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-               BEGIN_LP_RING(2);
-               OUT_RING(cmd);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
-       }
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+                       invalidate_domains,
+                       flush_domains);
 }
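Note the asymmetry the new i915_gem_flush keeps: the CPU-domain chipset flush stays in the GEM core, while all GPU-domain flushing moves behind ring->flush. Assuming the deleted MI_FLUSH command computation relocates unchanged, the ring-side hook would look roughly like this (hypothetical name; the actual emission through the ring's own helpers is elided):

static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32 invalidate_domains, u32 flush_domains)
{
	u32 cmd;

	if (!((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS))
		return;

	/* Same command computation the removed hunk performed. */
	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (!IS_I965G(dev) && (invalidate_domains & I915_GEM_DOMAIN_SAMPLER))
		cmd |= MI_READ_FLUSH;	/* reserved bit on 965 */
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	/* ...emit cmd followed by MI_NOOP via the ring's begin/emit/advance
	 * helpers rather than the removed LP_RING macros... */
}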
 
 /**
@@ -2119,7 +2002,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 
        /* Try to find the smallest clean object */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
                if (obj->size >= min_size) {
                        if ((!obj_priv->dirty ||
                             i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2136,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 
                        /* Find an object that we can immediately reuse */
                        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-                               obj = obj_priv->obj;
+                               obj = &obj_priv->base;
                                if (obj->size >= min_size)
                                        break;
 
@@ -2485,9 +2368,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
 
        /* None available, try to steal one or wait for a user to finish */
        i = I915_FENCE_REG_NONE;
-       list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
-                           fence_list) {
-               obj = obj_priv->obj;
+       list_for_each_entry(reg, &dev_priv->mm.fence_list,
+                           lru_list) {
+               obj = reg->obj;
+               obj_priv = to_intel_bo(obj);
 
                if (obj_priv->pin_count)
                        continue;
@@ -2536,7 +2420,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        /* Just update our place in the LRU if our fence is getting used. */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+               list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
                return 0;
        }
 
@@ -2566,7 +2451,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        obj_priv->fence_reg = ret;
        reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+       list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
        reg->obj = obj;
 
@@ -2598,6 +2483,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg =
+               &dev_priv->fence_regs[obj_priv->fence_reg];
 
        if (IS_GEN6(dev)) {
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2503,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
                I915_WRITE(fence_reg, 0);
        }
 
-       dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+       reg->obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
-       list_del_init(&obj_priv->fence_list);
+       list_del_init(&reg->lru_list);
 }
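The fence hunks above and below all make the same move: the LRU linkage shifts from the object (the old obj_priv->fence_list) into the fence register itself (reg->lru_list). A sketch of the register struct as this diff uses it, with fields inferred from reg->obj, reg->lru_list, and the 16-entry INIT_LIST_HEAD loop added to i915_gem_load further down; the actual definition is in i915_drv.h:

/* Sketch: a per-register LRU node replaces the per-object fence_list. */
struct drm_i915_fence_reg {
	struct list_head lru_list;	/* position on dev_priv->mm.fence_list */
	struct drm_gem_object *obj;	/* object currently holding this register */
};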
 
 /**
@@ -3536,62 +3423,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        return 0;
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct drm_i915_gem_execbuffer2 *exec,
-                             struct drm_clip_rect *cliprects,
-                             uint64_t exec_offset)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int nbox = exec->num_cliprects;
-       int i = 0, count;
-       uint32_t exec_start, exec_len;
-       RING_LOCALS;
-
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-       exec_len = (uint32_t) exec->batch_len;
-
-       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-       count = nbox ? nbox : 1;
-
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       int ret = i915_emit_box(dev, cliprects, i,
-                                               exec->DR1, exec->DR4);
-                       if (ret)
-                               return ret;
-               }
-
-               if (IS_I830(dev) || IS_845G(dev)) {
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_BATCH_BUFFER);
-                       OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       OUT_RING(exec_start + exec_len - 4);
-                       OUT_RING(0);
-                       ADVANCE_LP_RING();
-               } else {
-                       BEGIN_LP_RING(2);
-                       if (IS_I965G(dev)) {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6) |
-                                        MI_BATCH_NON_SECURE_I965);
-                               OUT_RING(exec_start);
-                       } else {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6));
-                               OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       }
-                       ADVANCE_LP_RING();
-               }
-       }
-
-       /* XXX breadcrumb */
-       return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -4006,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
+                                                           &dev_priv->render_ring,
+                                                           args,
+                                                           cliprects,
+                                                           exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -4471,34 +4306,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+                                             size_t size)
 {
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
-       obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
-       if (obj_priv == NULL)
-               return -ENOMEM;
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (obj == NULL)
+               return NULL;
 
-       /*
-        * We've just allocated pages from the kernel,
-        * so they've just been written by the CPU with
-        * zeros. They'll need to be clflushed before we
-        * use them with the GPU.
-        */
-       obj->write_domain = I915_GEM_DOMAIN_CPU;
-       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+               kfree(obj);
+               return NULL;
+       }
 
-       obj_priv->agp_type = AGP_USER_MEMORY;
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-       obj->driver_private = obj_priv;
-       obj_priv->obj = obj;
-       obj_priv->fence_reg = I915_FENCE_REG_NONE;
-       INIT_LIST_HEAD(&obj_priv->list);
-       INIT_LIST_HEAD(&obj_priv->gpu_write_list);
-       INIT_LIST_HEAD(&obj_priv->fence_list);
-       obj_priv->madv = I915_MADV_WILLNEED;
+       obj->agp_type = AGP_USER_MEMORY;
+       obj->base.driver_private = NULL;
+       obj->fence_reg = I915_FENCE_REG_NONE;
+       INIT_LIST_HEAD(&obj->list);
+       INIT_LIST_HEAD(&obj->gpu_write_list);
+       obj->madv = I915_MADV_WILLNEED;
+
+       trace_i915_gem_object_create(&obj->base);
 
-       trace_i915_gem_object_create(obj);
+       return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+       BUG();
 
        return 0;
 }
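The alloc/init rewrite works because struct drm_gem_object is now embedded in struct drm_i915_gem_object as ->base rather than referenced through driver_private; that is also why every obj_priv->obj above became &obj_priv->base. A sketch of the embedding and the to_intel_bo() conversion it enables, assuming to_intel_bo is a plain container_of (real definitions in i915_drv.h):

#include <linux/kernel.h>	/* container_of() */

/* Sketch: one allocation now carries both the DRM core object and the
 * i915-private state, so freeing obj_priv frees both (see the
 * drm_gem_object_release() + kfree(obj_priv) pairing below). */
struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded, no longer a pointer */
	/* ... i915-private fields: list, gpu_write_list, fence_reg,
	 * agp_type, madv, ... */
};

#define to_intel_bo(obj) container_of(obj, struct drm_i915_gem_object, base)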
@@ -4521,9 +4360,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        if (obj_priv->mmap_offset)
                i915_gem_free_mmap_offset(obj);
 
+       drm_gem_object_release(obj);
+
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
-       kfree(obj->driver_private);
+       kfree(obj_priv);
 }
 
 /** Unbinds all inactive objects. */
@@ -4536,9 +4377,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
                struct drm_gem_object *obj;
                int ret;
 
-               obj = list_first_entry(&dev_priv->mm.inactive_list,
-                                      struct drm_i915_gem_object,
-                                      list)->obj;
+               obj = &list_first_entry(&dev_priv->mm.inactive_list,
+                                       struct drm_i915_gem_object,
+                                       list)->base;
 
                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
@@ -4558,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+       if (dev_priv->mm.suspended ||
+                       dev_priv->render_ring.gem_object == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4608,7 +4450,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       obj = drm_gem_object_alloc(dev, 4096);
+       obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
@@ -4639,71 +4481,6 @@ err:
        return ret;
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret;
-
-       /* If we need a physical address for the status page, it's already
-        * initialized at driver load time.
-        */
-       if (!I915_NEED_GFX_HWS(dev))
-               return 0;
-
-       obj = drm_gem_object_alloc(dev, 4096);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate status page\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-       obj_priv = to_intel_bo(obj);
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               goto err_unref;
-       }
-
-       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-       if (dev_priv->hw_status_page == NULL) {
-               DRM_ERROR("Failed to map status page.\n");
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               ret = -EINVAL;
-               goto err_unpin;
-       }
-
-       if (HAS_PIPE_CONTROL(dev)) {
-               ret = i915_gem_init_pipe_control(dev);
-               if (ret)
-                       goto err_unpin;
-       }
-
-       dev_priv->hws_obj = obj;
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
-       } else {
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
-       }
-       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
-       return 0;
-
-err_unpin:
-       i915_gem_object_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(obj);
-err:
-       return 0;
-}
 
 static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4722,146 +4499,25 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
        dev_priv->seqno_page = NULL;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-
-       if (dev_priv->hws_obj == NULL)
-               return;
-
-       obj = dev_priv->hws_obj;
-       obj_priv = to_intel_bo(obj);
-
-       kunmap(obj_priv->pages[0]);
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       dev_priv->hws_obj = NULL;
-
-       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-       dev_priv->hw_status_page = NULL;
-
-       if (HAS_PIPE_CONTROL(dev))
-               i915_gem_cleanup_pipe_control(dev);
-
-       /* Write high address into HWS_PGA when disabling. */
-       I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       drm_i915_ring_buffer_t *ring = &dev_priv->ring;
        int ret;
-       u32 head;
-
-       ret = i915_gem_init_hws(dev);
-       if (ret != 0)
-               return ret;
-
-       obj = drm_gem_object_alloc(dev, 128 * 1024);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate ringbuffer\n");
-               i915_gem_cleanup_hws(dev);
-               return -ENOMEM;
-       }
-       obj_priv = to_intel_bo(obj);
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return ret;
+       dev_priv->render_ring = render_ring;
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr
+                       = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr,
+                               0, PAGE_SIZE);
        }
-
-       /* Set up the kernel mapping for the ring. */
-       ring->Size = obj->size;
-
-       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-       ring->map.size = obj->size;
-       ring->map.type = 0;
-       ring->map.flags = 0;
-       ring->map.mtrr = 0;
-
-       drm_core_ioremap_wc(&ring->map, dev);
-       if (ring->map.handle == NULL) {
-               DRM_ERROR("Failed to map ringbuffer.\n");
-               memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return -EINVAL;
-       }
-       ring->ring_obj = obj;
-       ring->virtual_start = ring->map.handle;
-
-       /* Stop the ring if it's running. */
-       I915_WRITE(PRB0_CTL, 0);
-       I915_WRITE(PRB0_TAIL, 0);
-       I915_WRITE(PRB0_HEAD, 0);
-
-       /* Initialize the ring. */
-       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
-               DRM_ERROR("Ring head not reset to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               I915_WRITE(PRB0_HEAD, 0);
-
-               DRM_ERROR("Ring head forced to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-       }
-
-       I915_WRITE(PRB0_CTL,
-                  ((obj->size - 4096) & RING_NR_PAGES) |
-                  RING_NO_REPORT |
-                  RING_VALID);
-
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
-               DRM_ERROR("Ring initialization failed "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               return -EIO;
-       }
-
-       /* Update our cache of the ring state */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
-       else {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
-       }
-
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                          (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       return ret;
        }
-
-       return 0;
+       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       return ret;
 }
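The assignment dev_priv->render_ring = render_ring above copies a template by value, so each device gets private ring state while sharing one set of hooks. A plausible shape for that template, with hypothetical hook names (the real definition belongs in intel_ringbuffer.c):

/* Hypothetical template: the function names are guesses, but the slots
 * correspond one-to-one to the vtable calls introduced in this file. */
struct intel_ring_buffer render_ring = {
	.add_request		 = render_ring_add_request,
	.flush			 = render_ring_flush,
	.user_irq_get		 = render_ring_get_user_irq,
	.user_irq_put		 = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};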
 
 void
@@ -4869,17 +4525,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (dev_priv->ring.ring_obj == NULL)
-               return;
-
-       drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
-       i915_gem_object_unpin(dev_priv->ring.ring_obj);
-       drm_gem_object_unreference(dev_priv->ring.ring_obj);
-       dev_priv->ring.ring_obj = NULL;
-       memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
-       i915_gem_cleanup_hws(dev);
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
 }
 
 int
@@ -4957,6 +4605,8 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       for (i = 0; i < 16; i++)
+               INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
@@ -5184,6 +4834,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
        mutex_unlock(&dev->struct_mutex);
 }
 
+static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int lists_empty;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+                     list_empty(&dev_priv->mm.active_list);
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       return !lists_empty;
+}
+
 static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
@@ -5213,6 +4877,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
        spin_lock(&shrink_list_lock);
 
+rescan:
        /* first scan for clean buffers */
        list_for_each_entry_safe(dev_priv, next_dev,
                                 &shrink_list, mm.shrink_list) {
@@ -5229,7 +4894,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (i915_gem_object_is_purgeable(obj_priv)) {
-                               i915_gem_object_unbind(obj_priv->obj);
+                               i915_gem_object_unbind(&obj_priv->base);
                                if (--nr_to_scan <= 0)
                                        break;
                        }
@@ -5258,7 +4923,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (nr_to_scan > 0) {
-                               i915_gem_object_unbind(obj_priv->obj);
+                               i915_gem_object_unbind(&obj_priv->base);
                                nr_to_scan--;
                        } else
                                cnt++;
@@ -5270,6 +4935,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                would_deadlock = 0;
        }
 
+       if (nr_to_scan) {
+               int active = 0;
+
+               /*
+                * We are desperate for pages, so as a last resort, wait
+                * for the GPU to finish and discard whatever we can.
+                * This has a dramatic impact to reduce the number of
+                * OOM-killer events whilst running the GPU aggressively.
+                */
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (!mutex_trylock(&dev->struct_mutex))
+                               continue;
+
+                       spin_unlock(&shrink_list_lock);
+
+                       if (i915_gpu_is_active(dev)) {
+                               i915_gpu_idle(dev);
+                               active++;
+                       }
+
+                       spin_lock(&shrink_list_lock);
+                       mutex_unlock(&dev->struct_mutex);
+               }
+
+               if (active)
+                       goto rescan;
+       }
+
        spin_unlock(&shrink_list_lock);
 
        if (would_deadlock)