drm/i915: fix hibernation since i915 self-reclaim fixes
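The change tied to the subject line is the hunk in i915_gem_object_get_pages(): backing pages are now read back with an explicit GFP_HIGHUSER mask instead of the mapping's own gfp mask. A minimal sketch of the call after the change, reusing only the names visible in the diff context below (the rest of the blobdiff covers other changes in the same range, such as the ring abstraction and kmap conversions):

	page = read_cache_page_gfp(mapping, i,
				   GFP_HIGHUSER |	/* was mapping_gfp_mask(mapping) */
				   __GFP_COLD |
				   gfpmask);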
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95dbe5628a25caf63a7d2d188548698c6ea9da12..074385882ccfe721ff5630b9e825950c6479af7e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
                obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
        char *dst_vaddr, *src_vaddr;
 
-       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-       if (dst_vaddr == NULL)
-               return -ENOMEM;
-
-       src_vaddr = kmap_atomic(src_page, KM_USER1);
-       if (src_vaddr == NULL) {
-               kunmap_atomic(dst_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       dst_vaddr = kmap(dst_page);
+       src_vaddr = kmap(src_page);
 
        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-       kunmap_atomic(src_vaddr, KM_USER1);
-       kunmap_atomic(dst_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(src_page);
+       kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                                               cpu_page, cpu_offset, length);
        }
 
-       gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-       if (gpu_vaddr == NULL)
-               return -ENOMEM;
-
-       cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-       if (cpu_vaddr == NULL) {
-               kunmap_atomic(gpu_vaddr, KM_USER0);
-               return -ENOMEM;
-       }
+       gpu_vaddr = kmap(gpu_page);
+       cpu_vaddr = kmap(cpu_page);
 
        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                length -= this_length;
        }
 
-       kunmap_atomic(cpu_vaddr, KM_USER1);
-       kunmap_atomic(gpu_vaddr, KM_USER0);
-
-       return 0;
+       kunmap(cpu_page);
+       kunmap(gpu_page);
 }
 
 /**
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   1);
-               } else {
-                       ret = slow_shmem_copy(user_pages[data_page_index],
-                                             data_page_offset,
-                                             obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
-                                             page_length);
+                                             user_pages[data_page_index],
+                                             data_page_offset,
+                                             page_length,
+                                             1);
+               } else {
+                       slow_shmem_copy(user_pages[data_page_index],
+                                       data_page_offset,
+                                       obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -529,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
  * page faults
  */
 
-static inline int
+static inline void
 slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
 {
-       char *src_vaddr, *dst_vaddr;
-       unsigned long unwritten;
+       char __iomem *dst_vaddr;
+       char *src_vaddr;
 
-       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
-       src_vaddr = kmap_atomic(user_page, KM_USER1);
-       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
-                                                     src_vaddr + user_offset,
-                                                     length);
-       kunmap_atomic(src_vaddr, KM_USER1);
-       io_mapping_unmap_atomic(dst_vaddr);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+       src_vaddr = kmap(user_page);
+
+       memcpy_toio(dst_vaddr + gtt_offset,
+                   src_vaddr + user_offset,
+                   length);
+
+       kunmap(user_page);
+       io_mapping_unmap(dst_vaddr);
 }
 
 static inline int
@@ -720,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
-                                       gtt_page_base, gtt_page_offset,
-                                       user_pages[data_page_index],
-                                       data_page_offset,
-                                       page_length);
-
-               /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available.  Return the error and we'll
-                * retry in the slow path.
-                */
-               if (ret)
-                       goto out_unpin_object;
+               slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                 gtt_page_base, gtt_page_offset,
+                                 user_pages[data_page_index],
+                                 data_page_offset,
+                                 page_length);
 
                remain -= page_length;
                offset += page_length;
@@ -900,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                        page_length = PAGE_SIZE - data_page_offset;
 
                if (do_bit17_swizzling) {
-                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                   shmem_page_offset,
-                                                   user_pages[data_page_index],
-                                                   data_page_offset,
-                                                   page_length,
-                                                   0);
-               } else {
-                       ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                       slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
-                                             page_length);
+                                             page_length,
+                                             0);
+               } else {
+                       slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                       shmem_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
                }
-               if (ret)
-                       goto fail_put_pages;
 
                remain -= page_length;
                data_ptr += page_length;
@@ -971,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0) {
+                dev->gtt_total != 0 &&
+                obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1482,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+                              struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       BUG_ON(ring == NULL);
+       obj_priv->ring = ring;
 
        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
@@ -1495,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
        }
        /* Move from whatever list we were on to the tail of execution. */
        spin_lock(&dev_priv->mm.active_list_lock);
-       list_move_tail(&obj_priv->list,
-                      &dev_priv->mm.active_list);
+       list_move_tail(&obj_priv->list, &ring->active_list);
        spin_unlock(&dev_priv->mm.active_list_lock);
        obj_priv->last_rendering_seqno = seqno;
 }
@@ -1549,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
        obj_priv->last_rendering_seqno = 0;
+       obj_priv->ring = NULL;
        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
@@ -1558,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno)
+                              uint32_t flush_domains, uint32_t seqno,
+                              struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv, *next;
@@ -1569,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                struct drm_gem_object *obj = &obj_priv->base;
 
                if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain) {
+                   obj->write_domain &&
+                   obj_priv->ring->ring_flag == ring->ring_flag) {
                        uint32_t old_write_domain = obj->write_domain;
 
                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, seqno);
+                       i915_gem_object_move_to_active(obj, seqno, ring);
 
                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1590,9 +1566,10 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                }
        }
 }
+
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains)
+                uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_file_private *i915_file_priv = NULL;
@@ -1607,14 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (request == NULL)
                return 0;
 
-       seqno = i915_ring_add_request(dev);
-
-       DRM_DEBUG_DRIVER("%d\n", seqno);
+       seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 
        request->seqno = seqno;
+       request->ring = ring;
        request->emitted_jiffies = jiffies;
-       was_empty = list_empty(&dev_priv->mm.request_list);
-       list_add_tail(&request->list, &dev_priv->mm.request_list);
+       was_empty = list_empty(&ring->request_list);
+       list_add_tail(&request->list, &ring->request_list);
+
        if (i915_file_priv) {
                list_add_tail(&request->client_list,
                              &i915_file_priv->mm.request_list);
@@ -1626,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
         * domain we're flushing with our flush.
         */
        if (flush_domains != 0) 
-               i915_gem_process_flushing_list(dev, flush_domains, seqno);
+               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 
        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1643,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
-       RING_LOCALS;
 
        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-       BEGIN_LP_RING(2);
-       OUT_RING(cmd);
-       OUT_RING(0); /* noop */
-       ADVANCE_LP_RING();
+
+       ring->flush(dev, ring,
+                       I915_GEM_DOMAIN_COMMAND, flush_domains);
        return flush_domains;
 }
 
@@ -1676,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev,
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&dev_priv->mm.active_list)) {
+       while (!list_empty(&request->ring->active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;
 
-               obj_priv = list_first_entry(&dev_priv->mm.active_list,
+               obj_priv = list_first_entry(&request->ring->active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = &obj_priv->base;
@@ -1727,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+                  struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       if (HAS_PIPE_CONTROL(dev))
-               return ((volatile u32 *)(dev_priv->seqno_page))[0];
-       else
-               return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       return ring->get_gem_seqno(dev, ring);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+       if (!ring->status_page.page_addr
+                       || list_empty(&ring->request_list))
                return;
 
-       seqno = i915_get_gem_seqno(dev);
+       seqno = i915_get_gem_seqno(dev, ring);
 
-       while (!list_empty(&dev_priv->mm.request_list)) {
+       while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;
 
-               request = list_first_entry(&dev_priv->mm.request_list,
+               request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;
@@ -1773,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               i915_user_irq_put(dev);
+
+               ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 }
@@ -1789,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
        dev = dev_priv->dev;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
        if (!dev_priv->mm.suspended &&
-           !list_empty(&dev_priv->mm.request_list))
+               (!list_empty(&dev_priv->render_ring.request_list) ||
+                       (HAS_BSD(dev) &&
+                        !list_empty(&dev_priv->bsd_ring.request_list))))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
 
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+               int interruptible, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1808,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
        if (atomic_read(&dev_priv->mm.wedged))
                return -EIO;
 
-       if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1822,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
                trace_i915_gem_request_wait_begin(dev, seqno);
 
-               dev_priv->mm.waiting_gem_seqno = seqno;
-               i915_user_irq_get(dev);
+               ring->waiting_gem_seqno = seqno;
+               ring->user_irq_get(dev, ring);
                if (interruptible)
-                       ret = wait_event_interruptible(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-                               atomic_read(&dev_priv->mm.wedged));
+                       ret = wait_event_interruptible(ring->irq_queue,
+                               i915_seqno_passed(
+                                       ring->get_gem_seqno(dev, ring), seqno)
+                               || atomic_read(&dev_priv->mm.wedged));
                else
-                       wait_event(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-                               atomic_read(&dev_priv->mm.wedged));
+                       wait_event(ring->irq_queue,
+                               i915_seqno_passed(
+                                       ring->get_gem_seqno(dev, ring), seqno)
+                               || atomic_read(&dev_priv->mm.wedged));
 
-               i915_user_irq_put(dev);
-               dev_priv->mm.waiting_gem_seqno = 0;
+               ring->user_irq_put(dev, ring);
+               ring->waiting_gem_seqno = 0;
 
                trace_i915_gem_request_wait_end(dev, seqno);
        }
@@ -1843,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-                         __func__, ret, seqno, i915_get_gem_seqno(dev));
+                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 
        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
@@ -1851,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, ring);
 
        return ret;
 }
@@ -1861,11 +1842,42 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+               struct intel_ring_buffer *ring)
 {
-       return i915_do_wait_request(dev, seqno, 1);
+       return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
+static void
+i915_gem_flush(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+                       invalidate_domains,
+                       flush_domains);
+
+       if (HAS_BSD(dev))
+               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+                               invalidate_domains,
+                               flush_domains);
+}
+
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains,
+              struct intel_ring_buffer *ring)
+{
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       ring->flush(dev, ring,
+                       invalidate_domains,
+                       flush_domains);
+}
 
 /**
  * Ensures that all rendering to the object has completed and the object is
@@ -1891,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+               ret = i915_wait_request(dev,
+                               obj_priv->last_rendering_seqno, obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
@@ -2007,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno;
+       uint32_t seqno1, seqno2;
+       int ret;
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
+       lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) ||
+                       list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
 
        if (lists_empty)
@@ -2019,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-       if (seqno == 0)
+       seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+                       &dev_priv->render_ring);
+       if (seqno1 == 0)
                return -ENOMEM;
+       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev)) {
+               seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+                               &dev_priv->bsd_ring);
+               if (seqno2 == 0)
+                       return -ENOMEM;
+
+               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+               if (ret)
+                       return ret;
+       }
 
-       return i915_wait_request(dev, seqno);
+
+       return ret;
 }
 
 static int
@@ -2036,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev)
+                       || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
 
        if (lists_empty)
@@ -2056,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev)
+                       || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);
 
@@ -2070,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
        struct drm_gem_object *obj;
        int ret;
 
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+       struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
        for (;;) {
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, render_ring);
+
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, bsd_ring);
 
                /* If there's an inactive buffer available now, grab it
                 * and be done.
@@ -2095,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                 * things, wait for the next to finish and hopefully leave us
                 * a buffer to evict.
                 */
-               if (!list_empty(&dev_priv->mm.request_list)) {
+               if (!list_empty(&render_ring->request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&render_ring->request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_wait_request(dev,
+                                       request->seqno, request->ring);
+                       if (ret)
+                               return ret;
+
+                       continue;
+               }
+
+               if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
                        struct drm_i915_gem_request *request;
 
-                       request = list_first_entry(&dev_priv->mm.request_list,
+                       request = list_first_entry(&bsd_ring->request_list,
                                                   struct drm_i915_gem_request,
                                                   list);
 
-                       ret = i915_wait_request(dev, request->seqno);
+                       ret = i915_wait_request(dev,
+                                       request->seqno, request->ring);
                        if (ret)
                                return ret;
 
@@ -2129,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                        if (obj != NULL) {
                                uint32_t seqno;
 
-                               i915_gem_flush(dev,
+                               i915_gem_flush_ring(dev,
+                                              obj->write_domain,
                                               obj->write_domain,
-                                              obj->write_domain);
-                               seqno = i915_add_request(dev, NULL, obj->write_domain);
+                                              obj_priv->ring);
+                               seqno = i915_add_request(dev, NULL,
+                                               obj->write_domain,
+                                               obj_priv->ring);
                                if (seqno == 0)
                                        return -ENOMEM;
                                continue;
@@ -2160,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        struct inode *inode;
        struct page *page;
 
+       BUG_ON(obj_priv->pages_refcount
+                       == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
+
        if (obj_priv->pages_refcount++ != 0)
                return 0;
 
@@ -2178,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
-                                          mapping_gfp_mask (mapping) |
+                                          GFP_HIGHUSER |
                                           __GFP_COLD |
                                           gfpmask);
                if (IS_ERR(page))
@@ -2558,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                return -EINVAL;
        }
 
+       /* If the object is bigger than the entire aperture, reject it early
+        * before evicting everything in a vain attempt to find space.
+        */
+       if (obj->size > dev->gtt_total) {
+               DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+               return -E2BIG;
+       }
+
  search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
@@ -2668,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;
@@ -2675,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       (void) i915_add_request(dev, NULL, obj->write_domain);
+       (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
        BUG_ON(obj->write_domain);
 
        trace_i915_gem_object_change_domain(obj,
@@ -2815,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+               ret = i915_do_wait_request(dev,
+                               obj_priv->last_rendering_seqno,
+                               0,
+                               obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
 
+       i915_gem_object_flush_cpu_write_domain(obj);
+
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 
-       obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-       i915_gem_object_flush_cpu_write_domain(obj);
-
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->read_domains |= I915_GEM_DOMAIN_GTT;
+       obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = I915_GEM_DOMAIN_GTT;
        obj_priv->dirty = 1;
 
@@ -3215,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                     obj_priv->tiling_mode != I915_TILING_NONE;
 
        /* Check fence reg constraints and rebind if necessary */
-       if (need_fence && !i915_gem_object_fence_offset_ok(obj,
-           obj_priv->tiling_mode))
-               i915_gem_object_unbind(obj);
+       if (need_fence &&
+           !i915_gem_object_fence_offset_ok(obj,
+                                            obj_priv->tiling_mode)) {
+               ret = i915_gem_object_unbind(obj);
+               if (ret)
+                       return ret;
+       }
 
        /* Choose the GTT offset for our buffer and put it there. */
        ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3231,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        if (need_fence) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret != 0) {
-                       if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to install fence: %d\n",
-                                         ret);
                        i915_gem_object_unpin(obj);
                        return ret;
                }
@@ -3434,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ret = i915_wait_request(dev, request->seqno);
+               ret = i915_wait_request(dev, request->seqno, request->ring);
                if (ret != 0)
                        break;
        }
@@ -3591,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        uint32_t seqno, flush_domains, reloc_index;
        int pin_tries, flips;
 
+       struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
+       if (args->flags & I915_EXEC_BSD) {
+               if (!HAS_BSD(dev)) {
+                       DRM_ERROR("execbuf with wrong flag\n");
+                       return -EINVAL;
+               }
+               ring = &dev_priv->bsd_ring;
+       } else {
+               ring = &dev_priv->render_ring;
+       }
+
 
        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3707,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret != -ENOSPC || pin_tries >= 1) {
                        if (ret != -ERESTARTSYS) {
                                unsigned long long total_size = 0;
-                               for (i = 0; i < args->buffer_count; i++)
+                               int num_fences = 0;
+                               for (i = 0; i < args->buffer_count; i++) {
+                                       obj_priv = object_list[i]->driver_private;
+
                                        total_size += object_list[i]->size;
-                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+                                       num_fences +=
+                                               exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+                                               obj_priv->tiling_mode != I915_TILING_NONE;
+                               }
+                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
                                          pinned+1, args->buffer_count,
-                                         total_size, ret);
+                                         total_size, num_fences,
+                                         ret);
                                DRM_ERROR("%d objects [%d pinned], "
                                          "%d object bytes [%d pinned], "
                                          "%d/%d gtt bytes\n",
@@ -3781,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
-               if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+               if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
                        (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains);
+                                       dev->flush_domains,
+                                       &dev_priv->render_ring);
+
+                       if (HAS_BSD(dev))
+                               (void)i915_add_request(dev, file_priv,
+                                               dev->flush_domains,
+                                               &dev_priv->bsd_ring);
+               }
        }
 
        for (i = 0; i < args->buffer_count; i++) {
@@ -3820,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+                       cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -3830,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev);
+       flush_domains = i915_retire_commands(dev, ring);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -3841,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * *some* interrupts representing completion of buffers that we can
         * wait on when trying to clear up gtt space).
         */
-       seqno = i915_add_request(dev, file_priv, flush_domains);
+       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
        BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
+               obj_priv = to_intel_bo(obj);
 
-               i915_gem_object_move_to_active(obj, seqno);
+               i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
                DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -3958,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
-       exec2.flags = 0;
+       exec2.flags = I915_EXEC_RENDER;
 
        ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
        if (!ret) {
@@ -4044,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
+       BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+
        i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       if (obj_priv->gtt_space != NULL) {
+               if (alignment == 0)
+                       alignment = i915_gem_get_gtt_alignment(obj);
+               if (obj_priv->gtt_offset & (alignment - 1)) {
+                       ret = i915_gem_object_unbind(obj);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
                if (ret)
@@ -4197,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4211,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         * actually unmasked, and our working set ends up being larger than
         * required.
         */
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
        obj_priv = to_intel_bo(obj);
        /* Don't count being on the flushing list against the object being
@@ -4378,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) {
+       if (dev_priv->mm.suspended ||
+                       (dev_priv->render_ring.gem_object == NULL) ||
+                       (HAS_BSD(dev) &&
+                        dev_priv->bsd_ring.gem_object == NULL)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4420,7 +4541,7 @@ i915_gem_idle(struct drm_device *dev)
  * 965+ support PIPE_CONTROL commands, which provide finer grained control
  * over cache flushing.
  */
-int
+static int
 i915_gem_init_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4459,7 +4580,8 @@ err:
        return ret;
 }
 
-void
+
+static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4476,6 +4598,60 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
        dev_priv->seqno_page = NULL;
 }
 
+int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       dev_priv->render_ring = render_ring;
+
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr
+                       = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr,
+                               0, PAGE_SIZE);
+       }
+
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       return ret;
+       }
+
+       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       if (ret)
+               goto cleanup_pipe_control;
+
+       if (HAS_BSD(dev)) {
+               dev_priv->bsd_ring = bsd_ring;
+               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               if (ret)
+                       goto cleanup_render_ring;
+       }
+
+       return 0;
+
+cleanup_render_ring:
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+       return ret;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_BSD(dev))
+               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+}
+
 int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
@@ -4501,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        spin_lock(&dev_priv->mm.active_list_lock);
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);
 
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
        mutex_unlock(&dev->struct_mutex);
 
        drm_irq_install(dev);
@@ -4545,18 +4723,20 @@ i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        spin_lock_init(&dev_priv->mm.active_list_lock);
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-       INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+       if (HAS_BSD(dev)) {
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+       }
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
-       dev_priv->mm.next_gem_seqno = 1;
-
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
@@ -4788,7 +4968,9 @@ i915_gpu_is_active(struct drm_device *dev)
 
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
+                     list_empty(&dev_priv->render_ring.active_list);
+       if (HAS_BSD(dev))
+               lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
        spin_unlock(&dev_priv->mm.active_list_lock);
 
        return !lists_empty;
@@ -4833,8 +5015,10 @@ rescan:
                        continue;
 
                spin_unlock(&shrink_list_lock);
+               i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
-               i915_gem_retire_requests(dev);
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,