bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'drm-ttm-unmappable' into drm-core-next
author Dave Airlie <airlied@redhat.com>
	Tue, 20 Apr 2010 04:15:09 +0000 (14:15 +1000)
committer Dave Airlie <airlied@redhat.com>
	Tue, 20 Apr 2010 04:15:09 +0000 (14:15 +1000)
* drm-ttm-unmappable:
  drm/radeon/kms: enable use of unmappable VRAM V2
  drm/ttm: remove io_ field from TTM V6
  drm/vmwgfx: add support for new TTM fault callback V5
  drm/nouveau/kms: add support for new TTM fault callback V5
  drm/radeon/kms: add support for new fault callback V7
  drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
  drm/ttm: split no_wait argument in 2 GPU or reserve wait
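
The interface change carried by these commits, as it appears in the hunks below: ttm_bo_validate(), ttm_bo_mem_space() and the bo_move paths take separate no_wait_reserve and no_wait_gpu flags where they previously took a single no_wait, and the per-memory-type io_offset/io_size/io_addr fields are replaced by io_mem_reserve()/io_mem_free() callbacks plus an optional fault_reserve_notify() hook in struct ttm_bo_driver. A minimal sketch of a driver filling in the new hooks follows; the foo_* names and FOO_APERTURE_BASE are hypothetical, and only the TTM symbols that appear in this diff are assumed.

static int
foo_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	/* Describe how the CPU reaches this memory region. */
	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	if (mem->mem_type == TTM_PL_SYSTEM)
		return 0;	/* plain system RAM, nothing to ioremap */

	/* VRAM or an AGP aperture: report the bus window for this node.
	 * FOO_APERTURE_BASE stands in for the driver's real BAR/aperture base. */
	mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
	mem->bus.base = FOO_APERTURE_BASE;
	mem->bus.is_iomem = true;
	return 0;
}

static void
foo_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	/* Nothing to undo in this sketch. */
}

static int
foo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	/* A driver may migrate the bo into CPU-visible memory here before
	 * the fault handler maps it; returning 0 keeps the placement. */
	return 0;
}

static struct ttm_bo_driver foo_bo_driver = {
	/* ... existing hooks ... */
	.fault_reserve_notify	= &foo_fault_reserve_notify,
	.io_mem_reserve		= &foo_io_mem_reserve,
	.io_mem_free		= &foo_io_mem_free,
};

Call sites change accordingly, e.g. ttm_bo_validate(bo, &placement, false, false, false) in the nouveau hunks below, where the last two booleans are the reserve and GPU waits.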

Conflicts:
drivers/gpu/drm/nouveau/nouveau_bo.c

drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo_util.c

index 957d17629840254c1ccf5ad13139856cea232525,34be1924218f804d9de8fb2bcca62e4592ef3024..fb164efada3b29aecc9578c86c9f5aef53effdac
@@@ -34,7 -34,6 +34,7 @@@
  #include "nouveau_dma.h"
  
  #include <linux/log2.h>
 +#include <linux/slab.h>
  
  static void
  nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
@@@ -72,7 -71,7 +72,7 @@@ nouveau_bo_fixup_align(struct drm_devic
         * many small buffers.
         */
        if (dev_priv->card_type == NV_50) {
 -              uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
 +              uint32_t block_size = dev_priv->vram_size >> 15;
                int i;
  
                switch (tile_flags) {
@@@ -154,7 -153,7 +154,7 @@@ nouveau_bo_new(struct drm_device *dev, 
  
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
 -      nouveau_bo_placement_set(nvbo, flags);
 +      nouveau_bo_placement_set(nvbo, flags, 0);
  
        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
        return 0;
  }
  
 +static void
 +set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
 +{
 +      *n = 0;
 +
 +      if (type & TTM_PL_FLAG_VRAM)
 +              pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
 +      if (type & TTM_PL_FLAG_TT)
 +              pl[(*n)++] = TTM_PL_FLAG_TT | flags;
 +      if (type & TTM_PL_FLAG_SYSTEM)
 +              pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
 +}
 +
  void
 -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
 +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
  {
 -      int n = 0;
 -
 -      if (memtype & TTM_PL_FLAG_VRAM)
 -              nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
 -      if (memtype & TTM_PL_FLAG_TT)
 -              nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 -      if (memtype & TTM_PL_FLAG_SYSTEM)
 -              nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
 -      nvbo->placement.placement = nvbo->placements;
 -      nvbo->placement.busy_placement = nvbo->placements;
 -      nvbo->placement.num_placement = n;
 -      nvbo->placement.num_busy_placement = n;
 -
 -      if (nvbo->pin_refcnt) {
 -              while (n--)
 -                      nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
 -      }
 +      struct ttm_placement *pl = &nvbo->placement;
 +      uint32_t flags = TTM_PL_MASK_CACHING |
 +              (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 +
 +      pl->placement = nvbo->placements;
 +      set_placement_list(nvbo->placements, &pl->num_placement,
 +                         type, flags);
 +
 +      pl->busy_placement = nvbo->busy_placements;
 +      set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
 +                         type | busy, flags);
  }
  
  int
@@@ -207,7 -199,7 +207,7 @@@ nouveau_bo_pin(struct nouveau_bo *nvbo
  {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
 -      int ret, i;
 +      int ret;
  
        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
        if (ret)
                goto out;
  
 -      nouveau_bo_placement_set(nvbo, memtype);
 -      for (i = 0; i < nvbo->placement.num_placement; i++)
 -              nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 +      nouveau_bo_placement_set(nvbo, memtype, 0);
  
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@@ -250,7 -244,7 +250,7 @@@ nouveau_bo_unpin(struct nouveau_bo *nvb
  {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
 -      int ret, i;
 +      int ret;
  
        if (--nvbo->pin_refcnt)
                return 0;
        if (ret)
                return ret;
  
 -      for (i = 0; i < nvbo->placement.num_placement; i++)
 -              nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
 +      nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
  
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@@ -391,25 -386,16 +391,16 @@@ nouveau_bo_init_mem_type(struct ttm_bo_
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_MAPPABLE |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-               man->io_addr = NULL;
-               man->io_offset = drm_get_resource_start(dev, 1);
-               man->io_size = drm_get_resource_len(dev, 1);
-               if (man->io_size > dev_priv->vram_size)
-                       man->io_size = dev_priv->vram_size;
                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
-                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-                                    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
-               man->io_offset  = dev_priv->gart_info.aper_base;
-               man->io_size    = dev_priv->gart_info.aper_size;
-               man->io_addr   = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
@@@ -444,11 -426,10 +431,11 @@@ nouveau_bo_evict_flags(struct ttm_buffe
  
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
 -              nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
 +              nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
 +                                       TTM_PL_FLAG_SYSTEM);
                break;
        default:
 -              nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
 +              nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }
  
  
  static int
  nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-                             struct nouveau_bo *nvbo, bool evict, bool no_wait,
+                             struct nouveau_bo *nvbo, bool evict,
+                             bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
  {
        struct nouveau_fence *fence = NULL;
                return ret;
  
        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-                                       evict, no_wait, new_mem);
+                                       evict, no_wait_reserve, no_wait_gpu, new_mem);
        if (nvbo->channel && nvbo->channel != chan)
                ret = nouveau_fence_wait(fence, NULL, false, false);
        nouveau_fence_unref((void *)&fence);
@@@ -497,7 -479,8 +485,8 @@@ nouveau_bo_mem_ctxdma(struct nouveau_b
  
  static int
  nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    int no_wait, struct ttm_mem_reg *new_mem)
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem)
  {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
                dst_offset += (PAGE_SIZE * line_count);
        }
  
-       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
  }
  
  static int
  nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
  {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
  
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
  
        if (ret)
                goto out;
  
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
  
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
  out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
  
  static int
  nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
  {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
  
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
  
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
  
-       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;
  
@@@ -706,7 -691,8 +697,8 @@@ nouveau_bo_vm_cleanup(struct ttm_buffer
  
  static int
  nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-               bool no_wait, struct ttm_mem_reg *new_mem)
+               bool no_wait_reserve, bool no_wait_gpu,
+               struct ttm_mem_reg *new_mem)
  {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        /* Software copy if the card isn't up and running yet. */
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
            !dev_priv->channel) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }
  
  
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
  
        if (!ret)
                goto out;
  
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
  
  out:
        if (ret)
@@@ -762,6 -748,55 +754,55 @@@ nouveau_bo_verify_access(struct ttm_buf
        return 0;
  }
  
+ static int
+ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+       struct drm_device *dev = dev_priv->dev;
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               return 0;
+       case TTM_PL_TT:
+ #if __OS_HAS_AGP
+               if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = dev_priv->gart_info.aper_base;
+                       mem->bus.is_iomem = true;
+               }
+ #endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.base = drm_get_resource_start(dev, 1);
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static void
+ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+ }
+ static int
+ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+       return 0;
+ }
  struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
+       .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+       .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+       .io_mem_free = &nouveau_ttm_io_mem_free,
  };
  
index 6d1aa89ec870b4c9a2b0955e1868fc4f0e3d927a,1f5040363b907d936c19dcff5519b79f19132654..69c76cf934074b9ef962dc3f1e4c78f40b4bdf28
@@@ -57,9 -57,6 +57,9 @@@ nouveau_gem_object_del(struct drm_gem_o
        }
  
        ttm_bo_unref(&bo);
 +
 +      drm_gem_object_release(gem);
 +      kfree(gem);
  }
  
  int
@@@ -183,35 -180,40 +183,35 @@@ nouveau_gem_set_domain(struct drm_gem_o
  {
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
 -      uint64_t flags;
 +      uint32_t domains = valid_domains &
 +              (write_domains ? write_domains : read_domains);
 +      uint32_t pref_flags = 0, valid_flags = 0;
  
 -      if (!valid_domains || (!read_domains && !write_domains))
 +      if (!domains)
                return -EINVAL;
  
 -      if (write_domains) {
 -              if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 -                  (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
 -                      flags = TTM_PL_FLAG_VRAM;
 -              else
 -              if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
 -                  (write_domains & NOUVEAU_GEM_DOMAIN_GART))
 -                      flags = TTM_PL_FLAG_TT;
 -              else
 -                      return -EINVAL;
 -      } else {
 -              if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 -                  (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 -                  bo->mem.mem_type == TTM_PL_VRAM)
 -                      flags = TTM_PL_FLAG_VRAM;
 -              else
 -              if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
 -                  (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
 -                  bo->mem.mem_type == TTM_PL_TT)
 -                      flags = TTM_PL_FLAG_TT;
 -              else
 -              if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 -                  (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
 -                      flags = TTM_PL_FLAG_VRAM;
 -              else
 -                      flags = TTM_PL_FLAG_TT;
 -      }
 +      if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
 +              valid_flags |= TTM_PL_FLAG_VRAM;
 +
 +      if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 +              valid_flags |= TTM_PL_FLAG_TT;
 +
 +      if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 +          bo->mem.mem_type == TTM_PL_VRAM)
 +              pref_flags |= TTM_PL_FLAG_VRAM;
 +
 +      else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 +               bo->mem.mem_type == TTM_PL_TT)
 +              pref_flags |= TTM_PL_FLAG_TT;
 +
 +      else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
 +              pref_flags |= TTM_PL_FLAG_VRAM;
 +
 +      else
 +              pref_flags |= TTM_PL_FLAG_TT;
 +
 +      nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
  
 -      nouveau_bo_placement_set(nvbo, flags);
        return 0;
  }
  
@@@ -385,7 -387,7 +385,7 @@@ validate_list(struct nouveau_channel *c
  
                nvbo->channel = chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-                                     false, false);
+                                     false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail ttm_validate\n");
index 3295154e59345227e90404c88e8d0c7929aa62d6,5c7c5c3993028040c27cc6aa02bc02c3a0dec614..b3d168fb89e565b3894249ab86c5fb68665c1944
   */
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "radeon.h"
  #include "radeon_asic.h"
  #include "radeon_drm.h"
 -#include "rv770d.h"
 +#include "evergreend.h"
  #include "atom.h"
  #include "avivod.h"
  #include "evergreen_reg.h"
  
 +#define EVERGREEN_PFP_UCODE_SIZE 1120
 +#define EVERGREEN_PM4_UCODE_SIZE 1376
 +
  static void evergreen_gpu_init(struct radeon_device *rdev);
  void evergreen_fini(struct radeon_device *rdev);
  
  bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
  {
        bool connected = false;
 -      /* XXX */
 +
 +      switch (hpd) {
 +      case RADEON_HPD_1:
 +              if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      case RADEON_HPD_2:
 +              if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      case RADEON_HPD_3:
 +              if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      case RADEON_HPD_4:
 +              if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      case RADEON_HPD_5:
 +              if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      case RADEON_HPD_6:
 +              if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
 +                      connected = true;
 +              break;
 +      default:
 +              break;
 +      }
 +
        return connected;
  }
  
  void evergreen_hpd_set_polarity(struct radeon_device *rdev,
                                enum radeon_hpd_id hpd)
  {
 -      /* XXX */
 +      u32 tmp;
 +      bool connected = evergreen_hpd_sense(rdev, hpd);
 +
 +      switch (hpd) {
 +      case RADEON_HPD_1:
 +              tmp = RREG32(DC_HPD1_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD1_INT_CONTROL, tmp);
 +              break;
 +      case RADEON_HPD_2:
 +              tmp = RREG32(DC_HPD2_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD2_INT_CONTROL, tmp);
 +              break;
 +      case RADEON_HPD_3:
 +              tmp = RREG32(DC_HPD3_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD3_INT_CONTROL, tmp);
 +              break;
 +      case RADEON_HPD_4:
 +              tmp = RREG32(DC_HPD4_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD4_INT_CONTROL, tmp);
 +              break;
 +      case RADEON_HPD_5:
 +              tmp = RREG32(DC_HPD5_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD5_INT_CONTROL, tmp);
 +              break;
 +      case RADEON_HPD_6:
 +              tmp = RREG32(DC_HPD6_INT_CONTROL);
 +              if (connected)
 +                      tmp &= ~DC_HPDx_INT_POLARITY;
 +              else
 +                      tmp |= DC_HPDx_INT_POLARITY;
 +              WREG32(DC_HPD6_INT_CONTROL, tmp);
 +              break;
 +      default:
 +              break;
 +      }
  }
  
  void evergreen_hpd_init(struct radeon_device *rdev)
  {
 -      /* XXX */
 +      struct drm_device *dev = rdev->ddev;
 +      struct drm_connector *connector;
 +      u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
 +              DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 +
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 +              switch (radeon_connector->hpd.hpd) {
 +              case RADEON_HPD_1:
 +                      WREG32(DC_HPD1_CONTROL, tmp);
 +                      rdev->irq.hpd[0] = true;
 +                      break;
 +              case RADEON_HPD_2:
 +                      WREG32(DC_HPD2_CONTROL, tmp);
 +                      rdev->irq.hpd[1] = true;
 +                      break;
 +              case RADEON_HPD_3:
 +                      WREG32(DC_HPD3_CONTROL, tmp);
 +                      rdev->irq.hpd[2] = true;
 +                      break;
 +              case RADEON_HPD_4:
 +                      WREG32(DC_HPD4_CONTROL, tmp);
 +                      rdev->irq.hpd[3] = true;
 +                      break;
 +              case RADEON_HPD_5:
 +                      WREG32(DC_HPD5_CONTROL, tmp);
 +                      rdev->irq.hpd[4] = true;
 +                      break;
 +              case RADEON_HPD_6:
 +                      WREG32(DC_HPD6_CONTROL, tmp);
 +                      rdev->irq.hpd[5] = true;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +      if (rdev->irq.installed)
 +              evergreen_irq_set(rdev);
  }
  
 -
 -void evergreen_bandwidth_update(struct radeon_device *rdev)
 +void evergreen_hpd_fini(struct radeon_device *rdev)
  {
 -      /* XXX */
 +      struct drm_device *dev = rdev->ddev;
 +      struct drm_connector *connector;
 +
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 +              switch (radeon_connector->hpd.hpd) {
 +              case RADEON_HPD_1:
 +                      WREG32(DC_HPD1_CONTROL, 0);
 +                      rdev->irq.hpd[0] = false;
 +                      break;
 +              case RADEON_HPD_2:
 +                      WREG32(DC_HPD2_CONTROL, 0);
 +                      rdev->irq.hpd[1] = false;
 +                      break;
 +              case RADEON_HPD_3:
 +                      WREG32(DC_HPD3_CONTROL, 0);
 +                      rdev->irq.hpd[2] = false;
 +                      break;
 +              case RADEON_HPD_4:
 +                      WREG32(DC_HPD4_CONTROL, 0);
 +                      rdev->irq.hpd[3] = false;
 +                      break;
 +              case RADEON_HPD_5:
 +                      WREG32(DC_HPD5_CONTROL, 0);
 +                      rdev->irq.hpd[4] = false;
 +                      break;
 +              case RADEON_HPD_6:
 +                      WREG32(DC_HPD6_CONTROL, 0);
 +                      rdev->irq.hpd[5] = false;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
  }
  
 -void evergreen_hpd_fini(struct radeon_device *rdev)
 +void evergreen_bandwidth_update(struct radeon_device *rdev)
  {
        /* XXX */
  }
@@@ -238,31 -82,10 +238,31 @@@ static int evergreen_mc_wait_for_idle(s
  /*
   * GART
   */
 +void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
 +{
 +      unsigned i;
 +      u32 tmp;
 +
 +      WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
 +      for (i = 0; i < rdev->usec_timeout; i++) {
 +              /* read MC_STATUS */
 +              tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
 +              tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
 +              if (tmp == 2) {
 +                      printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
 +                      return;
 +              }
 +              if (tmp) {
 +                      return;
 +              }
 +              udelay(1);
 +      }
 +}
 +
  int evergreen_pcie_gart_enable(struct radeon_device *rdev)
  {
        u32 tmp;
 -      int r, i;
 +      int r;
  
        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
 -      for (i = 1; i < 7; i++)
 -              WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 +      WREG32(VM_CONTEXT1_CNTL, 0);
  
 -      r600_pcie_gart_tlb_flush(rdev);
 +      evergreen_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
  }
  void evergreen_pcie_gart_disable(struct radeon_device *rdev)
  {
        u32 tmp;
 -      int i, r;
 +      int r;
  
        /* Disable all tables */
 -      for (i = 0; i < 7; i++)
 -              WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 +      WREG32(VM_CONTEXT0_CNTL, 0);
 +      WREG32(VM_CONTEXT1_CNTL, 0);
  
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@@ -348,6 -172,7 +348,6 @@@ void evergreen_pcie_gart_fini(struct ra
  void evergreen_agp_enable(struct radeon_device *rdev)
  {
        u32 tmp;
 -      int i;
  
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
 -      for (i = 0; i < 7; i++)
 -              WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 +      WREG32(VM_CONTEXT0_CNTL, 0);
 +      WREG32(VM_CONTEXT1_CNTL, 0);
  }
  
  static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@@ -574,656 -399,40 +574,656 @@@ static void evergreen_mc_program(struc
        rv515_vga_render_disable(rdev);
  }
  
 -#if 0
  /*
   * CP.
   */
 -static void evergreen_cp_stop(struct radeon_device *rdev)
 -{
 -      /* XXX */
 -}
 -
  
  static int evergreen_cp_load_microcode(struct radeon_device *rdev)
  {
 -      /* XXX */
 +      const __be32 *fw_data;
 +      int i;
 +
 +      if (!rdev->me_fw || !rdev->pfp_fw)
 +              return -EINVAL;
  
 +      r700_cp_stop(rdev);
 +      WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
 +
 +      fw_data = (const __be32 *)rdev->pfp_fw->data;
 +      WREG32(CP_PFP_UCODE_ADDR, 0);
 +      for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
 +              WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
 +      WREG32(CP_PFP_UCODE_ADDR, 0);
 +
 +      fw_data = (const __be32 *)rdev->me_fw->data;
 +      WREG32(CP_ME_RAM_WADDR, 0);
 +      for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
 +              WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
 +
 +      WREG32(CP_PFP_UCODE_ADDR, 0);
 +      WREG32(CP_ME_RAM_WADDR, 0);
 +      WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
  }
  
 +int evergreen_cp_resume(struct radeon_device *rdev)
 +{
 +      u32 tmp;
 +      u32 rb_bufsz;
 +      int r;
 +
 +      /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
 +      WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
 +                               SOFT_RESET_PA |
 +                               SOFT_RESET_SH |
 +                               SOFT_RESET_VGT |
 +                               SOFT_RESET_SX));
 +      RREG32(GRBM_SOFT_RESET);
 +      mdelay(15);
 +      WREG32(GRBM_SOFT_RESET, 0);
 +      RREG32(GRBM_SOFT_RESET);
 +
 +      /* Set ring buffer size */
 +      rb_bufsz = drm_order(rdev->cp.ring_size / 8);
 +      tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 +#ifdef __BIG_ENDIAN
 +      tmp |= BUF_SWAP_32BIT;
 +#endif
 +      WREG32(CP_RB_CNTL, tmp);
 +      WREG32(CP_SEM_WAIT_TIMER, 0x4);
 +
 +      /* Set the write pointer delay */
 +      WREG32(CP_RB_WPTR_DELAY, 0);
 +
 +      /* Initialize the ring buffer's read and write pointers */
 +      WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 +      WREG32(CP_RB_RPTR_WR, 0);
 +      WREG32(CP_RB_WPTR, 0);
 +      WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
 +      WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
 +      mdelay(1);
 +      WREG32(CP_RB_CNTL, tmp);
 +
 +      WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
 +      WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 +
 +      rdev->cp.rptr = RREG32(CP_RB_RPTR);
 +      rdev->cp.wptr = RREG32(CP_RB_WPTR);
 +
 +      r600_cp_start(rdev);
 +      rdev->cp.ready = true;
 +      r = radeon_ring_test(rdev);
 +      if (r) {
 +              rdev->cp.ready = false;
 +              return r;
 +      }
 +      return 0;
 +}
  
  /*
   * Core functions
   */
 -static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
 +static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
 +                                                u32 num_tile_pipes,
                                                  u32 num_backends,
                                                  u32 backend_disable_mask)
  {
        u32 backend_map = 0;
 +      u32 enabled_backends_mask = 0;
 +      u32 enabled_backends_count = 0;
 +      u32 cur_pipe;
 +      u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
 +      u32 cur_backend = 0;
 +      u32 i;
 +      bool force_no_swizzle;
 +
 +      if (num_tile_pipes > EVERGREEN_MAX_PIPES)
 +              num_tile_pipes = EVERGREEN_MAX_PIPES;
 +      if (num_tile_pipes < 1)
 +              num_tile_pipes = 1;
 +      if (num_backends > EVERGREEN_MAX_BACKENDS)
 +              num_backends = EVERGREEN_MAX_BACKENDS;
 +      if (num_backends < 1)
 +              num_backends = 1;
 +
 +      for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
 +              if (((backend_disable_mask >> i) & 1) == 0) {
 +                      enabled_backends_mask |= (1 << i);
 +                      ++enabled_backends_count;
 +              }
 +              if (enabled_backends_count == num_backends)
 +                      break;
 +      }
 +
 +      if (enabled_backends_count == 0) {
 +              enabled_backends_mask = 1;
 +              enabled_backends_count = 1;
 +      }
 +
 +      if (enabled_backends_count != num_backends)
 +              num_backends = enabled_backends_count;
 +
 +      memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
 +      switch (rdev->family) {
 +      case CHIP_CEDAR:
 +      case CHIP_REDWOOD:
 +              force_no_swizzle = false;
 +              break;
 +      case CHIP_CYPRESS:
 +      case CHIP_HEMLOCK:
 +      case CHIP_JUNIPER:
 +      default:
 +              force_no_swizzle = true;
 +              break;
 +      }
 +      if (force_no_swizzle) {
 +              bool last_backend_enabled = false;
 +
 +              force_no_swizzle = false;
 +              for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
 +                      if (((enabled_backends_mask >> i) & 1) == 1) {
 +                              if (last_backend_enabled)
 +                                      force_no_swizzle = true;
 +                              last_backend_enabled = true;
 +                      } else
 +                              last_backend_enabled = false;
 +              }
 +      }
 +
 +      switch (num_tile_pipes) {
 +      case 1:
 +      case 3:
 +      case 5:
 +      case 7:
 +              DRM_ERROR("odd number of pipes!\n");
 +              break;
 +      case 2:
 +              swizzle_pipe[0] = 0;
 +              swizzle_pipe[1] = 1;
 +              break;
 +      case 4:
 +              if (force_no_swizzle) {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 1;
 +                      swizzle_pipe[2] = 2;
 +                      swizzle_pipe[3] = 3;
 +              } else {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 2;
 +                      swizzle_pipe[2] = 1;
 +                      swizzle_pipe[3] = 3;
 +              }
 +              break;
 +      case 6:
 +              if (force_no_swizzle) {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 1;
 +                      swizzle_pipe[2] = 2;
 +                      swizzle_pipe[3] = 3;
 +                      swizzle_pipe[4] = 4;
 +                      swizzle_pipe[5] = 5;
 +              } else {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 2;
 +                      swizzle_pipe[2] = 4;
 +                      swizzle_pipe[3] = 1;
 +                      swizzle_pipe[4] = 3;
 +                      swizzle_pipe[5] = 5;
 +              }
 +              break;
 +      case 8:
 +              if (force_no_swizzle) {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 1;
 +                      swizzle_pipe[2] = 2;
 +                      swizzle_pipe[3] = 3;
 +                      swizzle_pipe[4] = 4;
 +                      swizzle_pipe[5] = 5;
 +                      swizzle_pipe[6] = 6;
 +                      swizzle_pipe[7] = 7;
 +              } else {
 +                      swizzle_pipe[0] = 0;
 +                      swizzle_pipe[1] = 2;
 +                      swizzle_pipe[2] = 4;
 +                      swizzle_pipe[3] = 6;
 +                      swizzle_pipe[4] = 1;
 +                      swizzle_pipe[5] = 3;
 +                      swizzle_pipe[6] = 5;
 +                      swizzle_pipe[7] = 7;
 +              }
 +              break;
 +      }
 +
 +      for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
 +              while (((1 << cur_backend) & enabled_backends_mask) == 0)
 +                      cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 +
 +              backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 +
 +              cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 +      }
  
        return backend_map;
  }
 -#endif
  
  static void evergreen_gpu_init(struct radeon_device *rdev)
  {
 -      /* XXX */
 +      u32 cc_rb_backend_disable = 0;
 +      u32 cc_gc_shader_pipe_config;
 +      u32 gb_addr_config = 0;
 +      u32 mc_shared_chmap, mc_arb_ramcfg;
 +      u32 gb_backend_map;
 +      u32 grbm_gfx_index;
 +      u32 sx_debug_1;
 +      u32 smx_dc_ctl0;
 +      u32 sq_config;
 +      u32 sq_lds_resource_mgmt;
 +      u32 sq_gpr_resource_mgmt_1;
 +      u32 sq_gpr_resource_mgmt_2;
 +      u32 sq_gpr_resource_mgmt_3;
 +      u32 sq_thread_resource_mgmt;
 +      u32 sq_thread_resource_mgmt_2;
 +      u32 sq_stack_resource_mgmt_1;
 +      u32 sq_stack_resource_mgmt_2;
 +      u32 sq_stack_resource_mgmt_3;
 +      u32 vgt_cache_invalidation;
 +      u32 hdp_host_path_cntl;
 +      int i, j, num_shader_engines, ps_thread_count;
 +
 +      switch (rdev->family) {
 +      case CHIP_CYPRESS:
 +      case CHIP_HEMLOCK:
 +              rdev->config.evergreen.num_ses = 2;
 +              rdev->config.evergreen.max_pipes = 4;
 +              rdev->config.evergreen.max_tile_pipes = 8;
 +              rdev->config.evergreen.max_simds = 10;
 +              rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
 +              rdev->config.evergreen.max_gprs = 256;
 +              rdev->config.evergreen.max_threads = 248;
 +              rdev->config.evergreen.max_gs_threads = 32;
 +              rdev->config.evergreen.max_stack_entries = 512;
 +              rdev->config.evergreen.sx_num_of_sets = 4;
 +              rdev->config.evergreen.sx_max_export_size = 256;
 +              rdev->config.evergreen.sx_max_export_pos_size = 64;
 +              rdev->config.evergreen.sx_max_export_smx_size = 192;
 +              rdev->config.evergreen.max_hw_contexts = 8;
 +              rdev->config.evergreen.sq_num_cf_insts = 2;
 +
 +              rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 +              rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 +              rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 +              break;
 +      case CHIP_JUNIPER:
 +              rdev->config.evergreen.num_ses = 1;
 +              rdev->config.evergreen.max_pipes = 4;
 +              rdev->config.evergreen.max_tile_pipes = 4;
 +              rdev->config.evergreen.max_simds = 10;
 +              rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
 +              rdev->config.evergreen.max_gprs = 256;
 +              rdev->config.evergreen.max_threads = 248;
 +              rdev->config.evergreen.max_gs_threads = 32;
 +              rdev->config.evergreen.max_stack_entries = 512;
 +              rdev->config.evergreen.sx_num_of_sets = 4;
 +              rdev->config.evergreen.sx_max_export_size = 256;
 +              rdev->config.evergreen.sx_max_export_pos_size = 64;
 +              rdev->config.evergreen.sx_max_export_smx_size = 192;
 +              rdev->config.evergreen.max_hw_contexts = 8;
 +              rdev->config.evergreen.sq_num_cf_insts = 2;
 +
 +              rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 +              rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 +              rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 +              break;
 +      case CHIP_REDWOOD:
 +              rdev->config.evergreen.num_ses = 1;
 +              rdev->config.evergreen.max_pipes = 4;
 +              rdev->config.evergreen.max_tile_pipes = 4;
 +              rdev->config.evergreen.max_simds = 5;
 +              rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
 +              rdev->config.evergreen.max_gprs = 256;
 +              rdev->config.evergreen.max_threads = 248;
 +              rdev->config.evergreen.max_gs_threads = 32;
 +              rdev->config.evergreen.max_stack_entries = 256;
 +              rdev->config.evergreen.sx_num_of_sets = 4;
 +              rdev->config.evergreen.sx_max_export_size = 256;
 +              rdev->config.evergreen.sx_max_export_pos_size = 64;
 +              rdev->config.evergreen.sx_max_export_smx_size = 192;
 +              rdev->config.evergreen.max_hw_contexts = 8;
 +              rdev->config.evergreen.sq_num_cf_insts = 2;
 +
 +              rdev->config.evergreen.sc_prim_fifo_size = 0x100;
 +              rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 +              rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 +              break;
 +      case CHIP_CEDAR:
 +      default:
 +              rdev->config.evergreen.num_ses = 1;
 +              rdev->config.evergreen.max_pipes = 2;
 +              rdev->config.evergreen.max_tile_pipes = 2;
 +              rdev->config.evergreen.max_simds = 2;
 +              rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
 +              rdev->config.evergreen.max_gprs = 256;
 +              rdev->config.evergreen.max_threads = 192;
 +              rdev->config.evergreen.max_gs_threads = 16;
 +              rdev->config.evergreen.max_stack_entries = 256;
 +              rdev->config.evergreen.sx_num_of_sets = 4;
 +              rdev->config.evergreen.sx_max_export_size = 128;
 +              rdev->config.evergreen.sx_max_export_pos_size = 32;
 +              rdev->config.evergreen.sx_max_export_smx_size = 96;
 +              rdev->config.evergreen.max_hw_contexts = 4;
 +              rdev->config.evergreen.sq_num_cf_insts = 1;
 +
 +              rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 +              rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 +              rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 +              break;
 +      }
 +
 +      /* Initialize HDP */
 +      for (i = 0, j = 0; i < 32; i++, j += 0x18) {
 +              WREG32((0x2c14 + j), 0x00000000);
 +              WREG32((0x2c18 + j), 0x00000000);
 +              WREG32((0x2c1c + j), 0x00000000);
 +              WREG32((0x2c20 + j), 0x00000000);
 +              WREG32((0x2c24 + j), 0x00000000);
 +      }
 +
 +      WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 +
 +      cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 +
 +      cc_gc_shader_pipe_config |=
 +              INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
 +                                & EVERGREEN_MAX_PIPES_MASK);
 +      cc_gc_shader_pipe_config |=
 +              INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
 +                             & EVERGREEN_MAX_SIMDS_MASK);
 +
 +      cc_rb_backend_disable =
 +              BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
 +                              & EVERGREEN_MAX_BACKENDS_MASK);
 +
 +
 +      mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 +      mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 +
 +      switch (rdev->config.evergreen.max_tile_pipes) {
 +      case 1:
 +      default:
 +              gb_addr_config |= NUM_PIPES(0);
 +              break;
 +      case 2:
 +              gb_addr_config |= NUM_PIPES(1);
 +              break;
 +      case 4:
 +              gb_addr_config |= NUM_PIPES(2);
 +              break;
 +      case 8:
 +              gb_addr_config |= NUM_PIPES(3);
 +              break;
 +      }
 +
 +      gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
 +      gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
 +      gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
 +      gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
 +      gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
 +      gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
 +
 +      if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
 +              gb_addr_config |= ROW_SIZE(2);
 +      else
 +              gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
 +
 +      if (rdev->ddev->pdev->device == 0x689e) {
 +              u32 efuse_straps_4;
 +              u32 efuse_straps_3;
 +              u8 efuse_box_bit_131_124;
 +
 +              WREG32(RCU_IND_INDEX, 0x204);
 +              efuse_straps_4 = RREG32(RCU_IND_DATA);
 +              WREG32(RCU_IND_INDEX, 0x203);
 +              efuse_straps_3 = RREG32(RCU_IND_DATA);
 +              efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
 +
 +              switch(efuse_box_bit_131_124) {
 +              case 0x00:
 +                      gb_backend_map = 0x76543210;
 +                      break;
 +              case 0x55:
 +                      gb_backend_map = 0x77553311;
 +                      break;
 +              case 0x56:
 +                      gb_backend_map = 0x77553300;
 +                      break;
 +              case 0x59:
 +                      gb_backend_map = 0x77552211;
 +                      break;
 +              case 0x66:
 +                      gb_backend_map = 0x77443300;
 +                      break;
 +              case 0x99:
 +                      gb_backend_map = 0x66552211;
 +                      break;
 +              case 0x5a:
 +                      gb_backend_map = 0x77552200;
 +                      break;
 +              case 0xaa:
 +                      gb_backend_map = 0x66442200;
 +                      break;
 +              case 0x95:
 +                      gb_backend_map = 0x66553311;
 +                      break;
 +              default:
 +                      DRM_ERROR("bad backend map, using default\n");
 +                      gb_backend_map =
 +                              evergreen_get_tile_pipe_to_backend_map(rdev,
 +                                                                     rdev->config.evergreen.max_tile_pipes,
 +                                                                     rdev->config.evergreen.max_backends,
 +                                                                     ((EVERGREEN_MAX_BACKENDS_MASK <<
 +                                                                 rdev->config.evergreen.max_backends) &
 +                                                                      EVERGREEN_MAX_BACKENDS_MASK));
 +                      break;
 +              }
 +      } else if (rdev->ddev->pdev->device == 0x68b9) {
 +              u32 efuse_straps_3;
 +              u8 efuse_box_bit_127_124;
 +
 +              WREG32(RCU_IND_INDEX, 0x203);
 +              efuse_straps_3 = RREG32(RCU_IND_DATA);
 +              efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
 +
 +              switch(efuse_box_bit_127_124) {
 +              case 0x0:
 +                      gb_backend_map = 0x00003210;
 +                      break;
 +              case 0x5:
 +              case 0x6:
 +              case 0x9:
 +              case 0xa:
 +                      gb_backend_map = 0x00003311;
 +                      break;
 +              default:
 +                      DRM_ERROR("bad backend map, using default\n");
 +                      gb_backend_map =
 +                              evergreen_get_tile_pipe_to_backend_map(rdev,
 +                                                                     rdev->config.evergreen.max_tile_pipes,
 +                                                                     rdev->config.evergreen.max_backends,
 +                                                                     ((EVERGREEN_MAX_BACKENDS_MASK <<
 +                                                                 rdev->config.evergreen.max_backends) &
 +                                                                      EVERGREEN_MAX_BACKENDS_MASK));
 +                      break;
 +              }
 +      } else
 +              gb_backend_map =
 +                      evergreen_get_tile_pipe_to_backend_map(rdev,
 +                                                             rdev->config.evergreen.max_tile_pipes,
 +                                                             rdev->config.evergreen.max_backends,
 +                                                             ((EVERGREEN_MAX_BACKENDS_MASK <<
 +                                                               rdev->config.evergreen.max_backends) &
 +                                                              EVERGREEN_MAX_BACKENDS_MASK));
 +
 +      WREG32(GB_BACKEND_MAP, gb_backend_map);
 +      WREG32(GB_ADDR_CONFIG, gb_addr_config);
 +      WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 +      WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 +
 +      num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
 +      grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 +
 +      for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
 +              u32 rb = cc_rb_backend_disable | (0xf0 << 16);
 +              u32 sp = cc_gc_shader_pipe_config;
 +              u32 gfx = grbm_gfx_index | SE_INDEX(i);
 +
 +              if (i == num_shader_engines) {
 +                      rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
 +                      sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
 +              }
 +
 +              WREG32(GRBM_GFX_INDEX, gfx);
 +              WREG32(RLC_GFX_INDEX, gfx);
 +
 +              WREG32(CC_RB_BACKEND_DISABLE, rb);
 +              WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
 +              WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
 +              WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
 +        }
 +
 +      grbm_gfx_index |= SE_BROADCAST_WRITES;
 +      WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
 +      WREG32(RLC_GFX_INDEX, grbm_gfx_index);
 +
 +      WREG32(CGTS_SYS_TCC_DISABLE, 0);
 +      WREG32(CGTS_TCC_DISABLE, 0);
 +      WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
 +      WREG32(CGTS_USER_TCC_DISABLE, 0);
 +
 +      /* set HW defaults for 3D engine */
 +      WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
 +                                   ROQ_IB2_START(0x2b)));
 +
 +      WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
 +
 +      WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
 +                           SYNC_GRADIENT |
 +                           SYNC_WALKER |
 +                           SYNC_ALIGNER));
 +
 +      sx_debug_1 = RREG32(SX_DEBUG_1);
 +      sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
 +      WREG32(SX_DEBUG_1, sx_debug_1);
 +
 +
 +      smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
 +      smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
 +      smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
 +      WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 +
 +      WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
 +                                      POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
 +                                      SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
 +
 +      WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
 +                               SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
 +                               SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
 +
 +      WREG32(VGT_NUM_INSTANCES, 1);
 +      WREG32(SPI_CONFIG_CNTL, 0);
 +      WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 +      WREG32(CP_PERFMON_CNTL, 0);
 +
 +      WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
 +                                FETCH_FIFO_HIWATER(0x4) |
 +                                DONE_FIFO_HIWATER(0xe0) |
 +                                ALU_UPDATE_FIFO_HIWATER(0x8)));
 +
 +      sq_config = RREG32(SQ_CONFIG);
 +      sq_config &= ~(PS_PRIO(3) |
 +                     VS_PRIO(3) |
 +                     GS_PRIO(3) |
 +                     ES_PRIO(3));
 +      sq_config |= (VC_ENABLE |
 +                    EXPORT_SRC_C |
 +                    PS_PRIO(0) |
 +                    VS_PRIO(1) |
 +                    GS_PRIO(2) |
 +                    ES_PRIO(3));
 +
 +      if (rdev->family == CHIP_CEDAR)
 +              /* no vertex cache */
 +              sq_config &= ~VC_ENABLE;
 +
 +      sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
 +
 +      sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
 +      sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
 +      sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
 +      sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
 +      sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
 +      sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 +      sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 +
 +      if (rdev->family == CHIP_CEDAR)
 +              ps_thread_count = 96;
 +      else
 +              ps_thread_count = 128;
 +
 +      sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
 +      sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +      sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +      sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +      sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +      sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +
 +      sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +      sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +      sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +      sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +      sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +      sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
 +
 +      WREG32(SQ_CONFIG, sq_config);
 +      WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
 +      WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
 +      WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
 +      WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
 +      WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
 +      WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
 +      WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
 +      WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
 +      WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
 +      WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
 +
 +      WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
 +                                        FORCE_EOV_MAX_REZ_CNT(255)));
 +
 +      if (rdev->family == CHIP_CEDAR)
 +              vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
 +      else
 +              vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
 +      vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
 +      WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 +
 +      WREG32(VGT_GS_VERTEX_REUSE, 16);
 +      WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 +
 +      WREG32(CB_PERF_CTR0_SEL_0, 0);
 +      WREG32(CB_PERF_CTR0_SEL_1, 0);
 +      WREG32(CB_PERF_CTR1_SEL_0, 0);
 +      WREG32(CB_PERF_CTR1_SEL_1, 0);
 +      WREG32(CB_PERF_CTR2_SEL_0, 0);
 +      WREG32(CB_PERF_CTR2_SEL_1, 0);
 +      WREG32(CB_PERF_CTR3_SEL_0, 0);
 +      WREG32(CB_PERF_CTR3_SEL_1, 0);
 +
 +      hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
 +      WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 +
 +      WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 +
 +      udelay(50);
 +
  }
  
  int evergreen_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
  
        return 0;
  }
  
 -int evergreen_gpu_reset(struct radeon_device *rdev)
 +bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
  {
        /* FIXME: implement for evergreen */
 +      return false;
 +}
 +
 +static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 +{
 +      struct evergreen_mc_save save;
 +      u32 srbm_reset = 0;
 +      u32 grbm_reset = 0;
 +
 +      dev_info(rdev->dev, "GPU softreset \n");
 +      dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 +              RREG32(GRBM_STATUS));
 +      dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
 +              RREG32(GRBM_STATUS_SE0));
 +      dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
 +              RREG32(GRBM_STATUS_SE1));
 +      dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 +              RREG32(SRBM_STATUS));
 +      evergreen_mc_stop(rdev, &save);
 +      if (evergreen_mc_wait_for_idle(rdev)) {
 +              dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
 +      }
 +      /* Disable CP parsing/prefetching */
 +      WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
 +
 +      /* reset all the gfx blocks */
 +      grbm_reset = (SOFT_RESET_CP |
 +                    SOFT_RESET_CB |
 +                    SOFT_RESET_DB |
 +                    SOFT_RESET_PA |
 +                    SOFT_RESET_SC |
 +                    SOFT_RESET_SPI |
 +                    SOFT_RESET_SH |
 +                    SOFT_RESET_SX |
 +                    SOFT_RESET_TC |
 +                    SOFT_RESET_TA |
 +                    SOFT_RESET_VC |
 +                    SOFT_RESET_VGT);
 +
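 +      /* Pulse the reset: assert the block reset bits, read the register back
 +       * to post the write, hold briefly, then release (same pattern is used
 +       * for the SRBM blocks below).
 +       */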
 +      dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
 +      WREG32(GRBM_SOFT_RESET, grbm_reset);
 +      (void)RREG32(GRBM_SOFT_RESET);
 +      udelay(50);
 +      WREG32(GRBM_SOFT_RESET, 0);
 +      (void)RREG32(GRBM_SOFT_RESET);
 +
 +      /* reset all the system blocks */
 +      srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
 +
 +      dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
 +      WREG32(SRBM_SOFT_RESET, srbm_reset);
 +      (void)RREG32(SRBM_SOFT_RESET);
 +      udelay(50);
 +      WREG32(SRBM_SOFT_RESET, 0);
 +      (void)RREG32(SRBM_SOFT_RESET);
 +      /* Wait a little for things to settle down */
 +      udelay(50);
 +      dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 +              RREG32(GRBM_STATUS));
 +      dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
 +              RREG32(GRBM_STATUS_SE0));
 +      dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
 +              RREG32(GRBM_STATUS_SE1));
 +      dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 +              RREG32(SRBM_STATUS));
 +      /* After reset we need to reinit the ASIC as the GPU often ends up in
 +       * an incoherent state.
 +       */
 +      atom_asic_init(rdev->mode_info.atom_context);
 +      evergreen_mc_resume(rdev, &save);
 +      return 0;
 +}
 +
 +int evergreen_asic_reset(struct radeon_device *rdev)
 +{
 +      return evergreen_gpu_soft_reset(rdev);
 +}
 +
 +/* Interrupts */
 +
 +u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
 +{
 +      switch (crtc) {
 +      case 0:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
 +      case 1:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
 +      case 2:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
 +      case 3:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
 +      case 4:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
 +      case 5:
 +              return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
 +      default:
 +              return 0;
 +      }
 +}
 +
 +void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 +{
 +      u32 tmp;
 +
 +      WREG32(CP_INT_CNTL, 0);
 +      WREG32(GRBM_INT_CNTL, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
 +      WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 +
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
 +      WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 +
 +      WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 +      WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 +
 +      tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD1_INT_CONTROL, tmp);
 +      tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD2_INT_CONTROL, tmp);
 +      tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD3_INT_CONTROL, tmp);
 +      tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD4_INT_CONTROL, tmp);
 +      tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD5_INT_CONTROL, tmp);
 +      tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 +      WREG32(DC_HPD6_INT_CONTROL, tmp);
 +
 +}
 +
 +int evergreen_irq_set(struct radeon_device *rdev)
 +{
 +      u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
 +      u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 +      u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 +
 +      if (!rdev->irq.installed) {
 +              WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
 +              return -EINVAL;
 +      }
 +      /* don't enable anything if the ih is disabled */
 +      if (!rdev->ih.enabled) {
 +              r600_disable_interrupts(rdev);
 +              /* force the active interrupt state to all disabled */
 +              evergreen_disable_interrupt_state(rdev);
 +              return 0;
 +      }
 +
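 +      /* Start from the current HPD interrupt control values with only the
 +       * enable bit cleared, so the configured polarity bits are preserved.
 +       */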
 +      hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +      hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +      hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +      hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +      hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +      hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +
 +      if (rdev->irq.sw_int) {
 +              DRM_DEBUG("evergreen_irq_set: sw int\n");
 +              cp_int_cntl |= RB_INT_ENABLE;
 +      }
 +      if (rdev->irq.crtc_vblank_int[0]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 +              crtc1 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.crtc_vblank_int[1]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 +              crtc2 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.crtc_vblank_int[2]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 +              crtc3 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.crtc_vblank_int[3]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 +              crtc4 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.crtc_vblank_int[4]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 +              crtc5 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.crtc_vblank_int[5]) {
 +              DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 +              crtc6 |= VBLANK_INT_MASK;
 +      }
 +      if (rdev->irq.hpd[0]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 1\n");
 +              hpd1 |= DC_HPDx_INT_EN;
 +      }
 +      if (rdev->irq.hpd[1]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 2\n");
 +              hpd2 |= DC_HPDx_INT_EN;
 +      }
 +      if (rdev->irq.hpd[2]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 3\n");
 +              hpd3 |= DC_HPDx_INT_EN;
 +      }
 +      if (rdev->irq.hpd[3]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 4\n");
 +              hpd4 |= DC_HPDx_INT_EN;
 +      }
 +      if (rdev->irq.hpd[4]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 5\n");
 +              hpd5 |= DC_HPDx_INT_EN;
 +      }
 +      if (rdev->irq.hpd[5]) {
 +              DRM_DEBUG("evergreen_irq_set: hpd 6\n");
 +              hpd6 |= DC_HPDx_INT_EN;
 +      }
 +
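 +      /* program the assembled masks; sources not requested above stay disabled */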
 +      WREG32(CP_INT_CNTL, cp_int_cntl);
 +
 +      WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
 +      WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
 +      WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
 +      WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
 +      WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
 +      WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
 +
 +      WREG32(DC_HPD1_INT_CONTROL, hpd1);
 +      WREG32(DC_HPD2_INT_CONTROL, hpd2);
 +      WREG32(DC_HPD3_INT_CONTROL, hpd3);
 +      WREG32(DC_HPD4_INT_CONTROL, hpd4);
 +      WREG32(DC_HPD5_INT_CONTROL, hpd5);
 +      WREG32(DC_HPD6_INT_CONTROL, hpd6);
 +
        return 0;
  }
  
 +static inline void evergreen_irq_ack(struct radeon_device *rdev,
 +                                   u32 *disp_int,
 +                                   u32 *disp_int_cont,
 +                                   u32 *disp_int_cont2,
 +                                   u32 *disp_int_cont3,
 +                                   u32 *disp_int_cont4,
 +                                   u32 *disp_int_cont5)
 +{
 +      u32 tmp;
 +
 +      *disp_int = RREG32(DISP_INTERRUPT_STATUS);
 +      *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
 +      *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
 +      *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
 +      *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
 +      *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
 +
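 +      /* Ack any pending vblank/vline interrupt on each CRTC by writing the
 +       * corresponding ACK bit back to its status register.
 +       */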
 +      if (*disp_int & LB_D1_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int & LB_D1_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
 +              WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
 +      if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
 +              WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
 +
 +      if (*disp_int & DC_HPD1_INTERRUPT) {
 +              tmp = RREG32(DC_HPD1_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD1_INT_CONTROL, tmp);
 +      }
 +      if (*disp_int_cont & DC_HPD2_INTERRUPT) {
 +              tmp = RREG32(DC_HPD2_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD2_INT_CONTROL, tmp);
 +      }
 +      if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
 +              tmp = RREG32(DC_HPD3_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD3_INT_CONTROL, tmp);
 +      }
 +      if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
 +              tmp = RREG32(DC_HPD4_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD4_INT_CONTROL, tmp);
 +      }
 +      if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
 +              tmp = RREG32(DC_HPD5_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD5_INT_CONTROL, tmp);
 +      }
 +      if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
 +              tmp = RREG32(DC_HPD6_INT_CONTROL);
 +              tmp |= DC_HPDx_INT_ACK;
 +              WREG32(DC_HPD6_INT_CONTROL, tmp);
 +      }
 +}
 +
 +void evergreen_irq_disable(struct radeon_device *rdev)
 +{
 +      u32 disp_int, disp_int_cont, disp_int_cont2;
 +      u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
 +
 +      r600_disable_interrupts(rdev);
 +      /* Wait and acknowledge irq */
 +      mdelay(1);
 +      evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
 +                        &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
 +      evergreen_disable_interrupt_state(rdev);
 +}
 +
 +static void evergreen_irq_suspend(struct radeon_device *rdev)
 +{
 +      evergreen_irq_disable(rdev);
 +      r600_rlc_stop(rdev);
 +}
 +
 +static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 +{
 +      u32 wptr, tmp;
 +
 +      /* XXX use writeback */
 +      wptr = RREG32(IH_RB_WPTR);
 +
 +      if (wptr & RB_OVERFLOW) {
 +              /* When a ring buffer overflow happens, start parsing interrupts
 +               * from the last not-overwritten vector (wptr + 16). Hopefully
 +               * this should allow us to catch up.
 +               */
 +              dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 +                      wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
 +              rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 +              tmp = RREG32(IH_RB_CNTL);
 +              tmp |= IH_WPTR_OVERFLOW_CLEAR;
 +              WREG32(IH_RB_CNTL, tmp);
 +      }
 +      return (wptr & rdev->ih.ptr_mask);
 +}
 +
 +int evergreen_irq_process(struct radeon_device *rdev)
 +{
 +      u32 wptr = evergreen_get_ih_wptr(rdev);
 +      u32 rptr = rdev->ih.rptr;
 +      u32 src_id, src_data;
 +      u32 ring_index;
 +      u32 disp_int, disp_int_cont, disp_int_cont2;
 +      u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
 +      unsigned long flags;
 +      bool queue_hotplug = false;
 +
 +      DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 +      if (!rdev->ih.enabled)
 +              return IRQ_NONE;
 +
 +      spin_lock_irqsave(&rdev->ih.lock, flags);
 +
 +      if (rptr == wptr) {
 +              spin_unlock_irqrestore(&rdev->ih.lock, flags);
 +              return IRQ_NONE;
 +      }
 +      if (rdev->shutdown) {
 +              spin_unlock_irqrestore(&rdev->ih.lock, flags);
 +              return IRQ_NONE;
 +      }
 +
 +restart_ih:
 +      /* display interrupts */
 +      evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
 +                        &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
 +
 +      rdev->ih.wptr = wptr;
 +      while (rptr != wptr) {
 +              /* wptr/rptr are in bytes! */
 +              ring_index = rptr / 4;
 +              src_id =  rdev->ih.ring[ring_index] & 0xff;
 +              src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
 +
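 +              /* Each IH ring entry is 16 bytes; the first dword holds the
 +               * source id and the second the source data (hence the
 +               * rptr += 16 at the bottom of the loop).
 +               */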
 +              switch (src_id) {
 +              case 1: /* D1 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D1 vblank */
 +                              if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 0);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D1 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D1 vline */
 +                              if (disp_int & LB_D1_VLINE_INTERRUPT) {
 +                                      disp_int &= ~LB_D1_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D1 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 2: /* D2 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D2 vblank */
 +                              if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 1);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D2 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D2 vline */
 +                              if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
 +                                      disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D2 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 3: /* D3 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D3 vblank */
 +                              if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 2);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D3 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D3 vline */
 +                              if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
 +                                      disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D3 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 4: /* D4 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D4 vblank */
 +                              if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 3);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D4 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D4 vline */
 +                              if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
 +                                      disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D4 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 5: /* D5 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D5 vblank */
 +                              if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 4);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D5 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D5 vline */
 +                              if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
 +                                      disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D5 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 6: /* D6 vblank/vline */
 +                      switch (src_data) {
 +                      case 0: /* D6 vblank */
 +                              if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 +                                      drm_handle_vblank(rdev->ddev, 5);
 +                                      wake_up(&rdev->irq.vblank_queue);
 +                                      disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 +                                      DRM_DEBUG("IH: D6 vblank\n");
 +                              }
 +                              break;
 +                      case 1: /* D6 vline */
 +                              if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
 +                                      disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
 +                                      DRM_DEBUG("IH: D6 vline\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 42: /* HPD hotplug */
 +                      switch (src_data) {
 +                      case 0:
 +                              if (disp_int & DC_HPD1_INTERRUPT) {
 +                                      disp_int &= ~DC_HPD1_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD1\n");
 +                              }
 +                              break;
 +                      case 1:
 +                              if (disp_int_cont & DC_HPD2_INTERRUPT) {
 +                                      disp_int_cont &= ~DC_HPD2_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD2\n");
 +                              }
 +                              break;
 +                      case 2:
 +                              if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
 +                                      disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD3\n");
 +                              }
 +                              break;
 +                      case 3:
 +                              if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
 +                                      disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD4\n");
 +                              }
 +                              break;
 +                      case 4:
 +                              if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
 +                                      disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD5\n");
 +                              }
 +                              break;
 +                      case 5:
 +                              if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
 +                                      disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
 +                                      queue_hotplug = true;
 +                                      DRM_DEBUG("IH: HPD6\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
 +                      break;
 +              case 176: /* CP_INT in ring buffer */
 +              case 177: /* CP_INT in IB1 */
 +              case 178: /* CP_INT in IB2 */
 +                      DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
 +                      radeon_fence_process(rdev);
 +                      break;
 +              case 181: /* CP EOP event */
 +                      DRM_DEBUG("IH: CP EOP\n");
 +                      break;
 +              default:
 +                      DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                      break;
 +              }
 +
 +              /* wptr/rptr are in bytes! */
 +              rptr += 16;
 +              rptr &= rdev->ih.ptr_mask;
 +      }
 +      /* make sure wptr hasn't changed while processing */
 +      wptr = evergreen_get_ih_wptr(rdev);
 +      if (wptr != rdev->ih.wptr)
 +              goto restart_ih;
 +      if (queue_hotplug)
 +              queue_work(rdev->wq, &rdev->hotplug_work);
 +      rdev->ih.rptr = rptr;
 +      WREG32(IH_RB_RPTR, rdev->ih.rptr);
 +      spin_unlock_irqrestore(&rdev->ih.lock, flags);
 +      return IRQ_HANDLED;
 +}
 +
  static int evergreen_startup(struct radeon_device *rdev)
  {
 -#if 0
        int r;
  
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                        return r;
                }
        }
 -#endif
 +
        evergreen_mc_program(rdev);
 -#if 0
        if (rdev->flags & RADEON_IS_AGP) {
 -              evergreem_agp_enable(rdev);
 +              evergreen_agp_enable(rdev);
        } else {
                r = evergreen_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
 -#endif
        evergreen_gpu_init(rdev);
  #if 0
        if (!rdev->r600_blit.shader_obj) {
                DRM_ERROR("failed to pin blit object %d\n", r);
                return r;
        }
 +#endif
  
        /* Enable IRQ */
        r = r600_irq_init(rdev);
                radeon_irq_kms_fini(rdev);
                return r;
        }
 -      r600_irq_set(rdev);
 +      evergreen_irq_set(rdev);
  
        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
 -      r = r600_cp_resume(rdev);
 +      r = evergreen_cp_resume(rdev);
        if (r)
                return r;
        /* write back buffer are not vital so don't worry about failure */
        r600_wb_enable(rdev);
 -#endif
 +
        return 0;
  }
  
@@@ -1966,13 -576,13 +1961,13 @@@ int evergreen_resume(struct radeon_devi
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }
 -#if 0
 +
        r = r600_ib_test(rdev);
        if (r) {
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
 -#endif
 +
        return r;
  
  }
@@@ -1981,14 -591,12 +1976,14 @@@ int evergreen_suspend(struct radeon_dev
  {
  #if 0
        int r;
 -
 +#endif
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
 +      evergreen_irq_suspend(rdev);
        r600_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
 +#if 0
        /* unpin shaders bo */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (likely(r == 0)) {
@@@ -2088,7 -696,7 +2083,7 @@@ int evergreen_init(struct radeon_devic
        r = radeon_bo_init(rdev);
        if (r)
                return r;
 -#if 0
 +
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
 -#endif
 +
        rdev->accel_working = false;
        r = evergreen_startup(rdev);
        if (r) {
 -              evergreen_suspend(rdev);
 -              /*r600_wb_fini(rdev);*/
 -              /*radeon_ring_fini(rdev);*/
 -              /*evergreen_pcie_gart_fini(rdev);*/
 +              dev_err(rdev->dev, "disabling GPU acceleration\n");
 +              r700_cp_fini(rdev);
 +              r600_wb_fini(rdev);
 +              r600_irq_fini(rdev);
 +              radeon_irq_kms_fini(rdev);
 +              evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
  void evergreen_fini(struct radeon_device *rdev)
  {
        radeon_pm_fini(rdev);
 -      evergreen_suspend(rdev);
 -#if 0
 -      r600_blit_fini(rdev);
 +      /*r600_blit_fini(rdev);*/
 +      r700_cp_fini(rdev);
 +      r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
 -      radeon_ring_fini(rdev);
 -      r600_wb_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
 -#endif
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
index 9d3b47deecb38f163cad064295d7bec284c47ec8,3ce549706c2aaf332733376ee742db419b412f47..9bdccb9649999d9d27b64774b9e038f5aa2e395e
@@@ -26,7 -26,6 +26,7 @@@
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "drm.h"
  #include "radeon_drm.h"
@@@ -663,6 -662,26 +663,6 @@@ int r100_cp_init(struct radeon_device *
        if (r100_debugfs_cp_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for CP !\n");
        }
 -      /* Reset CP */
 -      tmp = RREG32(RADEON_CP_CSQ_STAT);
 -      if ((tmp & (1 << 31))) {
 -              DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
 -              WREG32(RADEON_CP_CSQ_MODE, 0);
 -              WREG32(RADEON_CP_CSQ_CNTL, 0);
 -              WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
 -              tmp = RREG32(RADEON_RBBM_SOFT_RESET);
 -              mdelay(2);
 -              WREG32(RADEON_RBBM_SOFT_RESET, 0);
 -              tmp = RREG32(RADEON_RBBM_SOFT_RESET);
 -              mdelay(2);
 -              tmp = RREG32(RADEON_CP_CSQ_STAT);
 -              if ((tmp & (1 << 31))) {
 -                      DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
 -              }
 -      } else {
 -              DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
 -      }
 -
        if (!rdev->me_fw) {
                r = r100_cp_init_microcode(rdev);
                if (r) {
@@@ -767,6 -786,39 +767,6 @@@ void r100_cp_disable(struct radeon_devi
        }
  }
  
 -int r100_cp_reset(struct radeon_device *rdev)
 -{
 -      uint32_t tmp;
 -      bool reinit_cp;
 -      int i;
 -
 -      reinit_cp = rdev->cp.ready;
 -      rdev->cp.ready = false;
 -      WREG32(RADEON_CP_CSQ_MODE, 0);
 -      WREG32(RADEON_CP_CSQ_CNTL, 0);
 -      WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
 -      (void)RREG32(RADEON_RBBM_SOFT_RESET);
 -      udelay(200);
 -      WREG32(RADEON_RBBM_SOFT_RESET, 0);
 -      /* Wait to prevent race in RBBM_STATUS */
 -      mdelay(1);
 -      for (i = 0; i < rdev->usec_timeout; i++) {
 -              tmp = RREG32(RADEON_RBBM_STATUS);
 -              if (!(tmp & (1 << 16))) {
 -                      DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
 -                               tmp);
 -                      if (reinit_cp) {
 -                              return r100_cp_init(rdev, rdev->cp.ring_size);
 -                      }
 -                      return 0;
 -              }
 -              DRM_UDELAY(1);
 -      }
 -      tmp = RREG32(RADEON_RBBM_STATUS);
 -      DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
 -      return -1;
 -}
 -
  void r100_cp_commit(struct radeon_device *rdev)
  {
        WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@@ -1680,163 -1732,76 +1680,163 @@@ int r100_mc_wait_for_idle(struct radeon
        return -1;
  }
  
 -void r100_gpu_init(struct radeon_device *rdev)
 +void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
  {
 -      /* TODO: anythings to do here ? pipes ? */
 -      r100_hdp_reset(rdev);
 +      lockup->last_cp_rptr = cp->rptr;
 +      lockup->last_jiffies = jiffies;
 +}
 +
 +/**
 + * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
 + * @rdev:     radeon device structure
 + * @lockup:   r100_gpu_lockup structure holding CP lockup tracking information
 + * @cp:               radeon_cp structure holding CP information
 + *
 + * We don't need to initialize the lockup tracking information: either the CP
 + * rptr will have moved to a different value or jiffies will have wrapped
 + * around, and either case forces initialization of the tracking information.
 + *
 + * A possible false positive is a call made after a long while in which
 + * last_cp_rptr happens to equal the current CP rptr; unlikely, but it can
 + * happen. To avoid it, if the time elapsed since the last call is bigger than
 + * 2 seconds we return false and update the tracking information. Because of
 + * this the caller must call r100_gpu_cp_is_lockup several times in less than
 + * 2sec for a lockup to be reported; the fencing code should be cautious about
 + * that.
 + *
 + * The caller should write to the ring to force the CP to do something, so we
 + * don't get a false positive when the CP simply has nothing to do.
 + *
 + **/
 +bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
 +{
 +      unsigned long cjiffies, elapsed;
 +
 +      cjiffies = jiffies;
 +      if (!time_after(cjiffies, lockup->last_jiffies)) {
 +              /* likely a wrap around */
 +              lockup->last_cp_rptr = cp->rptr;
 +              lockup->last_jiffies = jiffies;
 +              return false;
 +      }
 +      if (cp->rptr != lockup->last_cp_rptr) {
 +              /* CP is still working no lockup */
 +              lockup->last_cp_rptr = cp->rptr;
 +              lockup->last_jiffies = jiffies;
 +              return false;
 +      }
 +      elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
 +      if (elapsed >= 3000) {
 +              /* Very likely the improbable case where the current rptr
 +               * equals an rptr recorded a long while ago. This is more
 +               * likely a false positive, so update the tracking
 +               * information, which forces us to be called again later.
 +               */
 +              lockup->last_cp_rptr = cp->rptr;
 +              lockup->last_jiffies = jiffies;
 +              return false;
 +      }
 +      if (elapsed >= 1000) {
 +              dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
 +              return true;
 +      }
 +      /* give a chance to the GPU ... */
 +      return false;
  }
  
 -void r100_hdp_reset(struct radeon_device *rdev)
 +bool r100_gpu_is_lockup(struct radeon_device *rdev)
  {
 -      uint32_t tmp;
 +      u32 rbbm_status;
 +      int r;
  
 -      tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
 -      tmp |= (7 << 28);
 -      WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
 -      (void)RREG32(RADEON_HOST_PATH_CNTL);
 -      udelay(200);
 -      WREG32(RADEON_RBBM_SOFT_RESET, 0);
 -      WREG32(RADEON_HOST_PATH_CNTL, tmp);
 -      (void)RREG32(RADEON_HOST_PATH_CNTL);
 +      rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 +      if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
 +              r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
 +              return false;
 +      }
 +      /* force CP activities */
 +      r = radeon_ring_lock(rdev, 2);
 +      if (!r) {
 +              /* PACKET2 NOP */
 +              radeon_ring_write(rdev, 0x80000000);
 +              radeon_ring_write(rdev, 0x80000000);
 +              radeon_ring_unlock_commit(rdev);
 +      }
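 +      /* refresh our copy of the CP read pointer before checking for progress */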
 +      rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 +      return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
  }
  
 -int r100_rb2d_reset(struct radeon_device *rdev)
 +void r100_bm_disable(struct radeon_device *rdev)
  {
 -      uint32_t tmp;
 -      int i;
 +      u32 tmp;
  
 -      WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
 -      (void)RREG32(RADEON_RBBM_SOFT_RESET);
 -      udelay(200);
 -      WREG32(RADEON_RBBM_SOFT_RESET, 0);
 -      /* Wait to prevent race in RBBM_STATUS */
 +      /* disable bus mastering */
 +      tmp = RREG32(R_000030_BUS_CNTL);
 +      WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
 +      mdelay(1);
 +      WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
 +      mdelay(1);
 +      WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
 +      tmp = RREG32(RADEON_BUS_CNTL);
 +      mdelay(1);
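 +      /* clear the bus-master enable bit in the PCI command register (offset 0x4) */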
 +      pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
 +      pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
        mdelay(1);
 -      for (i = 0; i < rdev->usec_timeout; i++) {
 -              tmp = RREG32(RADEON_RBBM_STATUS);
 -              if (!(tmp & (1 << 26))) {
 -                      DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
 -                               tmp);
 -                      return 0;
 -              }
 -              DRM_UDELAY(1);
 -      }
 -      tmp = RREG32(RADEON_RBBM_STATUS);
 -      DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
 -      return -1;
  }
  
 -int r100_gpu_reset(struct radeon_device *rdev)
 +int r100_asic_reset(struct radeon_device *rdev)
  {
 -      uint32_t status;
 +      struct r100_mc_save save;
 +      u32 status, tmp;
  
 -      /* reset order likely matter */
 -      status = RREG32(RADEON_RBBM_STATUS);
 -      /* reset HDP */
 -      r100_hdp_reset(rdev);
 -      /* reset rb2d */
 -      if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
 -              r100_rb2d_reset(rdev);
 +      r100_mc_stop(rdev, &save);
 +      status = RREG32(R_000E40_RBBM_STATUS);
 +      if (!G_000E40_GUI_ACTIVE(status)) {
 +              return 0;
        }
 -      /* TODO: reset 3D engine */
 +      status = RREG32(R_000E40_RBBM_STATUS);
 +      dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 +      /* stop CP */
 +      WREG32(RADEON_CP_CSQ_CNTL, 0);
 +      tmp = RREG32(RADEON_CP_RB_CNTL);
 +      WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
 +      WREG32(RADEON_CP_RB_RPTR_WR, 0);
 +      WREG32(RADEON_CP_RB_WPTR, 0);
 +      WREG32(RADEON_CP_RB_CNTL, tmp);
 +      /* save PCI state */
 +      pci_save_state(rdev->pdev);
 +      /* disable bus mastering */
 +      r100_bm_disable(rdev);
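 +      /* soft reset the engine blocks (SE/RE/PP/RB) first; the CP is reset separately below */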
 +      WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
 +                                      S_0000F0_SOFT_RESET_RE(1) |
 +                                      S_0000F0_SOFT_RESET_PP(1) |
 +                                      S_0000F0_SOFT_RESET_RB(1));
 +      RREG32(R_0000F0_RBBM_SOFT_RESET);
 +      mdelay(500);
 +      WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
 +      mdelay(1);
 +      status = RREG32(R_000E40_RBBM_STATUS);
 +      dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* reset CP */
 -      status = RREG32(RADEON_RBBM_STATUS);
 -      if (status & (1 << 16)) {
 -              r100_cp_reset(rdev);
 -      }
 +      WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
 +      RREG32(R_0000F0_RBBM_SOFT_RESET);
 +      mdelay(500);
 +      WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
 +      mdelay(1);
 +      status = RREG32(R_000E40_RBBM_STATUS);
 +      dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 +      /* restore PCI & busmastering */
 +      pci_restore_state(rdev->pdev);
 +      r100_enable_bm(rdev);
        /* Check if GPU is idle */
 -      status = RREG32(RADEON_RBBM_STATUS);
 -      if (status & RADEON_RBBM_ACTIVE) {
 -              DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 +      if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
 +              G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
 +              dev_err(rdev->dev, "failed to reset GPU\n");
 +              rdev->gpu_lockup = true;
                return -1;
        }
 -      DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
 +      r100_mc_resume(rdev, &save);
 +      dev_info(rdev->dev, "GPU reset succeeded\n");
        return 0;
  }
  
@@@ -2036,11 -2001,6 +2036,6 @@@ void r100_vram_init_sizes(struct radeon
                else
                        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        }
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
  }
  
  void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@@ -2925,7 -2885,7 +2920,7 @@@ static int r100_cs_track_texture_check(
  {
        struct radeon_bo *robj;
        unsigned long size;
 -      unsigned u, i, w, h;
 +      unsigned u, i, w, h, d;
        int ret;
  
        for (u = 0; u < track->num_texture; u++) {
                        h = h / (1 << i);
                        if (track->textures[u].roundup_h)
                                h = roundup_pow_of_two(h);
 +                      if (track->textures[u].tex_coord_type == 1) {
 +                              d = (1 << track->textures[u].txdepth) / (1 << i);
 +                              if (!d)
 +                                      d = 1;
 +                      } else {
 +                              d = 1;
 +                      }
                        if (track->textures[u].compress_format) {
  
 -                              size += r100_track_compress_size(track->textures[u].compress_format, w, h);
 +                              size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
                                /* compressed textures are block based */
                        } else
 -                              size += w * h;
 +                              size += w * h * d;
                }
                size *= track->textures[u].cpp;
  
                switch (track->textures[u].tex_coord_type) {
                case 0:
 -                      break;
                case 1:
 -                      size *= (1 << track->textures[u].txdepth);
                        break;
                case 2:
                        if (track->separate_cube) {
@@@ -3046,11 -3001,7 +3041,11 @@@ int r100_cs_track_check(struct radeon_d
                }
        }
        prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
 -      nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
 +      if (track->vap_vf_cntl & (1 << 14)) {
 +              nverts = track->vap_alt_nverts;
 +      } else {
 +              nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
 +      }
        switch (prim_walk) {
        case 1:
                for (i = 0; i < track->num_arrays; i++) {
@@@ -3433,7 -3384,7 +3428,7 @@@ static int r100_startup(struct radeon_d
        /* Resume clock */
        r100_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
 -      r100_gpu_init(rdev);
 +//    r100_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r100_enable_bm(rdev);
@@@ -3470,7 -3421,7 +3465,7 @@@ int r100_resume(struct radeon_device *r
        /* Resume clock before doing reset */
        r100_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
 -      if (radeon_gpu_reset(rdev)) {
 +      if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@@ -3539,7 -3490,7 +3534,7 @@@ int r100_init(struct radeon_device *rde
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
 -      if (radeon_gpu_reset(rdev)) {
 +      if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
index 9b08c5743c8662ae33ec80f752fc59ed614a15bf,dce41b167f8576a5ac5eb69d8e835cfb21675428..c325cb121059e2ff4006ce8a4388e63ba2f7b21d
@@@ -25,7 -25,6 +25,7 @@@
   *          Alex Deucher
   *          Jerome Glisse
   */
 +#include <linux/slab.h>
  #include <linux/seq_file.h>
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
@@@ -44,9 -43,6 +44,9 @@@
  #define R700_PFP_UCODE_SIZE 848
  #define R700_PM4_UCODE_SIZE 1360
  #define R700_RLC_UCODE_SIZE 1024
 +#define EVERGREEN_PFP_UCODE_SIZE 1120
 +#define EVERGREEN_PM4_UCODE_SIZE 1376
 +#define EVERGREEN_RLC_UCODE_SIZE 768
  
  /* Firmware Names */
  MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@@ -71,18 -67,6 +71,18 @@@ MODULE_FIRMWARE("radeon/RV710_pfp.bin")
  MODULE_FIRMWARE("radeon/RV710_me.bin");
  MODULE_FIRMWARE("radeon/R600_rlc.bin");
  MODULE_FIRMWARE("radeon/R700_rlc.bin");
 +MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
 +MODULE_FIRMWARE("radeon/CEDAR_me.bin");
 +MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
 +MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
 +MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
 +MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
 +MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
 +MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
 +MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
 +MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
 +MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
 +MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
  
  int r600_debugfs_mc_info_init(struct radeon_device *rdev);
  
@@@ -90,7 -74,6 +90,7 @@@
  int r600_mc_wait_for_idle(struct radeon_device *rdev);
  void r600_gpu_init(struct radeon_device *rdev);
  void r600_fini(struct radeon_device *rdev);
 +void r600_irq_disable(struct radeon_device *rdev);
  
  /* hpd for digital panel detect/disconnect */
  bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@@ -730,11 -713,6 +730,6 @@@ int r600_mc_init(struct radeon_device *
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
  
        if (rdev->flags & RADEON_IS_IGP)
@@@ -766,6 -744,7 +761,6 @@@ int r600_gpu_soft_reset(struct radeon_d
                        S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                        S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
 -      u32 srbm_reset = 0;
        u32 tmp;
  
        dev_info(rdev->dev, "GPU softreset \n");
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        /* Disable CP parsing/prefetching */
 -      WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
 +      WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        /* Check if any of the rendering block is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
                        S_008020_SOFT_RESET_VGT(1);
                dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 -              (void)RREG32(R_008020_GRBM_SOFT_RESET);
 -              udelay(50);
 +              RREG32(R_008020_GRBM_SOFT_RESET);
 +              mdelay(15);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
 -              (void)RREG32(R_008020_GRBM_SOFT_RESET);
        }
        /* Reset CP (we always reset CP) */
        tmp = S_008020_SOFT_RESET_CP(1);
        dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 -      (void)RREG32(R_008020_GRBM_SOFT_RESET);
 -      udelay(50);
 +      RREG32(R_008020_GRBM_SOFT_RESET);
 +      mdelay(15);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
 -      (void)RREG32(R_008020_GRBM_SOFT_RESET);
 -      /* Reset others GPU block if necessary */
 -      if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
 -      if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
 -      if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_IH(1);
 -      if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
 -      if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_MC(1);
 -      if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_MC(1);
 -      if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_MC(1);
 -      if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_MC(1);
 -      if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_MC(1);
 -      if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
 -      if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
 -      if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 -              srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
 -      dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
 -      WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
 -      (void)RREG32(R_000E60_SRBM_SOFT_RESET);
 -      udelay(50);
 -      WREG32(R_000E60_SRBM_SOFT_RESET, 0);
 -      (void)RREG32(R_000E60_SRBM_SOFT_RESET);
 -      WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
 -      (void)RREG32(R_000E60_SRBM_SOFT_RESET);
 -      udelay(50);
 -      WREG32(R_000E60_SRBM_SOFT_RESET, 0);
 -      (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
 -      udelay(50);
 +      mdelay(1);
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
 -      /* After reset we need to reinit the asic as GPU often endup in an
 -       * incoherent state.
 -       */
 -      atom_asic_init(rdev->mode_info.atom_context);
        rv515_mc_resume(rdev, &save);
        return 0;
  }
  
 -int r600_gpu_reset(struct radeon_device *rdev)
 +bool r600_gpu_is_lockup(struct radeon_device *rdev)
 +{
 +      u32 srbm_status;
 +      u32 grbm_status;
 +      u32 grbm_status2;
 +      int r;
 +
 +      srbm_status = RREG32(R_000E50_SRBM_STATUS);
 +      grbm_status = RREG32(R_008010_GRBM_STATUS);
 +      grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
 +      if (!G_008010_GUI_ACTIVE(grbm_status)) {
 +              r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
 +              return false;
 +      }
 +      /* force CP activities */
 +      r = radeon_ring_lock(rdev, 2);
 +      if (!r) {
 +              /* PACKET2 NOP */
 +              radeon_ring_write(rdev, 0x80000000);
 +              radeon_ring_write(rdev, 0x80000000);
 +              radeon_ring_unlock_commit(rdev);
 +      }
 +      rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
 +      return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
 +}
 +
 +int r600_asic_reset(struct radeon_device *rdev)
  {
        return r600_gpu_soft_reset(rdev);
  }
@@@ -1466,31 -1461,10 +1461,31 @@@ int r600_init_microcode(struct radeon_d
                chip_name = "RV710";
                rlc_chip_name = "R700";
                break;
 +      case CHIP_CEDAR:
 +              chip_name = "CEDAR";
 +              rlc_chip_name = "CEDAR";
 +              break;
 +      case CHIP_REDWOOD:
 +              chip_name = "REDWOOD";
 +              rlc_chip_name = "REDWOOD";
 +              break;
 +      case CHIP_JUNIPER:
 +              chip_name = "JUNIPER";
 +              rlc_chip_name = "JUNIPER";
 +              break;
 +      case CHIP_CYPRESS:
 +      case CHIP_HEMLOCK:
 +              chip_name = "CYPRESS";
 +              rlc_chip_name = "CYPRESS";
 +              break;
        default: BUG();
        }
  
 -      if (rdev->family >= CHIP_RV770) {
 +      if (rdev->family >= CHIP_CEDAR) {
 +              pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
 +              me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 +              rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 +      } else if (rdev->family >= CHIP_RV770) {
                pfp_req_size = R700_PFP_UCODE_SIZE * 4;
                me_req_size = R700_PM4_UCODE_SIZE * 4;
                rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@@ -1604,15 -1578,12 +1599,15 @@@ int r600_cp_start(struct radeon_device 
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
 -      if (rdev->family < CHIP_RV770) {
 -              radeon_ring_write(rdev, 0x3);
 -              radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
 -      } else {
 +      if (rdev->family >= CHIP_CEDAR) {
 +              radeon_ring_write(rdev, 0x0);
 +              radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
 +      } else if (rdev->family >= CHIP_RV770) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
 +      } else {
 +              radeon_ring_write(rdev, 0x3);
 +              radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
        }
        radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(rdev, 0);
@@@ -2313,11 -2284,10 +2308,11 @@@ static void r600_ih_ring_fini(struct ra
        }
  }
  
 -static void r600_rlc_stop(struct radeon_device *rdev)
 +void r600_rlc_stop(struct radeon_device *rdev)
  {
  
 -      if (rdev->family >= CHIP_RV770) {
 +      if ((rdev->family >= CHIP_RV770) &&
 +          (rdev->family <= CHIP_RV740)) {
                /* r7xx asics need to soft reset RLC before halting */
                WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
                RREG32(SRBM_SOFT_RESET);
@@@ -2354,12 -2324,7 +2349,12 @@@ static int r600_rlc_init(struct radeon_
        WREG32(RLC_UCODE_CNTL, 0);
  
        fw_data = (const __be32 *)rdev->rlc_fw->data;
 -      if (rdev->family >= CHIP_RV770) {
 +      if (rdev->family >= CHIP_CEDAR) {
 +              for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
 +                      WREG32(RLC_UCODE_ADDR, i);
 +                      WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
 +              }
 +      } else if (rdev->family >= CHIP_RV770) {
                for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
                        WREG32(RLC_UCODE_ADDR, i);
                        WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@@ -2389,7 -2354,7 +2384,7 @@@ static void r600_enable_interrupts(stru
        rdev->ih.enabled = true;
  }
  
 -static void r600_disable_interrupts(struct radeon_device *rdev)
 +void r600_disable_interrupts(struct radeon_device *rdev)
  {
        u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
        u32 ih_cntl = RREG32(IH_CNTL);
@@@ -2504,10 -2469,7 +2499,10 @@@ int r600_irq_init(struct radeon_device 
        WREG32(IH_CNTL, ih_cntl);
  
        /* force the active interrupt state to all disabled */
 -      r600_disable_interrupt_state(rdev);
 +      if (rdev->family >= CHIP_CEDAR)
 +              evergreen_disable_interrupt_state(rdev);
 +      else
 +              r600_disable_interrupt_state(rdev);
  
        /* enable irqs */
        r600_enable_interrupts(rdev);
  
  void r600_irq_suspend(struct radeon_device *rdev)
  {
 -      r600_disable_interrupts(rdev);
 +      r600_irq_disable(rdev);
        r600_rlc_stop(rdev);
  }
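/*
 * r600_irq_suspend() now quiesces interrupt sources before halting the RLC.
 * A plausible shape for the r600_irq_disable() it calls (illustrative only;
 * the real body lives elsewhere in r600.c and is not part of this hunk):
 */
static void example_irq_quiesce(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);		/* mask IH ring + CNTL, shown above */
	mdelay(1);				/* let in-flight IRQs settle (assumption) */
	if (rdev->family >= CHIP_CEDAR)		/* mirror the r600_irq_init() dispatch above */
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);
}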
  
index 122774742bd551ebecdd6b5509f904ff24ebe660,57b3f95c0efae878ec801f84c29112fd4ef84083..6a8617bac1429400105b626d796f86029f7c132d
@@@ -30,7 -30,6 +30,7 @@@
   *    Dave Airlie
   */
  #include <linux/list.h>
 +#include <linux/slab.h>
  #include <drm/drmP.h>
  #include "radeon_drm.h"
  #include "radeon.h"
@@@ -192,7 -191,7 +192,7 @@@ int radeon_bo_pin(struct radeon_bo *bo
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@@ -216,7 -215,7 +216,7 @@@ int radeon_bo_unpin(struct radeon_bo *b
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
@@@ -331,7 -330,7 +331,7 @@@ int radeon_bo_list_validate(struct list
                                                                lobj->rdomain);
                        }
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false);
+                                               true, false, false);
                        if (unlikely(r))
                                return r;
                }
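/*
 * All three call sites above pick up the split wait flags; the prototype
 * they now target looks roughly like this (parameter names assumed from the
 * ttm_bo_util.c changes later in this diff):
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu);
/*
 * "false, false, false" therefore means: not interruptible, and willing to
 * block on both the buffer reservation and any GPU fence still guarding it.
 */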
@@@ -499,11 -498,33 +499,33 @@@ void radeon_bo_move_notify(struct ttm_b
        radeon_bo_check_tiling(rbo, 0, 1);
  }
  
- void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  {
+       struct radeon_device *rdev;
        struct radeon_bo *rbo;
+       unsigned long offset, size;
+       int r;
        if (!radeon_ttm_bo_is_radeon_bo(bo))
-               return;
+               return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
+       rdev = rbo->rdev;
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               size = bo->mem.num_pages << PAGE_SHIFT;
+               offset = bo->mem.mm_node->start << PAGE_SHIFT;
+               if ((offset + size) > rdev->mc.visible_vram_size) {
+                       /* hurrah the memory is not visible ! */
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       if (unlikely(r != 0))
+                               return r;
+                       offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                       /* this should not happen */
+                       if ((offset + size) > rdev->mc.visible_vram_size)
+                               return -EINVAL;
+               }
+       }
+       return 0;
  }
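/*
 * Minimal sketch of the caller side (assumed from the new int return type;
 * the real TTM fault handler lives in ttm_bo_vm.c and is not part of this
 * diff): a non-zero return from fault_reserve_notify now turns into a
 * failed page fault instead of silently mapping VRAM the CPU cannot reach.
 */
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		if (unlikely(ret != 0))
			return VM_FAULT_SIGBUS;	/* BO could not be moved below the aperture */
	}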
index f06533676e7dbf6d40662d21dd4f63dc2ad3f9c4,91030eab22b02722acf191c3202352ee2682f038..af98f45954b31884e748a1d922dee33964949424
  #include <ttm/ttm_bo_driver.h>
  #include <ttm/ttm_placement.h>
  #include <ttm/ttm_module.h>
 +#include <ttm/ttm_page_alloc.h>
  #include <drm/drmP.h>
  #include <drm/radeon_drm.h>
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "radeon_reg.h"
  #include "radeon.h"
  
@@@ -163,34 -161,21 +163,21 @@@ static int radeon_init_mem_type(struct 
                                          (unsigned)type);
                                return -EINVAL;
                        }
-                       man->io_offset = rdev->mc.agp_base;
-                       man->io_size = rdev->mc.gtt_size;
-                       man->io_addr = NULL;
                        if (!rdev->ddev->agp->cant_use_aperture)
-                               man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-                                            TTM_MEMTYPE_FLAG_MAPPABLE;
+                               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
-               } else
- #endif
-               {
-                       man->io_offset = 0;
-                       man->io_size = 0;
-                       man->io_addr = NULL;
                }
+ #endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-               man->io_addr = NULL;
-               man->io_offset = rdev->mc.aper_base;
-               man->io_size = rdev->mc.aper_size;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@@ -245,9 -230,9 +232,9 @@@ static void radeon_move_null(struct ttm
  }
  
  static int radeon_move_blit(struct ttm_buffer_object *bo,
-                           bool evict, int no_wait,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+                       bool evict, int no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem,
+                       struct ttm_mem_reg *old_mem)
  {
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-                                     evict, no_wait, new_mem);
+                                     evict, no_wait_reserve, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
  }
  
  static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
  {
        struct radeon_device *rdev;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                            interruptible, no_wait);
+                            interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
  out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;
  }
  
  static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
  {
        struct radeon_device *rdev;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@@ -395,8 -382,9 +384,9 @@@ out_cleanup
  }
  
  static int radeon_bo_move(struct ttm_buffer_object *bo,
-                         bool evict, bool interruptible, bool no_wait,
-                         struct ttm_mem_reg *new_mem)
+                       bool evict, bool interruptible,
+                       bool no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem)
  {
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                       no_wait_reserve, no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                           no_wait_reserve, no_wait_gpu, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+               r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        }
  
        if (r) {
  memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        }
        return r;
  }
  
+ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct radeon_device *rdev = radeon_get_rdev(bdev);
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_TT:
+ #if __OS_HAS_AGP
+               if (rdev->flags & RADEON_IS_AGP) {
+                       /* RADEON_IS_AGP is set only if AGP is active */
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = rdev->mc.agp_base;
+                       mem->bus.is_iomem = true;
+               }
+ #endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               /* check if it's visible */
+               if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+                       return -EINVAL;
+               mem->bus.base = rdev->mc.aper_base;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+ }
  static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
  {
@@@ -480,6 -511,8 +513,8 @@@ static struct ttm_bo_driver radeon_bo_d
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+       .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+       .io_mem_free = &radeon_ttm_io_mem_free,
  };
  
  int radeon_ttm_init(struct radeon_device *rdev)
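/*
 * What the two new hooks hand back: io_mem_reserve() fills mem->bus so that
 * core TTM (ttm_mem_reg_ioremap(), the fault handler) can compute CPU-side
 * addresses without the old per-manager io_offset/io_size fields. A small
 * illustrative helper (not in the driver) showing the arithmetic consumers
 * perform:
 */
static unsigned long example_bus_pfn(struct ttm_mem_reg *mem, unsigned long page)
{
	/* only meaningful when io_mem_reserve() reported mem->bus.is_iomem */
	return ((mem->bus.base + mem->bus.offset) >> PAGE_SHIFT) + page;
}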
@@@ -746,8 -779,8 +781,8 @@@ static int radeon_mm_dump_table(struct 
  static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
  {
  #if defined(CONFIG_DEBUG_FS)
 -      static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
 -      static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
 +      static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
 +      static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
        unsigned i;
  
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
  
        }
 -      return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
 +      /* Add ttm page pool to debugfs */
 +      sprintf(radeon_mem_types_names[i], "ttm_page_pool");
 +      radeon_mem_types_list[i].name = radeon_mem_types_names[i];
 +      radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
 +      radeon_mem_types_list[i].driver_features = 0;
 +      radeon_mem_types_list[i].data = NULL;
 +      return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
  
  #endif
        return 0;
index c14f3be25b4b2e5596079f4849c364a09a629e8f,e2089faa95936a60f69abc539bf03ac43d6948fe..a74683e186128eca769ae61f435743b038746f93
@@@ -27,7 -27,6 +27,7 @@@
   */
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "radeon.h"
  #include "radeon_asic.h"
@@@ -237,6 -236,7 +237,6 @@@ void r700_cp_stop(struct radeon_device 
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
  }
  
 -
  static int rv770_cp_load_microcode(struct radeon_device *rdev)
  {
        const __be32 *fw_data;
        return 0;
  }
  
 +void r700_cp_fini(struct radeon_device *rdev)
 +{
 +      r700_cp_stop(rdev);
 +      radeon_ring_fini(rdev);
 +}
  
  /*
   * Core functions
@@@ -910,17 -905,18 +910,12 @@@ int rv770_mc_init(struct radeon_device 
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
  
        return 0;
  }
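/*
 * With the aperture clamp above removed, mc_vram_size may now exceed
 * aper_size; only the first visible_vram_size bytes stay CPU-mappable.
 * Illustrative check only (the real enforcement is the io_mem_reserve and
 * fault_reserve_notify callbacks earlier in this diff):
 */
static bool example_vram_cpu_visible(struct radeon_device *rdev,
				     u64 offset, u64 size)
{
	return (offset + size) <= rdev->mc.visible_vram_size;
}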
  
 -int rv770_gpu_reset(struct radeon_device *rdev)
 -{
 -      /* FIXME: implement any rv770 specific bits */
 -      return r600_gpu_reset(rdev);
 -}
 -
  static int rv770_startup(struct radeon_device *rdev)
  {
        int r;
@@@ -1130,7 -1126,7 +1125,7 @@@ int rv770_init(struct radeon_device *rd
        r = rv770_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
 -              r600_cp_fini(rdev);
 +              r700_cp_fini(rdev);
                r600_wb_fini(rdev);
                r600_irq_fini(rdev);
                radeon_irq_kms_fini(rdev);
@@@ -1164,7 -1160,7 +1159,7 @@@ void rv770_fini(struct radeon_device *r
  {
        radeon_pm_fini(rdev);
        r600_blit_fini(rdev);
 -      r600_cp_fini(rdev);
 +      r700_cp_fini(rdev);
        r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
index d764e82e799b4a264618a723ad66d46aa3e9b217,333b401ca4c94820b085edc2824914a55ca7f025..a37a94872a14c888001356ca45ea5832881397a0
@@@ -33,7 -33,6 +33,7 @@@
  #include <linux/io.h>
  #include <linux/highmem.h>
  #include <linux/wait.h>
 +#include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/module.h>
  
@@@ -50,7 -49,8 +50,8 @@@ void ttm_bo_free_old_node(struct ttm_bu
  }
  
  int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                   bool evict, bool no_wait_reserve,
+                   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
  {
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
  }
  EXPORT_SYMBOL(ttm_bo_move_ttm);
  
+ int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+       int ret;
+       if (!mem->bus.io_reserved) {
+               mem->bus.io_reserved = true;
+               ret = bdev->driver->io_mem_reserve(bdev, mem);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+       return 0;
+ }
+ void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+       if (bdev->driver->io_mem_reserve) {
+               if (mem->bus.io_reserved) {
+                       mem->bus.io_reserved = false;
+                       bdev->driver->io_mem_free(bdev, mem);
+               }
+       }
+ }
  int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
  {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
        int ret;
        void *addr;
  
        *virtual = NULL;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-       if (ret || bus_size == 0)
+       ret = ttm_mem_io_reserve(bdev, mem);
+       if (ret)
                return ret;
  
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-               addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-       else {
+       if (mem->bus.addr) {
+               addr = mem->bus.addr;
+       } else {
                if (mem->placement & TTM_PL_FLAG_WC)
-                       addr = ioremap_wc(bus_base + bus_offset, bus_size);
+                       addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
-                       addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-               if (!addr)
+                       addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+               if (!addr) {
+                       ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
+               }
        }
        *virtual = addr;
        return 0;
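/*
 * Usage sketch (not from this diff): ttm_bo_move_memcpy() maps both regions
 * through the helper above, so every CPU access now flows through the
 * driver's io_mem_reserve()/io_mem_free() rather than cached io_* fields.
 */
	void *old_iomap, *new_iomap;
	int ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;
	/* ... copy page by page ... */
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, old_mem, old_iomap);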
@@@ -117,8 -138,9 +139,9 @@@ void ttm_mem_reg_iounmap(struct ttm_bo_
  
        man = &bdev->man[mem->mem_type];
  
-       if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+       if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
+       ttm_mem_io_free(bdev, mem);
  }
  
  static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@@ -208,7 -230,8 +231,8 @@@ static int ttm_copy_ttm_io_page(struct 
  }
  
  int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                      bool evict, bool no_wait_reserve, bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@@ -369,26 -392,23 +393,23 @@@ pgprot_t ttm_io_prot(uint32_t caching_f
  EXPORT_SYMBOL(ttm_io_prot);
  
  static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-                         unsigned long bus_base,
-                         unsigned long bus_offset,
-                         unsigned long bus_size,
+                         unsigned long offset,
+                         unsigned long size,
                          struct ttm_bo_kmap_obj *map)
  {
-       struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+       if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
-               map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+               map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
-                       map->virtual = ioremap_wc(bus_base + bus_offset,
-                                                 bus_size);
+                       map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                 size);
                else
-                       map->virtual = ioremap_nocache(bus_base + bus_offset,
-                                                      bus_size);
+                       map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                      size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
  }
@@@ -441,13 -461,12 +462,12 @@@ int ttm_bo_kmap(struct ttm_buffer_objec
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
  {
+       unsigned long offset, size;
        int ret;
-       unsigned long bus_base;
-       unsigned long bus_offset;
-       unsigned long bus_size;
  
        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
+       map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
  #endif
-       ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-                               &bus_offset, &bus_size);
+       ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
-       if (bus_size == 0) {
+       if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
-               bus_offset += start_page << PAGE_SHIFT;
-               bus_size = num_pages << PAGE_SHIFT;
-               return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+               offset = start_page << PAGE_SHIFT;
+               size = num_pages << PAGE_SHIFT;
+               return ttm_bo_ioremap(bo, offset, size, map);
        }
  }
  EXPORT_SYMBOL(ttm_bo_kmap);
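/*
 * Caller-side sketch (not from this diff): with the PCI-offset plumbing
 * gone, kmapping a BO is simply kmap -> access -> kunmap, and kunmap now
 * also drops the io reservation taken above. ttm_kmap_obj_virtual() is the
 * accessor from ttm_bo_api.h.
 */
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *ptr;
	int r;

	r = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (r)
		return r;
	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	/* is_iomem tells the caller whether memcpy_toio()-style access is required */
	ttm_bo_kunmap(&map);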
@@@ -477,6 -495,7 +496,7 @@@ void ttm_bo_kunmap(struct ttm_bo_kmap_o
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
+               ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
  }
  EXPORT_SYMBOL(ttm_bo_kunmap);
  
- int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-                   unsigned long dst_offset,
-                   unsigned long *pfn, pgprot_t *prot)
- {
-       struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
-       int ret;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-                       &bus_size);
-       if (ret)
-               return -EINVAL;
-       if (bus_size != 0)
-               *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-       else
-               if (!bo->ttm)
-                       return -EINVAL;
-               else
-                       *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-                                                          dst_offset >>
-                                                          PAGE_SHIFT));
-       *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-               PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-       return 0;
- }
  int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
-                             bool evict, bool no_wait,
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
  {
        struct ttm_bo_device *bdev = bo->bdev;