bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge remote branch 'korg/drm-core-next' into drm-next-stage
author     Dave Airlie <airlied@redhat.com>
           Mon, 1 Mar 2010 05:40:12 +0000 (15:40 +1000)
committer  Dave Airlie <airlied@redhat.com>
           Mon, 1 Mar 2010 05:40:12 +0000 (15:40 +1000)
* korg/drm-core-next:
  drm/ttm: handle OOM in ttm_tt_swapout
  drm/radeon/kms/atom: fix shr/shl ops
  drm/kms: fix spelling of "CLOCK"
  drm/kms: fix fb_changed = true else statement
  drivers/gpu/drm/drm_fb_helper.c: don't use private implementation of atoi()
  drm: switch all GEM/KMS ioctls to unlocked ioctl status.
  Use drm_gem_object_[handle_]unreference_unlocked where possible
  drm: introduce drm_gem_object_[handle_]unreference_unlocked
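
The common thread in the GEM patches above is the helper pair introduced by the
last two commits: callers no longer bracket a reference drop with
dev->struct_mutex. A minimal before/after sketch, as the hunks below apply it:

    /* before: every caller took struct_mutex around the drop */
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(obj);
    mutex_unlock(&dev->struct_mutex);

    /* after: the _unlocked variant takes the mutex internally, so ioctl
     * paths that otherwise run without struct_mutex can drop their
     * reference in one call */
    drm_gem_object_unreference_unlocked(obj);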

drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/ttm/ttm_tt.c

index ec8a0d7ffa3990ea9f7ded8871f3ff384b2bf24d,6844ca4f42655f5a821d23e3bbe7eee3a13199d2..9d87d5a41bdcb96dc357b93a4ea541e233031fa5
@@@ -128,9 -128,7 +128,7 @@@ i915_gem_create_ioctl(struct drm_devic
                return -ENOMEM;
  
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       mutex_lock(&dev->struct_mutex);
-       drm_gem_object_handle_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference_unlocked(obj);
  
        if (ret)
                return ret;
@@@ -488,7 -486,7 +486,7 @@@ i915_gem_pread_ioctl(struct drm_device 
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }
  
                                                        file_priv);
        }
  
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference_unlocked(obj);
  
        return ret;
  }
@@@ -961,7 -959,7 +959,7 @@@ i915_gem_pwrite_ioctl(struct drm_devic
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
-               drm_gem_object_unreference(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }
  
                DRM_INFO("pwrite failed %d\n", ret);
  #endif
  
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference_unlocked(obj);
  
        return ret;
  }
@@@ -1138,9 -1136,7 +1136,7 @@@ i915_gem_mmap_ioctl(struct drm_device *
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
-       mutex_lock(&dev->struct_mutex);
-       drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;
  
@@@ -1552,8 -1548,6 +1548,8 @@@ i915_gem_object_move_to_inactive(struc
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  
 +      BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 +
        obj_priv->last_rendering_seqno = 0;
        if (obj_priv->active) {
                obj_priv->active = 0;
@@@ -1624,8 -1618,7 +1620,8 @@@ i915_add_request(struct drm_device *dev
                struct drm_i915_gem_object *obj_priv, *next;
  
                list_for_each_entry_safe(obj_priv, next,
 -                                       &dev_priv->mm.flushing_list, list) {
 +                                       &dev_priv->mm.gpu_write_list,
 +                                       gpu_write_list) {
                        struct drm_gem_object *obj = obj_priv->obj;
  
                        if ((obj->write_domain & flush_domains) ==
                                uint32_t old_write_domain = obj->write_domain;
  
                                obj->write_domain = 0;
 +                              list_del_init(&obj_priv->gpu_write_list);
                                i915_gem_object_move_to_active(obj, seqno);
  
                                trace_i915_gem_object_change_domain(obj,
@@@ -2088,8 -2080,8 +2084,8 @@@ static in
  i915_gem_evict_everything(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      uint32_t seqno;
        int ret;
 +      uint32_t seqno;
        bool lists_empty;
  
        spin_lock(&dev_priv->mm.active_list_lock);
        if (ret)
                return ret;
  
 +      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 +
        ret = i915_gem_evict_from_inactive_list(dev);
        if (ret)
                return ret;
@@@ -2707,7 -2697,7 +2703,7 @@@ i915_gem_object_flush_gpu_write_domain(
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
        seqno = i915_add_request(dev, NULL, obj->write_domain);
 -      obj->write_domain = 0;
 +      BUG_ON(obj->write_domain);
        i915_gem_object_move_to_active(obj, seqno);
  
        trace_i915_gem_object_change_domain(obj,
@@@ -3688,10 -3678,8 +3684,10 @@@ i915_gem_do_execbuffer(struct drm_devic
        if (args->num_cliprects != 0) {
                cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
                                    GFP_KERNEL);
 -              if (cliprects == NULL)
 +              if (cliprects == NULL) {
 +                      ret = -ENOMEM;
                        goto pre_mutex_err;
 +              }
  
                ret = copy_from_user(cliprects,
                                     (struct drm_clip_rect __user *)
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
 -              if (dev->flush_domains)
 +              if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
                        (void)i915_add_request(dev, file_priv,
                                               dev->flush_domains);
        }
  
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
 +              struct drm_i915_gem_object *obj_priv = obj->driver_private;
                uint32_t old_write_domain = obj->write_domain;
  
                obj->write_domain = obj->pending_write_domain;
 +              if (obj->write_domain)
 +                      list_move_tail(&obj_priv->gpu_write_list,
 +                                     &dev_priv->mm.gpu_write_list);
 +              else
 +                      list_del_init(&obj_priv->gpu_write_list);
 +
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
@@@ -4385,7 -4366,6 +4381,7 @@@ int i915_gem_init_object(struct drm_gem
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
 +      INIT_LIST_HEAD(&obj_priv->gpu_write_list);
        INIT_LIST_HEAD(&obj_priv->fence_list);
        obj_priv->madv = I915_MADV_WILLNEED;
  
@@@ -4837,7 -4817,6 +4833,7 @@@ i915_gem_load(struct drm_device *dev
        spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 +      INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
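
The i915 hunks above add a dedicated gpu_write_list so that request retirement
walks only the objects with an outstanding GPU write instead of the whole
flushing_list. A self-contained sketch of the list discipline they rely on;
track_write() is an illustrative name, not from the patch:

    #include <linux/list.h>

    /* An empty (self-linked) node means "no pending GPU write".
     * list_del_init() leaves the node self-linked, so both branches are
     * safe whatever the node's current membership -- the property the
     * execbuffer and request-retire paths above depend on. */
    static void track_write(struct list_head *node,
                            struct list_head *gpu_write_list,
                            bool has_write)
    {
            if (has_write)
                    list_move_tail(node, gpu_write_list);
            else
                    list_del_init(node);
    }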
index b27202d23ebc1d9aab4a327eec55d733bd43cae8,8f72d0bfa7c5f3493a01e82ed4bca41667d19643..c8fd15f146afe73722cd702e8c7c4607db3af4bc
@@@ -240,86 -240,33 +240,86 @@@ struct intel_limit 
  #define IRONLAKE_DOT_MAX         350000
  #define IRONLAKE_VCO_MIN         1760000
  #define IRONLAKE_VCO_MAX         3510000
 -#define IRONLAKE_N_MIN           1
 -#define IRONLAKE_N_MAX           6
 -#define IRONLAKE_M_MIN           79
 -#define IRONLAKE_M_MAX           127
  #define IRONLAKE_M1_MIN          12
  #define IRONLAKE_M1_MAX          22
  #define IRONLAKE_M2_MIN          5
  #define IRONLAKE_M2_MAX          9
 -#define IRONLAKE_P_SDVO_DAC_MIN  5
 -#define IRONLAKE_P_SDVO_DAC_MAX  80
 -#define IRONLAKE_P_LVDS_MIN      28
 -#define IRONLAKE_P_LVDS_MAX      112
 -#define IRONLAKE_P1_MIN          1
 -#define IRONLAKE_P1_MAX          8
 -#define IRONLAKE_P2_SDVO_DAC_SLOW 10
 -#define IRONLAKE_P2_SDVO_DAC_FAST 5
 -#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
 -#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
  #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
  
 -#define IRONLAKE_P_DISPLAY_PORT_MIN   10
 -#define IRONLAKE_P_DISPLAY_PORT_MAX   20
 -#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
 -#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
 -#define IRONLAKE_P2_DISPLAY_PORT_LIMIT        0
 -#define IRONLAKE_P1_DISPLAY_PORT_MIN  1
 -#define IRONLAKE_P1_DISPLAY_PORT_MAX  2
 +/* We have parameter ranges for different type of outputs. */
 +
 +/* DAC & HDMI Refclk 120Mhz */
 +#define IRONLAKE_DAC_N_MIN    1
 +#define IRONLAKE_DAC_N_MAX    5
 +#define IRONLAKE_DAC_M_MIN    79
 +#define IRONLAKE_DAC_M_MAX    127
 +#define IRONLAKE_DAC_P_MIN    5
 +#define IRONLAKE_DAC_P_MAX    80
 +#define IRONLAKE_DAC_P1_MIN   1
 +#define IRONLAKE_DAC_P1_MAX   8
 +#define IRONLAKE_DAC_P2_SLOW  10
 +#define IRONLAKE_DAC_P2_FAST  5
 +
 +/* LVDS single-channel 120Mhz refclk */
 +#define IRONLAKE_LVDS_S_N_MIN 1
 +#define IRONLAKE_LVDS_S_N_MAX 3
 +#define IRONLAKE_LVDS_S_M_MIN 79
 +#define IRONLAKE_LVDS_S_M_MAX 118
 +#define IRONLAKE_LVDS_S_P_MIN 28
 +#define IRONLAKE_LVDS_S_P_MAX 112
 +#define IRONLAKE_LVDS_S_P1_MIN        2
 +#define IRONLAKE_LVDS_S_P1_MAX        8
 +#define IRONLAKE_LVDS_S_P2_SLOW       14
 +#define IRONLAKE_LVDS_S_P2_FAST       14
 +
 +/* LVDS dual-channel 120Mhz refclk */
 +#define IRONLAKE_LVDS_D_N_MIN 1
 +#define IRONLAKE_LVDS_D_N_MAX 3
 +#define IRONLAKE_LVDS_D_M_MIN 79
 +#define IRONLAKE_LVDS_D_M_MAX 127
 +#define IRONLAKE_LVDS_D_P_MIN 14
 +#define IRONLAKE_LVDS_D_P_MAX 56
 +#define IRONLAKE_LVDS_D_P1_MIN        2
 +#define IRONLAKE_LVDS_D_P1_MAX        8
 +#define IRONLAKE_LVDS_D_P2_SLOW       7
 +#define IRONLAKE_LVDS_D_P2_FAST       7
 +
 +/* LVDS single-channel 100Mhz refclk */
 +#define IRONLAKE_LVDS_S_SSC_N_MIN     1
 +#define IRONLAKE_LVDS_S_SSC_N_MAX     2
 +#define IRONLAKE_LVDS_S_SSC_M_MIN     79
 +#define IRONLAKE_LVDS_S_SSC_M_MAX     126
 +#define IRONLAKE_LVDS_S_SSC_P_MIN     28
 +#define IRONLAKE_LVDS_S_SSC_P_MAX     112
 +#define IRONLAKE_LVDS_S_SSC_P1_MIN    2
 +#define IRONLAKE_LVDS_S_SSC_P1_MAX    8
 +#define IRONLAKE_LVDS_S_SSC_P2_SLOW   14
 +#define IRONLAKE_LVDS_S_SSC_P2_FAST   14
 +
 +/* LVDS dual-channel 100Mhz refclk */
 +#define IRONLAKE_LVDS_D_SSC_N_MIN     1
 +#define IRONLAKE_LVDS_D_SSC_N_MAX     3
 +#define IRONLAKE_LVDS_D_SSC_M_MIN     79
 +#define IRONLAKE_LVDS_D_SSC_M_MAX     126
 +#define IRONLAKE_LVDS_D_SSC_P_MIN     14
 +#define IRONLAKE_LVDS_D_SSC_P_MAX     42
 +#define IRONLAKE_LVDS_D_SSC_P1_MIN    2
 +#define IRONLAKE_LVDS_D_SSC_P1_MAX    6
 +#define IRONLAKE_LVDS_D_SSC_P2_SLOW   7
 +#define IRONLAKE_LVDS_D_SSC_P2_FAST   7
 +
 +/* DisplayPort */
 +#define IRONLAKE_DP_N_MIN             1
 +#define IRONLAKE_DP_N_MAX             2
 +#define IRONLAKE_DP_M_MIN             81
 +#define IRONLAKE_DP_M_MAX             90
 +#define IRONLAKE_DP_P_MIN             10
 +#define IRONLAKE_DP_P_MAX             20
 +#define IRONLAKE_DP_P2_FAST           10
 +#define IRONLAKE_DP_P2_SLOW           10
 +#define IRONLAKE_DP_P2_LIMIT          0
 +#define IRONLAKE_DP_P1_MIN            1
 +#define IRONLAKE_DP_P1_MAX            2
  
  static bool
  intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@@ -527,78 -474,33 +527,78 @@@ static const intel_limit_t intel_limits
        .find_pll = intel_find_best_PLL,
  };
  
 -static const intel_limit_t intel_limits_ironlake_sdvo = {
 +static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
        .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
 -      .n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
 -      .m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
 +      .n   = { .min = IRONLAKE_DAC_N_MIN,        .max = IRONLAKE_DAC_N_MAX },
 +      .m   = { .min = IRONLAKE_DAC_M_MIN,        .max = IRONLAKE_DAC_M_MAX },
        .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
        .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
 -      .p   = { .min = IRONLAKE_P_SDVO_DAC_MIN,   .max = IRONLAKE_P_SDVO_DAC_MAX },
 -      .p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
 +      .p   = { .min = IRONLAKE_DAC_P_MIN,        .max = IRONLAKE_DAC_P_MAX },
 +      .p1  = { .min = IRONLAKE_DAC_P1_MIN,       .max = IRONLAKE_DAC_P1_MAX },
        .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 -               .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
 -               .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
 +               .p2_slow = IRONLAKE_DAC_P2_SLOW,
 +               .p2_fast = IRONLAKE_DAC_P2_FAST },
        .find_pll = intel_g4x_find_best_PLL,
  };
  
 -static const intel_limit_t intel_limits_ironlake_lvds = {
 +static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
        .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
 -      .n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
 -      .m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
 +      .n   = { .min = IRONLAKE_LVDS_S_N_MIN,     .max = IRONLAKE_LVDS_S_N_MAX },
 +      .m   = { .min = IRONLAKE_LVDS_S_M_MIN,     .max = IRONLAKE_LVDS_S_M_MAX },
        .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
        .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
 -      .p   = { .min = IRONLAKE_P_LVDS_MIN,       .max = IRONLAKE_P_LVDS_MAX },
 -      .p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
 +      .p   = { .min = IRONLAKE_LVDS_S_P_MIN,     .max = IRONLAKE_LVDS_S_P_MAX },
 +      .p1  = { .min = IRONLAKE_LVDS_S_P1_MIN,    .max = IRONLAKE_LVDS_S_P1_MAX },
        .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 -               .p2_slow = IRONLAKE_P2_LVDS_SLOW,
 -               .p2_fast = IRONLAKE_P2_LVDS_FAST },
 +               .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
 +               .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
 +      .find_pll = intel_g4x_find_best_PLL,
 +};
 +
 +static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 +      .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 +      .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
 +      .n   = { .min = IRONLAKE_LVDS_D_N_MIN,     .max = IRONLAKE_LVDS_D_N_MAX },
 +      .m   = { .min = IRONLAKE_LVDS_D_M_MIN,     .max = IRONLAKE_LVDS_D_M_MAX },
 +      .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 +      .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
 +      .p   = { .min = IRONLAKE_LVDS_D_P_MIN,     .max = IRONLAKE_LVDS_D_P_MAX },
 +      .p1  = { .min = IRONLAKE_LVDS_D_P1_MIN,    .max = IRONLAKE_LVDS_D_P1_MAX },
 +      .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 +               .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
 +               .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
 +      .find_pll = intel_g4x_find_best_PLL,
 +};
 +
 +static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 +      .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 +      .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
 +      .n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
 +      .m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
 +      .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 +      .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
 +      .p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
 +      .p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
 +      .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 +               .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
 +               .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
 +      .find_pll = intel_g4x_find_best_PLL,
 +};
 +
 +static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
 +      .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 +      .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
 +      .n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
 +      .m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
 +      .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 +      .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
 +      .p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
 +      .p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
 +      .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
 +               .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
 +               .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
        .find_pll = intel_g4x_find_best_PLL,
  };
  
@@@ -607,53 -509,34 +607,53 @@@ static const intel_limit_t intel_limits
                   .max = IRONLAKE_DOT_MAX },
          .vco = { .min = IRONLAKE_VCO_MIN,
                   .max = IRONLAKE_VCO_MAX},
 -        .n   = { .min = IRONLAKE_N_MIN,
 -                 .max = IRONLAKE_N_MAX },
 -        .m   = { .min = IRONLAKE_M_MIN,
 -                 .max = IRONLAKE_M_MAX },
 +        .n   = { .min = IRONLAKE_DP_N_MIN,
 +                 .max = IRONLAKE_DP_N_MAX },
 +        .m   = { .min = IRONLAKE_DP_M_MIN,
 +                 .max = IRONLAKE_DP_M_MAX },
          .m1  = { .min = IRONLAKE_M1_MIN,
                   .max = IRONLAKE_M1_MAX },
          .m2  = { .min = IRONLAKE_M2_MIN,
                   .max = IRONLAKE_M2_MAX },
 -        .p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
 -                 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
 -        .p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
 -                 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
 -        .p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
 -                 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
 -                 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
 +        .p   = { .min = IRONLAKE_DP_P_MIN,
 +                 .max = IRONLAKE_DP_P_MAX },
 +        .p1  = { .min = IRONLAKE_DP_P1_MIN,
 +                 .max = IRONLAKE_DP_P1_MAX},
 +        .p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
 +                 .p2_slow = IRONLAKE_DP_P2_SLOW,
 +                 .p2_fast = IRONLAKE_DP_P2_FAST },
          .find_pll = intel_find_pll_ironlake_dp,
  };
  
  static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
  {
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;
 -      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 -              limit = &intel_limits_ironlake_lvds;
 -      else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 +      int refclk = 120;
 +
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 +              if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
 +                      refclk = 100;
 +
 +              if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
 +                  LVDS_CLKB_POWER_UP) {
 +                      /* LVDS dual channel */
 +                      if (refclk == 100)
 +                              limit = &intel_limits_ironlake_dual_lvds_100m;
 +                      else
 +                              limit = &intel_limits_ironlake_dual_lvds;
 +              } else {
 +                      if (refclk == 100)
 +                              limit = &intel_limits_ironlake_single_lvds_100m;
 +                      else
 +                              limit = &intel_limits_ironlake_single_lvds;
 +              }
 +      } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
                        HAS_eDP)
                limit = &intel_limits_ironlake_display_port;
        else
 -              limit = &intel_limits_ironlake_sdvo;
 +              limit = &intel_limits_ironlake_dac;
  
        return limit;
  }
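
Condensed, the LVDS branch of intel_ironlake_limit() above reduces to a
four-way table choice (the DP/eDP case and the DAC fallback are omitted here):

    bool ssc100 = dev_priv->lvds_use_ssc &&
                  dev_priv->lvds_ssc_freq == 100;     /* 100MHz SSC refclk */
    bool dual = (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
                LVDS_CLKB_POWER_UP;                   /* dual-channel panel */

    limit = dual ? (ssc100 ? &intel_limits_ironlake_dual_lvds_100m
                           : &intel_limits_ironlake_dual_lvds)
                 : (ssc100 ? &intel_limits_ironlake_single_lvds_100m
                           : &intel_limits_ironlake_single_lvds);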
@@@ -1031,8 -914,6 +1031,8 @@@ static void i8xx_enable_fbc(struct drm_
  
        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
 +      if (IS_I945GM(dev))
 +              fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        if (obj_priv->tiling_mode != I915_TILING_NONE)
@@@ -3553,11 -3434,10 +3553,10 @@@ static int intel_crtc_cursor_set(struc
        intel_crtc->cursor_bo = bo;
  
        return 0;
- fail:
-       mutex_lock(&dev->struct_mutex);
  fail_locked:
-       drm_gem_object_unreference(bo);
        mutex_unlock(&dev->struct_mutex);
+ fail:
+       drm_gem_object_unreference_unlocked(bo);
        return ret;
  }
  
@@@ -4081,8 -3961,7 +4080,8 @@@ static void intel_crtc_destroy(struct d
  struct intel_unpin_work {
        struct work_struct work;
        struct drm_device *dev;
 -      struct drm_gem_object *obj;
 +      struct drm_gem_object *old_fb_obj;
 +      struct drm_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
        int pending;
  };
@@@ -4093,9 -3972,8 +4092,9 @@@ static void intel_unpin_work_fn(struct 
                container_of(__work, struct intel_unpin_work, work);
  
        mutex_lock(&work->dev->struct_mutex);
 -      i915_gem_object_unpin(work->obj);
 -      drm_gem_object_unreference(work->obj);
 +      i915_gem_object_unpin(work->old_fb_obj);
 +      drm_gem_object_unreference(work->pending_flip_obj);
 +      drm_gem_object_unreference(work->old_fb_obj);
        mutex_unlock(&work->dev->struct_mutex);
        kfree(work);
  }
@@@ -4119,7 -3997,7 +4118,7 @@@ void intel_finish_page_flip(struct drm_
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
                if (work && !work->pending) {
 -                      obj_priv = work->obj->driver_private;
 +                      obj_priv = work->pending_flip_obj->driver_private;
                        DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
                                         obj_priv,
                                         atomic_read(&obj_priv->pending_flip));
  
        spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      obj_priv = work->obj->driver_private;
 +      obj_priv = work->pending_flip_obj->driver_private;
  
        /* Initial scanout buffer will have a 0 pending flip count */
        if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@@ -4181,8 -4059,7 +4180,8 @@@ static int intel_crtc_page_flip(struct 
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;
 -      int ret;
 +      int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
 +      int ret, pipesrc;
        RING_LOCALS;
  
        work = kzalloc(sizeof *work, GFP_KERNEL);
        work->event = event;
        work->dev = crtc->dev;
        intel_fb = to_intel_framebuffer(crtc->fb);
 -      work->obj = intel_fb->obj;
 +      work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
  
        /* We borrow the event spin lock for protecting unpin_work */
                return ret;
        }
  
 -      /* Reference the old fb object for the scheduled work. */
 -      drm_gem_object_reference(work->obj);
 +      /* Reference the objects for the scheduled work. */
 +      drm_gem_object_reference(work->old_fb_obj);
 +      drm_gem_object_reference(obj);
  
        crtc->fb = fb;
        i915_gem_object_flush_write_domain(obj);
        drm_vblank_get(dev, intel_crtc->pipe);
        obj_priv = obj->driver_private;
        atomic_inc(&obj_priv->pending_flip);
 +      work->pending_flip_obj = obj;
  
        BEGIN_LP_RING(4);
        OUT_RING(MI_DISPLAY_FLIP |
        OUT_RING(fb->pitch);
        if (IS_I965G(dev)) {
                OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
 -              OUT_RING((fb->width << 16) | fb->height);
 +              pipesrc = I915_READ(pipesrc_reg); 
 +              OUT_RING(pipesrc & 0x0fff0fff);
        } else {
                OUT_RING(obj_priv->gtt_offset);
                OUT_RING(MI_NOOP);
@@@ -4476,9 -4350,7 +4475,7 @@@ static void intel_user_framebuffer_dest
                intelfb_remove(dev, fb);
  
        drm_framebuffer_cleanup(fb);
-       mutex_lock(&dev->struct_mutex);
-       drm_gem_object_unreference(intel_fb->obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(intel_fb->obj);
  
        kfree(intel_fb);
  }
@@@ -4541,9 -4413,7 +4538,7 @@@ intel_user_framebuffer_create(struct dr
  
        ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
        if (ret) {
-               mutex_lock(&dev->struct_mutex);
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
+               drm_gem_object_unreference_unlocked(obj);
                return NULL;
        }
  
index 7f152f66f196394dc7ee1318e64d651d216dfe19,614295a11dfba98c01db70ef30e20f8f950ea8f7..d75788feac6c6a64fe042e0e7d3e4a446d8b2e26
@@@ -24,7 -24,6 +24,7 @@@
  
  #include <linux/module.h>
  #include <linux/sched.h>
 +#include <asm/unaligned.h>
  
  #define ATOM_DEBUG
  
@@@ -213,9 -212,7 +213,9 @@@ static uint32_t atom_get_src_int(atom_e
        case ATOM_ARG_PS:
                idx = U8(*ptr);
                (*ptr)++;
 -              val = le32_to_cpu(ctx->ps[idx]);
 +              /* get_unaligned_le32 avoids unaligned accesses from atombios
 +               * tables, noticed on a DEC Alpha. */
 +              val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
                if (print)
                        DEBUG("PS[0x%02X,0x%04X]", idx, val);
                break;
@@@ -643,7 -640,7 +643,7 @@@ static void atom_op_delay(atom_exec_con
        uint8_t count = U8((*ptr)++);
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
 -              schedule_timeout_uninterruptible(usecs_to_jiffies(count));
 +              udelay(count);
        else
                schedule_timeout_uninterruptible(msecs_to_jiffies(count));
  }
@@@ -881,8 -878,6 +881,6 @@@ static void atom_op_shl(atom_exec_conte
        uint8_t attr = U8((*ptr)++), shift;
        uint32_t saved, dst;
        int dptr = *ptr;
-       attr &= 0x38;
-       attr |= atom_def_dst[attr >> 3] << 6;
        SDEBUG("   dst: ");
        dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
        shift = atom_get_src(ctx, attr, ptr);
@@@ -897,8 -892,6 +895,6 @@@ static void atom_op_shr(atom_exec_conte
        uint8_t attr = U8((*ptr)++), shift;
        uint32_t saved, dst;
        int dptr = *ptr;
-       attr &= 0x38;
-       attr |= atom_def_dst[attr >> 3] << 6;
        SDEBUG("   dst: ");
        dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
        shift = atom_get_src(ctx, attr, ptr);
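
Three independent fixes sit in the atom.c hunks above. The delay op now
busy-waits with udelay() for microsecond delays, since
schedule_timeout_uninterruptible() cannot sleep for less than a jiffy and would
stretch a microsecond delay into milliseconds; the shl/shr ops stop rewriting
attr before fetching the shift count, which had made them decode the wrong
source operand; and the parameter-space read switches to get_unaligned_le32()
because AtomBIOS tables are byte-packed and can hand the interpreter a
misaligned pointer, which traps on strict-alignment CPUs such as Alpha. A
sketch of that last point -- read_ps_entry() is an illustrative name:

    #include <asm/unaligned.h>

    static u32 read_ps_entry(const void *ps_base, unsigned int idx)
    {
            /* le32_to_cpu(*(const __le32 *)p) assumes 4-byte alignment;
             * get_unaligned_le32() falls back to byte loads where the
             * architecture requires it. */
            return get_unaligned_le32((const u8 *)ps_base + idx * 4);
    }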
index e9d085021c1f08d2fb7b0edc40450eb9fa7580a2,da59f5e78e0927b9a65d2f215f4e2047fa6711a6..70ba02ed77237ba31299a7cbc56694dc3e62d57a
@@@ -86,7 -86,7 +86,7 @@@ int radeon_cs_parser_relocs(struct rade
                                                &p->validated);
                }
        }
 -      return radeon_bo_list_validate(&p->validated, p->ib->fence);
 +      return radeon_bo_list_validate(&p->validated);
  }
  
  int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@@ -189,16 -189,15 +189,13 @@@ static void radeon_cs_parser_fini(struc
  {
        unsigned i;
  
 -      if (error && parser->ib) {
 -              radeon_bo_list_unvalidate(&parser->validated,
 -                                              parser->ib->fence);
 -      } else {
 -              radeon_bo_list_unreserve(&parser->validated);
 +      if (!error && parser->ib) {
 +              radeon_bo_list_fence(&parser->validated, parser->ib->fence);
        }
 +      radeon_bo_list_unreserve(&parser->validated);
        for (i = 0; i < parser->nrelocs; i++) {
-               if (parser->relocs[i].gobj) {
-                       mutex_lock(&parser->rdev->ddev->struct_mutex);
-                       drm_gem_object_unreference(parser->relocs[i].gobj);
-                       mutex_unlock(&parser->rdev->ddev->struct_mutex);
-               }
+               if (parser->relocs[i].gobj)
+                       drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
        }
        kfree(parser->track);
        kfree(parser->relocs);
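
radeon_cs_parser_fini() now follows the usual reserve/validate/fence/unreserve
ordering: on success the IB's fence is attached while the buffers are still
reserved, so later eviction waits for the GPU, and the unreserve then runs
unconditionally on both paths. In outline:

    if (!error && parser->ib)
            radeon_bo_list_fence(&parser->validated, parser->ib->fence);
    radeon_bo_list_unreserve(&parser->validated);    /* success or error */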
index 3d47a2c12322173fa3621e9cb5886408dcb1c19e,160c2745f8d82e8e130af6845cf7c0608e680015..a759170763bb6bc2e878a26834f7aa665dff943e
@@@ -196,15 -196,14 +196,15 @@@ EXPORT_SYMBOL(ttm_tt_populate)
  
  #ifdef CONFIG_X86
  static inline int ttm_tt_set_page_caching(struct page *p,
 -                                        enum ttm_caching_state c_state)
 +                                        enum ttm_caching_state c_old,
 +                                        enum ttm_caching_state c_new)
  {
        int ret = 0;
  
        if (PageHighMem(p))
                return 0;
  
 -      if (get_page_memtype(p) != -1) {
 +      if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
  
                        return ret;
        }
  
 -      if (c_state == tt_wc)
 +      if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
 -      else if (c_state == tt_uncached)
 +      else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);
  
        return ret;
  }
  #else /* CONFIG_X86 */
  static inline int ttm_tt_set_page_caching(struct page *p,
 -                                        enum ttm_caching_state c_state)
 +                                        enum ttm_caching_state c_old,
 +                                        enum ttm_caching_state c_new)
  {
        return 0;
  }
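
ttm_tt_set_page_caching() now takes both the current and the target caching
state instead of querying get_page_memtype(), and the explicit pair lets
ttm_tt_set_caching() undo a partial transition by reversing the arguments.
Condensed from the hunks that follow:

    for (i = 0; i < ttm->num_pages; ++i) {
            ret = ttm_tt_set_page_caching(ttm->pages[i],
                                          ttm->caching_state, c_state);
            if (unlikely(ret != 0))
                    goto out_err;       /* pages [0, i) are now in c_state */
    }
    ...
    /* out_err: roll back by running the transition in reverse */
    (void)ttm_tt_set_page_caching(ttm->pages[j], c_state,
                                  ttm->caching_state);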
@@@ -256,9 -254,7 +256,9 @@@ static int ttm_tt_set_caching(struct tt
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
 -                      ret = ttm_tt_set_page_caching(cur_page, c_state);
 +                      ret = ttm_tt_set_page_caching(cur_page,
 +                                                    ttm->caching_state,
 +                                                    c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
@@@ -272,7 -268,7 +272,7 @@@ out_err
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
 -                      (void)ttm_tt_set_page_caching(cur_page,
 +                      (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }
@@@ -480,7 -476,7 +480,7 @@@ static int ttm_tt_swapin(struct ttm_tt 
        void *from_virtual;
        void *to_virtual;
        int i;
-       int ret;
+       int ret = -ENOMEM;
  
        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
  
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = read_mapping_page(swap_space, i, NULL);
-               if (IS_ERR(from_page))
+               if (IS_ERR(from_page)) {
+                       ret = PTR_ERR(from_page);
                        goto out_err;
+               }
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL))
                        goto out_err;
        return 0;
  out_err:
        ttm_tt_free_alloced_pages(ttm);
-       return -ENOMEM;
+       return ret;
  }
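
Both swap paths now preserve the real error: ret starts out as -ENOMEM for the
NULL-returning allocation failures, and anything that comes back as an ERR_PTR
is decoded with PTR_ERR() instead of being collapsed into -ENOMEM. The pattern
as a small self-contained helper -- fetch_swap_page() is illustrative, not from
the patch:

    #include <linux/err.h>
    #include <linux/pagemap.h>

    /* read_mapping_page() reports failure as an ERR_PTR-encoded errno,
     * never NULL, so decode it to surface the real cause (e.g. -EIO from
     * the swap file) rather than a blanket -ENOMEM. */
    static struct page *fetch_swap_page(struct address_space *swap_space,
                                        pgoff_t idx, int *err)
    {
            struct page *p = read_mapping_page(swap_space, idx, NULL);

            if (IS_ERR(p)) {
                    *err = PTR_ERR(p);
                    return NULL;
            }
            return p;
    }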
  
  int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
        void *from_virtual;
        void *to_virtual;
        int i;
+       int ret = -ENOMEM;
  
        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR "Failed allocating swap storage.\n");
-                       return -ENOMEM;
+                       return PTR_ERR(swap_storage);
                }
        } else
                swap_storage = persistant_swap_storage;
                if (unlikely(from_page == NULL))
                        continue;
                to_page = read_mapping_page(swap_space, i, NULL);
-               if (unlikely(to_page == NULL))
+               if (unlikely(IS_ERR(to_page))) {
+                       ret = PTR_ERR(to_page);
                        goto out_err;
+               }
                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
@@@ -595,5 -595,5 +599,5 @@@ out_err
        if (!persistant_swap_storage)
                fput(swap_storage);
  
-       return -ENOMEM;
+       return ret;
  }