Merge remote branch 'nouveau/for-airlied' of ../drm-nouveau-next into drm-testing
author    Dave Airlie <airlied@redhat.com>
          Tue, 1 Jun 2010 01:32:29 +0000 (11:32 +1000)
committer Dave Airlie <airlied@redhat.com>
          Tue, 1 Jun 2010 01:32:29 +0000 (11:32 +1000)
* 'nouveau/for-airlied' of ../drm-nouveau-next:
  drm/nv50: cast IGP memory location to u64 before shifting
  drm/nv50: use alternate source of SOR_MODE_CTRL for DP hack
  drm/nouveau: fix dual-link displays when plugged into single-link outputs
  drm/nv50: obey dcb->duallink_possible
  drm/nv50: fix duallink_possible calculation for DCB 4.0 cards
  drm/nouveau: don't execute INIT_GPIO unless we're really running the table
  drm/nv40: allow cold-booting of nv4x chipsets
  drm/nouveau: fix POST detection for certain chipsets
  drm/nouveau: Add getparam for current PTIMER time.
  drm/nouveau: allow cursor image and position to survive suspend

35 files changed:
drivers/char/agp/amd64-agp.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv40_graph.c
drivers/gpu/drm/radeon/Kconfig
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
include/drm/drm_crtc_helper.h

diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a60de74580fe7ba3f99ac579725b23a2f45..70312da4c968f9e4af7c39e9a5a649b4952f8bd3 100644
@@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 {
        u32 httfea,baseaddr,enuscr;
        struct pci_dev *dev1;
-       int i;
+       int i, ret;
        unsigned size = amd64_fetch_size();
 
        dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 
        if (i == ARRAY_SIZE(uli_sizes)) {
                dev_info(&pdev->dev, "no ULi size found for %d\n", size);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto put;
        }
 
        /* shadow x86-64 registers into ULi registers */
        pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
 
        /* if x86-64 aperture base is beyond 4G, exit here */
-       if ((httfea & 0x7fff) >> (32 - 25))
-               return -ENODEV;
+       if ((httfea & 0x7fff) >> (32 - 25)) {
+               ret = -ENODEV;
+               goto put;
+       }
 
        httfea = (httfea& 0x7fff) << 25;
 
@@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
        enuscr= httfea+ (size * 1024 * 1024) - 1;
        pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
        pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+       ret = 0;
+put:
        pci_dev_put(dev1);
-       return 0;
+       return ret;
 }
 
 
@@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 {
        u32 tmp, apbase, apbar, aplimit;
        struct pci_dev *dev1;
-       int i;
+       int i, ret;
        unsigned size = amd64_fetch_size();
 
        dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 
        if (i == ARRAY_SIZE(nforce3_sizes)) {
                dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto put;
        }
 
        pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
        /* if x86-64 aperture base is beyond 4G, exit here */
        if ( (apbase & 0x7fff) >> (32 - 25) ) {
                dev_info(&pdev->dev, "aperture base > 4G\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto put;
        }
 
        apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
        pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
        pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
 
+       ret = 0;
+put:
        pci_dev_put(dev1);
 
-       return 0;
+       return ret;
 }
 
 static int __devinit agp_amd64_probe(struct pci_dev *pdev,
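
Note: both hunks above replace early "return -ENODEV;" exits with "goto put;"
so that the reference taken on dev1 is dropped on every path. A minimal sketch
of the single-exit cleanup pattern, with a hypothetical lookup and check:

    static int example_agp_init(struct pci_dev *pdev)
    {
            struct pci_dev *dev1;
            int ret;

            dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0)); /* takes a reference */
            if (!dev1)
                    return -ENODEV;

            if (aperture_base_above_4g(dev1)) {  /* hypothetical check */
                    ret = -ENODEV;
                    goto put;                    /* error path still drops the ref */
            }

            ret = 0;
    put:
            pci_dev_put(dev1);                   /* single exit releases dev1 */
            return ret;
    }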
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2583ddfcc33ef568144699c77737c6623168e680..88910e5a2c77275654e8b431d96a93b07e438685 100644
@@ -60,6 +60,7 @@ config DRM_RADEON
        select FW_LOADER
         select DRM_KMS_HELPER
         select DRM_TTM
+       select POWER_SUPPLY
        help
          Choose this option if you have an ATI Radeon graphics card.  There
          are both PCI and AGP versions.  You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 764401951041910692901ea9c1489962de3277b8..9b2a54117c91c0a306c41c7eb437b641bbd58b31 100644
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
        }
 }
 
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+       if (!dev->mode_config.poll_enabled)
+               return;
+       delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-       struct drm_connector *connector;
        bool poll = false;
+       struct drm_connector *connector;
        int ret;
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->polled)
                        poll = true;
        }
-       slow_work_register_user(THIS_MODULE);
-       delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-                              &output_poll_ops);
 
        if (poll) {
                ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
                        DRM_ERROR("delayed enqueue failed %d\n", ret);
        }
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+       slow_work_register_user(THIS_MODULE);
+       delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+                              &output_poll_ops);
+       dev->mode_config.poll_enabled = true;
+
+       drm_kms_helper_poll_enable(dev);
+}
 EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-       delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+       drm_kms_helper_poll_disable(dev);
        slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
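
Note: splitting drm_kms_helper_poll_init() into separate enable/disable entry
points lets drivers pause output polling while their hardware is powered down.
A sketch of the intended call order, mirroring the switcheroo hunks further
below (the suspend/resume callbacks are hypothetical driver functions):

    drm_kms_helper_poll_disable(dev);  /* cancel the output-poll slow-work */
    my_driver_suspend(dev, pmm);       /* hardware is off limits from here */
    /* ... card switched back on ... */
    my_driver_resume(dev);
    drm_kms_helper_poll_enable(dev);   /* re-arm connector polling */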
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfd4f3677f3b79d69f6e65545c23632be1bea96c..c1981861bbbdb418ab58f9c9400e6c41a9430216 100644
@@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid)
                csum += raw_edid[i];
        if (csum) {
                DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
-               goto bad;
+
+               /* allow CEA to slide through, switches mangle this */
+               if (raw_edid[0] != 0x02)
+                       goto bad;
        }
 
        /* per-block-type checks */
@@ -587,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
                   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1600x1200@75Hz */
-       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
                   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1600x1200@85Hz */
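
Note: the clock field of DRM_MODE() is in kHz. For the 1600x1200@75Hz DMT
timing above, htotal * vtotal * refresh = 2160 * 1250 * 75 = 202,500,000 Hz,
i.e. 202500 kHz, so the old value of 2025000 was ten times too large.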
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de5ae5d07330232b8aff6b0b989300147a2..cc6e56a18edd9e8bc9ac9f11e41e9209951f0a62 100644
@@ -1399,12 +1399,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
-               printk(KERN_INFO "i915: switched off\n");
+               printk(KERN_INFO "i915: switched on\n");
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
+               drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_ERR "i915: switched off\n");
+               drm_kms_helper_poll_disable(dev);
                i915_suspend(dev, pmm);
        }
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af0037ac4dc805a821d785bf5ef541d9c07..d4bcca8a5133b2a4565b8f7d794b4d558da5557e 100644
@@ -34,7 +34,7 @@
 static struct nouveau_dsm_priv {
        bool dsm_detected;
        acpi_handle dhandle;
-       acpi_handle dsm_handle;
+       acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
 static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
 static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
 {
        if (id == VGA_SWITCHEROO_IGD)
-               return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+               return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
        else
-               return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+               return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
 }
 
 static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
        if (id == VGA_SWITCHEROO_IGD)
                return 0;
 
-       return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+       return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
 }
 
 static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
+
        status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
        if (ACPI_FAILURE(status)) {
                return false;
        }
 
-       ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
-                        NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+       ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+                         NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
        if (ret < 0)
                return false;
 
        nouveau_dsm_priv.dhandle = dhandle;
-       nouveau_dsm_priv.dsm_handle = nvidia_handle;
        return true;
 }
 
@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
        struct pci_dev *pdev = NULL;
        int has_dsm = 0;
        int vga_count = 0;
+
        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
                vga_count++;
 
@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
        }
 
        if (vga_count == 2 && has_dsm) {
-               acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+               acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
                printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                       acpi_method_name);
                nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
 {
        vga_switcheroo_unregister_handler();
 }
+
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+                           int offset, int len)
+{
+       acpi_status status;
+       union acpi_object rom_arg_elements[2], *obj;
+       struct acpi_object_list rom_arg;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+       rom_arg.count = 2;
+       rom_arg.pointer = &rom_arg_elements[0];
+
+       rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+       rom_arg_elements[0].integer.value = offset;
+
+       rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+       rom_arg_elements[1].integer.value = len;
+
+       status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+       if (ACPI_FAILURE(status)) {
+               printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
+               return -ENODEV;
+       }
+       obj = (union acpi_object *)buffer.pointer;
+       memcpy(bios+offset, obj->buffer.pointer, len);
+       kfree(buffer.pointer);
+       return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+       acpi_status status;
+       acpi_handle dhandle, rom_handle;
+
+       if (!nouveau_dsm_priv.dsm_detected)
+               return false;
+
+       dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return false;
+
+       status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+       if (ACPI_FAILURE(status))
+               return false;
+
+       nouveau_dsm_priv.rom_handle = rom_handle;
+       return true;
+}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+       return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
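
Note: the ACPI _ROM method takes a byte offset and a length and returns at
most 4 KiB per evaluation, which is why nouveau_rom_call() is invoked once per
ROM_BIOS_PAGE; the loader added to nouveau_bios.c below fetches a 64 KiB image
as 64 * 1024 / 4096 = 16 such calls.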
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 9f30fb8eafe8a576c1d625fb05551ea2fda4321a..9ba2deaadcc7d38b833ff63fc99f4b76cc144ac1 100644
@@ -178,6 +178,25 @@ out:
        pci_disable_rom(dev->pdev);
 }
 
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+       int i;
+       int ret;
+       int size = 64 * 1024;
+
+       if (!nouveau_acpi_rom_supported(dev->pdev))
+               return;
+
+       for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+               ret = nouveau_acpi_get_bios_chunk(data,
+                                                 (i * ROM_BIOS_PAGE),
+                                                 ROM_BIOS_PAGE);
+               if (ret <= 0)
+                       break;
+       }
+       return;
+}
+
 struct methods {
        const char desc[8];
        void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
 };
 
 static struct methods nv50_methods[] = {
+       { "ACPI", load_vbios_acpi, true },
        { "PRAMIN", load_vbios_pramin, true },
        { "PROM", load_vbios_prom, false },
        { "PCIROM", load_vbios_pci, true },
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 256e82bd91dd616e83be300e58b45f2068745f9f..149ed224c3cb48c86c56e2f4b9bb18a599190d10 100644
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
        if (nv_encoder && nv_connector->native_mode) {
                unsigned status = connector_status_connected;
 
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+       (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
                if (!nouveau_ignorelid && !acpi_lid_open())
                        status = connector_status_unknown;
 #endif
index 5b134438effebcda05e2fbb8c4a74581f6148bcc..c697191064894345efbbf76bf3bfd1fc83188270 100644 (file)
@@ -851,12 +851,17 @@ extern int  nouveau_dma_init(struct nouveau_channel *);
 extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
 
 /* nouveau_acpi.c */
+#define ROM_BIOS_PAGE 4096
 #if defined(CONFIG_ACPI)
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
 #else
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 #endif
 
 /* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a2544ffd02cc8ecedea168563e166876dce9bf0f..147e59c4015148e4b645648346b2e959370d3dcf 100644
@@ -376,12 +376,15 @@ out_err:
 static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
                                         enum vga_switcheroo_state state)
 {
+       struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
                nouveau_pci_resume(pdev);
+               drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+               drm_kms_helper_poll_disable(dev);
                nouveau_pci_suspend(pdev, pmm);
        }
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67834f9de49511d85c93614e0bf9ea..704a25d04ac92c75a20fe4f019a808c91ccd7304 100644
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
 
        if (!dev_priv->engine.graph.ctxprog) {
                struct nouveau_grctx ctx = {};
-               uint32_t cp[256];
+               uint32_t *cp;
+
+               cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+               if (!cp)
+                       return -ENOMEM;
 
                ctx.dev = dev;
                ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
                nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
                for (i = 0; i < ctx.ctxprog_len; i++)
                        nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+               kfree(cp);
        }
 
        /* No context present currently */
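
Note: the on-stack "uint32_t cp[256]" consumed 256 * 4 = 1024 bytes of the
limited kernel stack; the hunk above moves the ctxprog scratch buffer to the
heap and frees it once the microcode has been written to the hardware.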
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 80c5b3ea28b417e70dc2aa4c7f6068c4e442b20f..1c02d23f6fcca9f9a6030bba9ea309ef98838419 100644
@@ -1,7 +1,6 @@
 config DRM_RADEON_KMS
        bool "Enable modesetting on radeon by default - NEW DRIVER"
        depends on DRM_RADEON
-       depends on POWER_SUPPLY
        help
          Choose this option if you want kernel modesetting enabled by default.
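
Note: together with the drivers/gpu/drm/Kconfig hunk above, this moves the
POWER_SUPPLY requirement from a "depends on" under DRM_RADEON_KMS to a
"select" under DRM_RADEON, so enabling the radeon driver now pulls in
POWER_SUPPLY automatically instead of silently hiding the KMS option when
POWER_SUPPLY is disabled.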
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c41dc192f272f6499d3bfea03a4556e7db9..f3f2827017ef39603bae514432f875a51472eec1 100644
@@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                break;
        case ATOM_DCPLL:
        case ATOM_PPLL_INVALID:
+       default:
                pll = &rdev->clock.dcpll;
                break;
        }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7ffc3892c65220c6367761e6f0792cd4c77d489e..44e96a2ae25aa2be1640d491a68570aa1257bae9 100644
@@ -430,7 +430,7 @@ void r600_pm_init_profile(struct radeon_device *rdev)
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
-                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 2;
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
                        } else {
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5c9ce2beaca3b949300f46817fbd74d49c017f13..669feb689bfcdc6df0f8536493c4defc5bd0e42a 100644
@@ -261,6 +261,7 @@ struct radeon_bo_list {
        unsigned                rdomain;
        unsigned                wdomain;
        u32                     tiling_flags;
+       bool                    reserved;
 };
 
 /*
@@ -575,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
  */
 int radeon_agp_init(struct radeon_device *rdev);
 void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
 void radeon_agp_fini(struct radeon_device *rdev);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56fd688c339c169be1a1cbf5f11aaad..f40dfb77f9b12cfa75c90c83ec608714f15ae423 100644
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
        }
 #endif
 }
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+       radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fdc334991ce9828dfe40e482b2e2cb9b85b..24ea683f7cf53ec5bd2a2958a0a86d95a5185624 100644
@@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
        uint8_t dac;
        union atom_supported_devices *supported_devices;
        int i, j, max_device;
-       struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+       struct bios_connector *bios_connectors;
+       size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 
-       if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+       bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+       if (!bios_connectors)
+               return false;
+
+       if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+                                   &data_offset)) {
+               kfree(bios_connectors);
                return false;
+       }
 
        supported_devices =
            (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
        radeon_link_encoder_connector(dev);
 
+       kfree(bios_connectors);
        return true;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612ffe75caf03425db39ddea73b9e7219057..db338522191f5fbf0bb48bc7da08063ed42efbf8 100644
@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                /* don't suspend or resume card normally */
                rdev->powered_down = false;
                radeon_resume_kms(dev);
+               drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_INFO "radeon: switched off\n");
+               drm_kms_helper_poll_disable(dev);
                radeon_suspend_kms(dev, pmm);
                /* don't suspend or resume card normally */
                rdev->powered_down = true;
@@ -754,6 +756,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        /* evict remaining vram memory */
        radeon_bo_evict_vram(rdev);
 
+       radeon_agp_suspend(rdev);
+
        pci_save_state(dev->pdev);
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index da85cad1152b0129a270870bef7249fda2678a41..1006549d157029bcb129129f417e1dd5195cc5ed 100644
@@ -983,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
                /* set display priority to high for r3xx, rv515 chips
                 * this avoids flickering due to underflow to the
                 * display controllers during heavy acceleration.
+                * Don't force high on rs4xx igp chips as it seems to
+                * affect the sound card.  See kernel bug 15982.
                 */
-               if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+               if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+                   !(rdev->flags & RADEON_IS_IGP))
                        rdev->disp_priority = 2;
                else
                        rdev->disp_priority = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a8d18bcae7db08ba2c62cd2cedb45acc3afdeac5..d5b9373ce06c16b9be8ee81b9f27d70b20947a19 100644
@@ -301,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
                r = radeon_bo_reserve(lobj->bo, false);
                if (unlikely(r != 0))
                        return r;
+               lobj->reserved = true;
        }
        return 0;
 }
@@ -311,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
 
        list_for_each_entry(lobj, head, list) {
                /* only unreserve object we successfully reserved */
-               if (radeon_bo_is_reserved(lobj->bo))
+               if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
                        radeon_bo_unreserve(lobj->bo);
        }
 }
@@ -322,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
        struct radeon_bo *bo;
        int r;
 
+       list_for_each_entry(lobj, head, list) {
+               lobj->reserved = false;
+       }
        r = radeon_bo_list_reserve(head);
        if (unlikely(r != 0)) {
                return r;
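
Note: the new per-entry "reserved" flag records which buffers this call
actually reserved, so the unreserve path no longer trusts
radeon_bo_is_reserved() alone, which can also return true when another caller
holds the reservation.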
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c3736ab754f07f55201eaa5252048df1f..11ce94c668e71ce4180e99dd5705304e931fa467 100644
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
                        flags |= RADEON_FRONT;
        }
        if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
-               if (!dev_priv->have_z_offset)
+               if (!dev_priv->have_z_offset) {
                        printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
-               flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+                       flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+               }
        }
 
        if (flags & (RADEON_FRONT | RADEON_BACK)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3aa3a65800abd1354aefafcd9e40287de651cc84..e9918d88f5b049675cfebe754d15ef83338bb91d 100644
@@ -451,7 +451,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
-                       mem->bus.is_iomem = true;
+                       mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
 #endif
                break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c2394f79983344cd8bd21d99204a288109..ef910694bd634807631dd0958831bfa8c44421df 100644
@@ -77,7 +77,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
-                       printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-                                       "is not allowed. Recomended size is "
-                                       "%lu\n",
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       printk(KERN_ERR TTM_PFX
+                              "Setting allocation size to %lu "
+                              "is not allowed. Recommended size is "
+                              "%lu\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
-                       printk(KERN_WARNING "[ttm] Setting allocation size to "
-                                       "larger than %lu is not recomended.\n",
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       printk(KERN_WARNING TTM_PFX
+                              "Setting allocation size to "
+                              "larger than %lu is not recommended.\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
        unsigned i;
        if (set_pages_array_wb(pages, npages))
-               printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+               printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
                                npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
-               printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+               printk(KERN_ERR TTM_PFX
+                      "Failed to allocate memory for pool free operation.\n");
                return 0;
        }
 
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
-                       printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-                                       cpages);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to set %d pages to uc!\n",
+                              cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
-                       printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-                                       cpages);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to set %d pages to wc!\n",
+                              cpages);
                break;
        default:
                break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
                struct page **failed_pages, unsigned cpages)
 {
        unsigned i;
-       /* Failed pages has to be reed */
+       /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
        if (!caching_array) {
-               printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+               printk(KERN_ERR TTM_PFX
+                      "Unable to allocate table for new pages.");
                return -ENOMEM;
        }
 
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
                p = alloc_page(gfp_flags);
 
                if (!p) {
-                       printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+                       printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
-                               r = ttm_set_pages_caching(caching_array, cstate, cpages);
+                               r = ttm_set_pages_caching(caching_array,
+                                                         cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
-                       printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool. */
                        list_for_each_entry(p, &pool->list, lru) {
                                ++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
-                       gfp_flags |= __GFP_HIGHMEM;
+                       gfp_flags |= GFP_HIGHUSER;
 
                for (r = 0; r < count; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
 
-                               printk(KERN_ERR "[ttm] unable to allocate page.");
+                               printk(KERN_ERR TTM_PFX
+                                      "Unable to allocate page.");
                                return -ENOMEM;
                        }
 
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
-                       printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-                                       "for large request.");
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to allocate extra pages "
+                              "for large request.");
                        ttm_put_pages(pages, 0, flags, cstate);
                        return r;
                }
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
        if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
                return 0;
 
-       printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+       printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
        ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
        if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
                return;
 
-       printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+       printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
        ttm_pool_mm_shrink_fini(&_manager);
 
        for (i = 0; i < NUM_POOLS; ++i)
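
Note: besides moving the messages to the TTM_PFX prefix, the allocation path
now uses GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) instead of OR-ing in a bare
__GFP_HIGHMEM, so non-DMA32 page allocations carry the standard user-page
allocation flags.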
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1cc1a72a58b75a1c4c952e6607c7d3..4505e17df3f557e21beb14581e72f3356985e345 100644
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-           vmwgfx_overlay.o
+           vmwgfx_overlay.o vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42dd03b9c92d0ea154e249b8ed3b397..7597323d5a5aeda2e63d115002626ad3b8747979 100644
@@ -318,6 +318,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_err3;
        }
 
+       /* Need mmio memory to check for fifo pitchlock cap. */
+       if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+           !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+           !vmw_fifo_have_pitchlock(dev_priv)) {
+               ret = -ENOSYS;
+               DRM_ERROR("Hardware has no pitchlock\n");
+               goto out_err4;
+       }
+
        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);
 
@@ -399,8 +408,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
        unregister_pm_notifier(&dev_priv->pm_nb);
 
        vmw_fb_close(dev_priv);
@@ -546,7 +553,6 @@ static int vmw_master_create(struct drm_device *dev,
 {
        struct vmw_master *vmaster;
 
-       DRM_INFO("Master create.\n");
        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;
@@ -563,7 +569,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 {
        struct vmw_master *vmaster = vmw_master(master);
 
-       DRM_INFO("Master destroy.\n");
        master->driver_priv = NULL;
        kfree(vmaster);
 }
@@ -579,8 +584,6 @@ static int vmw_master_set(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
 
-       DRM_INFO("Master set.\n");
-
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +625,6 @@ static void vmw_master_drop(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;
 
-       DRM_INFO("Master drop.\n");
-
        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec133f974c2c95e2be5d1146cd07c9a9..1341adef408d8f288bd0d765ec2bd2bcc8ad30ec 100644
@@ -41,7 +41,7 @@
 
 #define VMWGFX_DRIVER_DATE "20100209"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 1
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -102,6 +102,13 @@ struct vmw_surface {
        struct vmw_cursor_snooper snooper;
 };
 
+struct vmw_fence_queue {
+       struct list_head head;
+       struct timespec lag;
+       struct timespec lag_time;
+       spinlock_t lock;
+};
+
 struct vmw_fifo_state {
        unsigned long reserved_size;
        __le32 *dynamic_buffer;
@@ -115,6 +122,7 @@ struct vmw_fifo_state {
        uint32_t capabilities;
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
+       struct vmw_fence_queue fence_queue;
 };
 
 struct vmw_relocation {
@@ -179,6 +187,7 @@ struct vmw_private {
        uint32_t vga_red_mask;
        uint32_t vga_blue_mask;
        uint32_t vga_green_mask;
+       uint32_t vga_pitchlock;
 
        /*
         * Framebuffer info.
@@ -393,6 +402,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +451,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             uint32_t sequence,
                             bool interruptible,
                             unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+                               struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+                         uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+                         uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+                       struct vmw_fence_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +493,9 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+                       unsigned width, unsigned height, unsigned pitch,
+                       unsigned bbp, unsigned depth);
 
 /**
  * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8910cf5cc65f22879c61017f8a0de83dc3..bdd67cf83315f590165fbf6b40437454d4e29186 100644
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                goto out_err;
 
        vmw_apply_relocations(sw_context);
+
+       if (arg->throttle_us) {
+               ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+                                  arg->throttle_us);
+
+               if (unlikely(ret != 0))
+                       goto out_err;
+       }
+
        vmw_fifo_commit(dev_priv, arg->command_size);
 
        ret = vmw_fifo_send_fence(dev_priv, &sequence);
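
Note: when userspace passes a non-zero throttle_us, the execbuf path now
blocks in vmw_wait_lag() until the FIFO lag tracked by the new fence queue
(vmwgfx_fence.c below) drops under that many microseconds before the command
buffer is committed, giving a simple per-submission rate limit.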
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaad8d094e17da75b3b5308d3f491a63e9fa..181f472225801754f663af2cbb8db946c2ff7db6 100644
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
        }
 
-       /* without multimon its hard to resize */
-       if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
-           (var->xres != par->max_width ||
-            var->yres != par->max_height)) {
-               DRM_ERROR("Tried to resize, but we don't have multimon\n");
+       if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+           (var->xoffset != 0 || var->yoffset != 0)) {
+               DRM_ERROR("Can not handle panning without display topology\n");
                return -EINVAL;
        }
 
-       if (var->xres > par->max_width ||
-           var->yres > par->max_height) {
+       if ((var->xoffset + var->xres) > par->max_width ||
+           (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geom can not fit in framebuffer\n");
                return -EINVAL;
        }
@@ -154,8 +152,7 @@ static int vmw_fb_set_par(struct fb_info *info)
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
 
-       if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+       if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
@@ -164,18 +161,11 @@ static int vmw_fb_set_par(struct fb_info *info)
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 
-               vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
-               vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
-               vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
-               vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
-               vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
-               vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-               vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-               vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+               vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+                                  info->fix.line_length,
+                                  par->bpp, par->depth);
 
                /* TODO check if pitch and offset changes */
-
-               vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
@@ -183,13 +173,19 @@ static int vmw_fb_set_par(struct fb_info *info)
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+               vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
        } else {
-               vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
-               vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+               vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+                                  info->fix.line_length,
+                                  par->bpp, par->depth);
 
-               /* TODO check if pitch and offset changes */
        }
 
+       /* This is really helpful since if this fails the user
+        * can probably not see anything on the screen.
+        */
+       WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
        return 0;
 }
 
@@ -416,48 +412,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
        int ret;
 
+       /* XXX These shouldn't be hardcoded. */
        initial_width = 800;
        initial_height = 600;
 
        fb_bbp = 32;
        fb_depth = 24;
 
-       if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-               fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
-               fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-       } else {
-               fb_width = min(vmw_priv->fb_max_width, initial_width);
-               fb_height = min(vmw_priv->fb_max_height, initial_height);
-       }
+       /* XXX Nor should these be. */
+       fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+       fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
        initial_width = min(fb_width, initial_width);
        initial_height = min(fb_height, initial_height);
 
-       vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
-       vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
-       vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
-       vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
-       vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-       vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-       vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
-       fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+       fb_pitch = fb_width * fb_bbp / 8;
+       fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
-       fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
-       DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
-       DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
-       DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
-       DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
-       DRM_DEBUG("bpp    %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
-       DRM_DEBUG("depth  %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
-       DRM_DEBUG("bpl    %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
-       DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
-       DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
-       DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
-       DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
-       DRM_DEBUG("fb_pitch  %u\n", fb_pitch);
-       DRM_DEBUG("fb_size   %u kiB\n", fb_size / 1024);
 
        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
@@ -659,6 +630,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                goto err_unlock;
 
        ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+       /* Could probably bug on */
+       WARN_ON(bo->offset != 0);
+
        ttm_bo_unreserve(bo);
 err_unlock:
        ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644 (file)
index 0000000..61eacc1
--- /dev/null
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+       struct list_head head;
+       uint32_t sequence;
+       struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+       INIT_LIST_HEAD(&queue->head);
+       queue->lag = ns_to_timespec(0);
+       getrawmonotonic(&queue->lag_time);
+       spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+       struct vmw_fence *fence, *next;
+
+       spin_lock(&queue->lock);
+       list_for_each_entry_safe(fence, next, &queue->head, head) {
+               kfree(fence);
+       }
+       spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+                  uint32_t sequence)
+{
+       struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+       if (unlikely(!fence))
+               return -ENOMEM;
+
+       fence->sequence = sequence;
+       getrawmonotonic(&fence->submitted);
+       spin_lock(&queue->lock);
+       list_add_tail(&fence->head, &queue->head);
+       spin_unlock(&queue->lock);
+
+       return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+                  uint32_t signaled_sequence)
+{
+       struct vmw_fence *fence, *next;
+       struct timespec now;
+       bool updated = false;
+
+       spin_lock(&queue->lock);
+       getrawmonotonic(&now);
+
+       if (list_empty(&queue->head)) {
+               queue->lag = ns_to_timespec(0);
+               queue->lag_time = now;
+               updated = true;
+               goto out_unlock;
+       }
+
+       list_for_each_entry_safe(fence, next, &queue->head, head) {
+               if (signaled_sequence - fence->sequence > (1 << 30))
+                       continue;
+
+               queue->lag = timespec_sub(now, fence->submitted);
+               queue->lag_time = now;
+               updated = true;
+               list_del(&fence->head);
+               kfree(fence);
+       }
+
+out_unlock:
+       spin_unlock(&queue->lock);
+
+       return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+                                       struct timespec t2)
+{
+       t1.tv_sec += t2.tv_sec;
+       t1.tv_nsec += t2.tv_nsec;
+       if (t1.tv_nsec >= 1000000000L) {
+               t1.tv_sec += 1;
+               t1.tv_nsec -= 1000000000L;
+       }
+
+       return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+       struct timespec now;
+
+       spin_lock(&queue->lock);
+       getrawmonotonic(&now);
+       queue->lag = vmw_timespec_add(queue->lag,
+                                     timespec_sub(now, queue->lag_time));
+       queue->lag_time = now;
+       spin_unlock(&queue->lock);
+       return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+                      uint32_t us)
+{
+       struct timespec lag, cond;
+
+       cond = ns_to_timespec((s64) us * 1000);
+       lag = vmw_fifo_lag(queue);
+       return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+                struct vmw_fence_queue *queue, uint32_t us)
+{
+       struct vmw_fence *fence;
+       uint32_t sequence;
+       int ret;
+
+       while (!vmw_lag_lt(queue, us)) {
+               spin_lock(&queue->lock);
+               if (list_empty(&queue->head))
+                       sequence = atomic_read(&dev_priv->fence_seq);
+               else {
+                       fence = list_first_entry(&queue->head,
+                                                struct vmw_fence, head);
+                       sequence = fence->sequence;
+               }
+               spin_unlock(&queue->lock);
+
+               ret = vmw_wait_fence(dev_priv, false, sequence, true,
+                                    3*HZ);
+
+               if (unlikely(ret != 0))
+                       return ret;
+
+               (void) vmw_fence_pull(queue, sequence);
+       }
+       return 0;
+}
+
+
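
Note: vmw_fence_pull() treats sequence numbers as a free-running 32-bit
counter; "signaled_sequence - fence->sequence > (1 << 30)" relies on unsigned
wrap-around to skip fences still ahead of the signaled value. A standalone
sketch of the same ordering test:

    /* A small unsigned difference means seq was issued at or before
     * signaled, even across 32-bit wrap-around. */
    static bool example_seq_signaled(uint32_t signaled, uint32_t seq)
    {
            return (uint32_t)(signaled - seq) <= (1u << 30);
    }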
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d846fd9e7bcd6a1206b06143f90a740d..e6a1eb7ea95498f00e65d123adeae79160af8fa8 100644
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
 
+       if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+               return false;
+
        fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        return true;
 }
 
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+       __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       uint32_t caps;
+
+       if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+               return false;
+
+       caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+       if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+               return true;
+
+       return false;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
        atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
        iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+       vmw_fence_queue_init(&fifo->fence_queue);
        return vmw_fifo_send_fence(dev_priv, &dummy);
 out_err:
        vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  dev_priv->enable_state);
 
        mutex_unlock(&dev_priv->hw_mutex);
+       vmw_fence_queue_takedown(&fifo->fence_queue);
 
        if (likely(fifo->last_buffer != NULL)) {
                vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
        fifo_state->last_buffer_add = true;
        vmw_fifo_commit(dev_priv, bytes);
        fifo_state->last_buffer_add = false;
+       (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+       vmw_update_sequence(dev_priv, fifo_state);
 
 out_err:
        return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb539386000b50798960f8615d2fc2b25d2ba..e92298a6a383c1fe60c290002a33a3e39caa456f 100644
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
        return (busy == 0);
 }
 
+void vmw_update_sequence(struct vmw_private *dev_priv,
+                        struct vmw_fifo_state *fifo_state)
+{
+       __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+       uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+       if (dev_priv->last_read_sequence != sequence) {
+               dev_priv->last_read_sequence = sequence;
+               vmw_fence_pull(&fifo_state->fence_queue, sequence);
+       }
+}
 
 bool vmw_fence_signaled(struct vmw_private *dev_priv,
                        uint32_t sequence)
 {
-       __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        struct vmw_fifo_state *fifo_state;
        bool ret;
 
        if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
                return true;
 
-       dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+       fifo_state = &dev_priv->fifo;
+       vmw_update_sequence(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
                return true;
 
-       fifo_state = &dev_priv->fifo;
        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, sequence))
                return true;
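Both early-out checks above lean on unsigned wraparound: last_read_sequence - sequence is evaluated modulo 2^32, so any sequence at or behind the last read value produces a small difference even across the 32-bit wrap. A standalone demonstration (the value of VMW_FENCE_WRAP is assumed here; the driver defines it in its private header):

    #include <assert.h>
    #include <stdint.h>

    #define VMW_FENCE_WRAP (1u << 24)    /* assumed window size */

    /* True when 'seq' is at or behind 'last_read', modulo 2^32. */
    static int seq_signaled(uint32_t last_read, uint32_t seq)
    {
            return last_read - seq < VMW_FENCE_WRAP;
    }

    int main(void)
    {
            assert(seq_signaled(100, 90));          /* plainly older */
            assert(!seq_signaled(100, 101));        /* not yet passed */
            assert(seq_signaled(5, 0xfffffff0u));   /* older, across the wrap */
            return 0;
    }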
index bbc7c4c30bc7fc26d6c2244ba53ab70324af1593..b78dcf00185818ea674095bac94054db498f061a 100644 (file)
@@ -30,6 +30,8 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
 
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 struct vmw_framebuffer_surface {
        struct vmw_framebuffer base;
        struct vmw_surface *surface;
+       struct vmw_dma_buffer *buffer;
        struct delayed_work d_work;
        struct mutex work_lock;
        bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
        vfbs->base.base.depth = 24;
        vfbs->base.base.width = width;
        vfbs->base.base.height = height;
-       vfbs->base.pin = NULL;
-       vfbs->base.unpin = NULL;
+       vfbs->base.pin = &vmw_surface_dmabuf_pin;
+       vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
        vfbs->surface = surface;
        mutex_init(&vfbs->work_lock);
        INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
        .create_handle = vmw_framebuffer_create_handle,
 };
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+       struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+       struct vmw_framebuffer_surface *vfbs =
+               vmw_framebuffer_to_vfbs(&vfb->base);
+       unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+       int ret;
+
+       vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+       if (unlikely(vfbs->buffer == NULL))
+               return -ENOMEM;
+
+       vmw_overlay_pause_all(dev_priv);
+       ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+                              &vmw_vram_ne_placement,
+                              false, &vmw_dmabuf_bo_free);
+       vmw_overlay_resume_all(dev_priv);
+
+       return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+       struct ttm_buffer_object *bo;
+       struct vmw_framebuffer_surface *vfbs =
+               vmw_framebuffer_to_vfbs(&vfb->base);
+
+       bo = &vfbs->buffer->base;
+       ttm_bo_unref(&bo);
+       vfbs->buffer = NULL;
+
+       return 0;
+}
+
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
                vmw_framebuffer_to_vfbd(&vfb->base);
        int ret;
 
+
        vmw_overlay_pause_all(dev_priv);
 
        ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 
-       if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
-               vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
-               vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
-               vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
-               vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
-               vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
-               vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-               vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-               vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-       } else
-               WARN_ON(true);
-
        vmw_overlay_resume_all(dev_priv);
 
+       WARN_ON(ret != 0);
+
        return 0;
 }
 
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 
        /* XXX get the first 3 from the surface info */
        vfbd->base.base.bits_per_pixel = 32;
-       vfbd->base.base.pitch = width * 32 / 4;
+       vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
        vfbd->base.base.depth = 24;
        vfbd->base.base.width = width;
        vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
        dev->mode_config.funcs = &vmw_kms_funcs;
        dev->mode_config.min_width = 1;
        dev->mode_config.min_height = 1;
-       dev->mode_config.max_width = dev_priv->fb_max_width;
-       dev->mode_config.max_height = dev_priv->fb_max_height;
+       /* Assumed largest framebuffer size. */
+       dev->mode_config.max_width = 8192;
+       dev->mode_config.max_height = 8192;
 
        ret = vmw_kms_init_legacy_display_system(dev_priv);
 
@@ -826,24 +846,25 @@ out:
        return ret;
 }
 
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+                       unsigned width, unsigned height, unsigned pitch,
+                       unsigned bpp, unsigned depth)
 {
-       /*
-        * setup a single multimon monitor with the size
-        * of 0x0, this stops the UI from resizing when we
-        * change the framebuffer size
-        */
-       if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-               vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-       }
+       if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+               vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+       else if (vmw_fifo_have_pitchlock(vmw_priv))
+               iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+       vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+       vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+       vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+       vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+       vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+       vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+       vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
 
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
        vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
        vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
        vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
@@ -852,6 +873,12 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
        vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
        vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
        vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+       if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+               vmw_priv->vga_pitchlock =
+                       vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+       else if (vmw_fifo_have_pitchlock(vmw_priv))
+               vmw_priv->vga_pitchlock =
+                       ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
        return 0;
 }
@@ -866,9 +893,12 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
        vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
        vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
        vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
-
-       /* TODO check for multimon */
-       vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+       if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+               vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+                         vmw_priv->vga_pitchlock);
+       else if (vmw_fifo_have_pitchlock(vmw_priv))
+               iowrite32(vmw_priv->vga_pitchlock,
+                         vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
        return 0;
 }
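The same three-way pitchlock placement (SVGA_REG_PITCHLOCK if the register capability is set, the SVGA_FIFO_PITCHLOCK fifo word if only the fifo capability is, otherwise nothing) now appears in vmw_kms_write_svga(), vmw_kms_save_vga() and vmw_kms_restore_vga(). A pair of hypothetical helpers, not part of the patch, could centralize that choice:

    /* Hypothetical (not in this patch): one place for the pitchlock choice. */
    static void vmw_pitchlock_write(struct vmw_private *vmw_priv, u32 pitch)
    {
            if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                    vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
            else if (vmw_fifo_have_pitchlock(vmw_priv))
                    iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
            /* else: no pitchlock; the device derives pitch from width * bpp */
    }

    static u32 vmw_pitchlock_read(struct vmw_private *vmw_priv)
    {
            if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                    return vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
            else if (vmw_fifo_have_pitchlock(vmw_priv))
                    return ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
            return 0;
    }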
index 90891593bf6c2ea01cc23ab52d03452cb5d99c08..f7094dde18f9dc8a80d8bcf7312af30422895c44 100644 (file)
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
        struct list_head active;
 
        unsigned num_active;
+       unsigned last_num_active;
 
        struct vmw_framebuffer *fb;
 };
@@ -49,8 +50,6 @@ struct vmw_legacy_display_unit {
        struct vmw_display_unit base;
 
        struct list_head active;
-
-       unsigned unit;
 };
 
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +87,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
        struct vmw_legacy_display *lds = dev_priv->ldu_priv;
        struct vmw_legacy_display_unit *entry;
-       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_crtc *crtc = NULL;
        int i = 0;
 
-       /* to stop the screen from changing size on resize */
-       vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
-       for (i = 0; i < lds->num_active; i++) {
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-               vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+       /* If there is no display topology, the host just assumes
+        * that the guest will set the same layout as the host.
+        */
+       if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+               int w = 0, h = 0;
+               list_for_each_entry(entry, &lds->active, active) {
+                       crtc = &entry->base.crtc;
+                       w = max(w, crtc->x + crtc->mode.hdisplay);
+                       h = max(h, crtc->y + crtc->mode.vdisplay);
+                       i++;
+               }
+
+               if (crtc == NULL)
+                       return 0;
+               fb = crtc->fb;  /* 'entry' is past the list head here */
+
+               vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+                                  fb->bits_per_pixel, fb->depth);
+
+               return 0;
+       }
+
+       if (!list_empty(&lds->active)) {
+               entry = list_entry(lds->active.next, typeof(*entry), active);
+               fb = entry->base.crtc.fb;
+
+               vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+                                  fb->bits_per_pixel, fb->depth);
        }
 
-       /* Now set the mode */
-       vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+       /* Make sure we always show something. */
+       vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+                 lds->num_active ? lds->num_active : 1);
+
        i = 0;
        list_for_each_entry(entry, &lds->active, active) {
                crtc = &entry->base.crtc;
@@ -120,6 +140,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                i++;
        }
 
+       BUG_ON(i != lds->num_active);
+
+       lds->last_num_active = lds->num_active;
+
        return 0;
 }
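When SVGA_CAP_DISPLAY_TOPOLOGY is absent the guest can only program a single rectangle, so the loop above grows it to the bounding box of every active crtc. The computation in miniature (standalone, illustrative values):

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    struct toy_crtc { int x, y, hdisplay, vdisplay; };

    int main(void)
    {
            struct toy_crtc active[] = {
                    { 0,    0, 1024, 768 },  /* first head at the origin */
                    { 1024, 0,  800, 600 },  /* second head to the right */
            };
            int w = 0, h = 0;
            unsigned i;

            for (i = 0; i < sizeof(active) / sizeof(active[0]); i++) {
                    w = max(w, active[i].x + active[i].hdisplay);
                    h = max(h, active[i].y + active[i].vdisplay);
            }
            printf("single host rect: %dx%d\n", w, h);  /* 1824x768 */
            return 0;
    }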
 
@@ -130,6 +154,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
        if (list_empty(&ldu->active))
                return 0;
 
+       /* Must list_del_init(); a later list_empty(&ldu->active) relies on it. */
        list_del_init(&ldu->active);
        if (--(ld->num_active) == 0) {
                BUG_ON(!ld->fb);
@@ -149,24 +174,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
        struct vmw_legacy_display_unit *entry;
        struct list_head *at;
 
+       BUG_ON(!ld->num_active && ld->fb);
+       if (vfb != ld->fb) {
+               if (ld->fb && ld->fb->unpin)
+                       ld->fb->unpin(ld->fb);
+               if (vfb->pin)
+                       vfb->pin(vfb);
+               ld->fb = vfb;
+       }
+
        if (!list_empty(&ldu->active))
                return 0;
 
        at = &ld->active;
        list_for_each_entry(entry, &ld->active, active) {
-               if (entry->unit > ldu->unit)
+               if (entry->base.unit > ldu->base.unit)
                        break;
 
                at = &entry->active;
        }
 
        list_add(&ldu->active, at);
-       if (ld->num_active++ == 0) {
-               BUG_ON(ld->fb);
-               if (vfb->pin)
-                       vfb->pin(vfb);
-               ld->fb = vfb;
-       }
+
+       ld->num_active++;
 
        return 0;
 }
@@ -208,6 +238,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 
        /* ldu only supports one fb active at a time */
        if (dev_priv->ldu_priv->fb && vfb &&
+           !(dev_priv->ldu_priv->num_active == 1 &&
+             !list_empty(&ldu->active)) &&
            dev_priv->ldu_priv->fb != vfb) {
                DRM_ERROR("Multiple framebuffers not supported\n");
                return -EINVAL;
@@ -443,18 +475,16 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
        if (!ldu)
                return -ENOMEM;
 
-       ldu->unit = unit;
+       ldu->base.unit = unit;
        crtc = &ldu->base.crtc;
        encoder = &ldu->base.encoder;
        connector = &ldu->base.connector;
 
+       INIT_LIST_HEAD(&ldu->active);
+
        drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
-       /* Initial status */
-       if (unit == 0)
-               connector->status = connector_status_connected;
-       else
-               connector->status = connector_status_disconnected;
+       connector->status = vmw_ldu_connector_detect(connector);
 
        drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS);
@@ -462,8 +492,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;
 
-       INIT_LIST_HEAD(&ldu->active);
-
        drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
        drm_connector_attach_property(connector,
@@ -487,18 +515,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
        INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
        dev_priv->ldu_priv->num_active = 0;
+       dev_priv->ldu_priv->last_num_active = 0;
        dev_priv->ldu_priv->fb = NULL;
 
        drm_mode_create_dirty_info_property(dev_priv->dev);
 
        vmw_ldu_init(dev_priv, 0);
-       vmw_ldu_init(dev_priv, 1);
-       vmw_ldu_init(dev_priv, 2);
-       vmw_ldu_init(dev_priv, 3);
-       vmw_ldu_init(dev_priv, 4);
-       vmw_ldu_init(dev_priv, 5);
-       vmw_ldu_init(dev_priv, 6);
-       vmw_ldu_init(dev_priv, 7);
+       /* For old hardware without multimon, only enable one display. */
+       if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+               vmw_ldu_init(dev_priv, 1);
+               vmw_ldu_init(dev_priv, 2);
+               vmw_ldu_init(dev_priv, 3);
+               vmw_ldu_init(dev_priv, 4);
+               vmw_ldu_init(dev_priv, 5);
+               vmw_ldu_init(dev_priv, 6);
+               vmw_ldu_init(dev_priv, 7);
+       }
 
        return 0;
 }
index ad566c85b075da13305ecef856b254ff4dc30a6b..df2036ed18d5f2c920ba61f99b716998a6f25329 100644 (file)
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
        if (stream->buf != buf)
                stream->buf = vmw_dmabuf_reference(buf);
        stream->saved = *arg;
+       /* stream is no longer stopped/paused */
+       stream->paused = false;
 
        return 0;
 }
index dc5873c21e4566ceaf6628882abae905098763f9..1121f7799c6ff527838ec50052d51bc10b10e2e7 100644 (file)
@@ -130,4 +130,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
 #endif
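The two new declarations are the suspend/resume counterpart to the existing poll init/fini helpers: output polling must stop before the hardware goes away and restart once it is back. A sketch of intended use (the foo_* driver callbacks are hypothetical):

    static int foo_suspend(struct drm_device *dev)
    {
            drm_kms_helper_poll_disable(dev);       /* quiesce output polling */
            /* ... save state, power down ... */
            return 0;
    }

    static int foo_resume(struct drm_device *dev)
    {
            /* ... power up, restore state ... */
            drm_kms_helper_poll_enable(dev);
            drm_helper_hpd_irq_event(dev);          /* re-probe connectors */
            return 0;
    }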