Merge tag 'drm-intel-fixes-2015-08-14' into drm-intel-next-fixes
author Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 14 Aug 2015 16:10:35 +0000 (18:10 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 14 Aug 2015 16:11:30 +0000 (18:11 +0200)
Backmerge drm-intel-fixes because the bunch of atomic patches we had to
backport led to horrible conflicts.

Conflicts:
drivers/gpu/drm/drm_crtc.c
Just a bit of context conflict between -next and -fixes.
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_display.c
Atomic conflicts, always pick the code from -next.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
153 files changed:
Documentation/DocBook/drm.tmpl
MAINTAINERS
arch/x86/configs/x86_64_defconfig
drivers/char/agp/intel-gtt.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/vid.h
drivers/gpu/drm/amd/amdkfd/Kconfig
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/cik_regs.h
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/vi_structs.h [new file with mode: 0644]
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_legacy.h
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo_ivch.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_render_state.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_guc_fwif.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_mocs.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_mocs.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/sti/sti_drm_crtc.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/mfd/Kconfig
drivers/mfd/intel_soc_pmic_core.c
drivers/mfd/intel_soc_pmic_crc.c
drivers/pwm/Kconfig
drivers/pwm/Makefile
drivers/pwm/pwm-crc.c [new file with mode: 0644]
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/intel-gtt.h
include/linux/gpio/machine.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/i915_drm.h

index 2fb9a5457522a48cf590f88d14463dfafefb668a..9ddf8c6cb88791e1f0f5a12629cd9ce5be30f2e7 100644 (file)
@@ -3982,7 +3982,6 @@ int num_ioctls;</synopsis>
         <title>Interrupt Handling</title>
 !Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
       </sect2>
@@ -4012,7 +4011,6 @@ int num_ioctls;</synopsis>
         <title>Frontbuffer Tracking</title>
 !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
 !Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
       </sect2>
       <sect2>
@@ -4044,6 +4042,11 @@ int num_ioctls;</synopsis>
          probing, so those sections fully apply.
         </para>
       </sect2>
+      <sect2>
+        <title>Hotplug</title>
+!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
+!Idrivers/gpu/drm/i915/intel_hotplug.c
+      </sect2>
       <sect2>
        <title>High Definition Audio</title>
 !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
@@ -4193,6 +4196,23 @@ int num_ioctls;</synopsis>
         <title>Global GTT views</title>
 !Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
 !Idrivers/gpu/drm/i915/i915_gem_gtt.c
+      </sect2>
+      <sect2>
+        <title>GTT Fences and Swizzling</title>
+!Idrivers/gpu/drm/i915/i915_gem_fence.c
+        <sect3>
+          <title>Global GTT Fence Handling</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
+        </sect3>
+        <sect3>
+          <title>Hardware Tiling and Swizzling Details</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
+        </sect3>
+      </sect2>
+      <sect2>
+        <title>Object Tiling IOCTLs</title>
+!Idrivers/gpu/drm/i915/i915_gem_tiling.c
+!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
       </sect2>
       <sect2>
         <title>Buffer Object Eviction</title>
index a9ae6c105520011994801168a7841b4d713b716e..94212186990d96f90d0494a7d5a0a79fc0156a7b 100644 (file)
@@ -636,9 +636,14 @@ M: Oded Gabbay <oded.gabbay@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 T:     git git://people.freedesktop.org/~gabbayo/linux.git
 S:     Supported
+F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
 F:     drivers/gpu/drm/amd/amdkfd/
 F:     drivers/gpu/drm/amd/include/cik_structs.h
 F:     drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+F:     drivers/gpu/drm/amd/include/vi_structs.h
 F:     drivers/gpu/drm/radeon/radeon_kfd.c
 F:     drivers/gpu/drm/radeon/radeon_kfd.h
 F:     include/uapi/linux/kfd_ioctl.h
index 315b861065725a4cd154744ee45d5331de6fb828..05630dfcb9f449180eda0414a821b66c68444dbc 100644 (file)
@@ -207,7 +207,6 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_DRM_I915_KMS=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
index c6dea3f6917bdcfc144fc70540cbbd26ea1918ee..1341a94cc7793aa0425eb83c4e446393be7f60c6 100644 (file)
@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-                  phys_addr_t *mappable_base, unsigned long *mappable_end)
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+                  phys_addr_t *mappable_base, u64 *mappable_end)
 {
        *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
        *stolen_size = intel_private.stolen_size;
index bf4bd1d120c38a94fb997521c1712224a7712f8d..f25dc880b0073edd2c2449b9ad9ace77781c18c7 100644 (file)
@@ -1672,6 +1672,19 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
        mutex_unlock(&gpio_lookup_lock);
 }
 
+/**
+ * gpiod_remove_lookup_table() - unregister GPIO device consumers
+ * @table: table of consumers to unregister
+ */
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
+{
+       mutex_lock(&gpio_lookup_lock);
+
+       list_del(&table->list);
+
+       mutex_unlock(&gpio_lookup_lock);
+}
+
 static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                                      unsigned int idx,
                                      enum gpio_lookup_flags *flags)
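
The new gpiod_remove_lookup_table() pairs with the existing
gpiod_add_lookup_table() so a driver can tear its mappings down again,
e.g. on module unload. A hedged usage sketch (table contents and device
names are made up):

        #include <linux/gpio/machine.h>

        static struct gpiod_lookup_table example_gpios_table = {
                .dev_id = "example-device",
                .table = {
                        GPIO_LOOKUP("gpiochip0", 11, "reset", GPIO_ACTIVE_LOW),
                        { },
                },
        };

        static int __init example_init(void)
        {
                gpiod_add_lookup_table(&example_gpios_table);
                return 0;
        }

        static void __exit example_exit(void)
        {
                /* New in this patch: symmetric removal of the mapping */
                gpiod_remove_lookup_table(&example_gpios_table);
        }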
index 616dfd4a139812d2763d36ce0e1190e795d1be5e..908360584e4dc442b7dcab40b9631a457295afd6 100644 (file)
@@ -71,6 +71,12 @@ amdgpu-y += \
        amdgpu_vce.o \
        vce_v3_0.o
 
+# add amdkfd interfaces
+amdgpu-y += \
+        amdgpu_amdkfd.o \
+        amdgpu_amdkfd_gfx_v7.o \
+        amdgpu_amdkfd_gfx_v8.o
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
index f7b49d5ce4b81d471fa3c84280560b9d0e774c78..baefa635169a953f4cb41840b24de11b632e7345 100644 (file)
@@ -2025,6 +2025,9 @@ struct amdgpu_device {
        /* tracking pinned memory */
        u64 vram_pin_size;
        u64 gart_pin_size;
+
+       /* amdkfd interface */
+       struct kfd_dev          *kfd;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644 (file)
index 0000000..bc763e0
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_family.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+       bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+       kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+       if (kgd2kfd_init_p == NULL)
+               return false;
+#endif
+       return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+       bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+       switch (rdev->asic_type) {
+       case CHIP_KAVERI:
+               kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+               break;
+       case CHIP_CARRIZO:
+               kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+               break;
+       default:
+               return false;
+       }
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+       kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+       if (kgd2kfd_init_p == NULL) {
+               kfd2kgd = NULL;
+               return false;
+       }
+
+       if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+               symbol_put(kgd2kfd_init);
+               kfd2kgd = NULL;
+               kgd2kfd = NULL;
+
+               return false;
+       }
+
+       return true;
+#elif defined(CONFIG_HSA_AMD)
+       if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+               kfd2kgd = NULL;
+               kgd2kfd = NULL;
+               return false;
+       }
+
+       return true;
+#else
+       kfd2kgd = NULL;
+       return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+       if (kgd2kfd) {
+               kgd2kfd->exit();
+               symbol_put(kgd2kfd_init);
+       }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+       if (kgd2kfd)
+               rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+                                       rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+       if (rdev->kfd) {
+               struct kgd2kfd_shared_resources gpu_resources = {
+                       .compute_vmid_bitmap = 0xFF00,
+
+                       .first_compute_pipe = 1,
+                       .compute_pipe_count = 4 - 1,
+               };
+
+               amdgpu_doorbell_get_kfd_info(rdev,
+                               &gpu_resources.doorbell_physical_address,
+                               &gpu_resources.doorbell_aperture_size,
+                               &gpu_resources.doorbell_start_offset);
+
+               kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+       }
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+       if (rdev->kfd) {
+               kgd2kfd->device_exit(rdev->kfd);
+               rdev->kfd = NULL;
+       }
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+               const void *ih_ring_entry)
+{
+       if (rdev->kfd)
+               kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+       if (rdev->kfd)
+               kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+       int r = 0;
+
+       if (rdev->kfd)
+               r = kgd2kfd->resume(rdev->kfd);
+
+       return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+       switch (p) {
+       case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+       default: return AMDGPU_GEM_DOMAIN_GTT;
+       }
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+                       void **mem_obj, uint64_t *gpu_addr,
+                       void **cpu_ptr)
+{
+       struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+       struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+       int r;
+
+       BUG_ON(kgd == NULL);
+       BUG_ON(gpu_addr == NULL);
+       BUG_ON(cpu_ptr == NULL);
+
+       *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+       if ((*mem) == NULL)
+               return -ENOMEM;
+
+       r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+                       AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+       if (r) {
+               dev_err(rdev->dev,
+                       "failed to allocate BO for amdkfd (%d)\n", r);
+               return r;
+       }
+
+       /* map the buffer */
+       r = amdgpu_bo_reserve((*mem)->bo, true);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+               goto allocate_mem_reserve_bo_failed;
+       }
+
+       r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+                               &(*mem)->gpu_addr);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+               goto allocate_mem_pin_bo_failed;
+       }
+       *gpu_addr = (*mem)->gpu_addr;
+
+       r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+       if (r) {
+               dev_err(rdev->dev,
+                       "(%d) failed to map bo to kernel for amdkfd\n", r);
+               goto allocate_mem_kmap_bo_failed;
+       }
+       *cpu_ptr = (*mem)->cpu_ptr;
+
+       amdgpu_bo_unreserve((*mem)->bo);
+
+       return 0;
+
+allocate_mem_kmap_bo_failed:
+       amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+       amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+       amdgpu_bo_unref(&(*mem)->bo);
+
+       return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+       struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+       BUG_ON(mem == NULL);
+
+       amdgpu_bo_reserve(mem->bo, true);
+       amdgpu_bo_kunmap(mem->bo);
+       amdgpu_bo_unpin(mem->bo);
+       amdgpu_bo_unreserve(mem->bo);
+       amdgpu_bo_unref(&(mem->bo));
+       kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *rdev =
+               (struct amdgpu_device *)kgd;
+
+       BUG_ON(kgd == NULL);
+
+       return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+       if (rdev->asic_funcs->get_gpu_clock_counter)
+               return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+       return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+       /* The sclk is in quanta of 10 kHz */
+       return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
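
The file above only provides the glue; the calls come from the amdgpu
driver core elsewhere in this series. A rough sketch of the intended
per-device ordering (the wrapper function names are illustrative, not
the actual amdgpu call sites; amdgpu_amdkfd_init()/fini() bracket these
once at module scope):

        static void example_device_load(struct amdgpu_device *rdev)
        {
                if (!amdgpu_amdkfd_load_interface(rdev))
                        return; /* no KFD support: graphics keeps working */

                amdgpu_amdkfd_device_probe(rdev);  /* may set rdev->kfd */
                amdgpu_amdkfd_device_init(rdev);   /* shares doorbells, VMIDs */
        }

        static void example_device_unload(struct amdgpu_device *rdev)
        {
                amdgpu_amdkfd_device_fini(rdev);   /* clears rdev->kfd */
        }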
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644 (file)
index 0000000..a8be765
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
+struct kgd_mem {
+       struct amdgpu_bo *bo;
+       uint64_t gpu_addr;
+       void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+                       const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+                       void **mem_obj, uint64_t *gpu_addr,
+                       void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
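
For reference, how amdkfd is expected to consume the shared helpers
declared above — a sketch under the assumption that kgd came from the
probe callback; the function name is hypothetical:

        static int example_alloc_scratch(struct kgd_dev *kgd)
        {
                void *mem_obj, *cpu_ptr;
                uint64_t gpu_addr;
                int r;

                r = alloc_gtt_mem(kgd, PAGE_SIZE, &mem_obj, &gpu_addr, &cpu_ptr);
                if (r)
                        return r;

                /* the BO comes back pinned in GTT and kernel-mapped */
                memset(cpu_ptr, 0, PAGE_SIZE);

                free_gtt_mem(kgd, mem_obj);
                return 0;
        }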
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644 (file)
index 0000000..2daad33
--- /dev/null
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC       (4)
+
+enum {
+       MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
+       MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+       ADDRESS_WATCH_REG_ADDR_HI = 0,
+       ADDRESS_WATCH_REG_ADDR_LO,
+       ADDRESS_WATCH_REG_CNTL,
+       ADDRESS_WATCH_REG_MAX
+};
+
+/*  not defined in the CI/KV reg file  */
+enum {
+       ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+       ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+       ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+       /* extend the mask to 26 bits to match the low address field */
+       ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+       ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+       mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+       mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+       mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+       mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+       struct {
+               uint32_t mask:24;
+               uint32_t vmid:4;
+               uint32_t atc:1;
+               uint32_t mode:2;
+               uint32_t valid:1;
+       } bitfields, bits;
+       uint32_t u32All;
+       signed int i32All;
+       float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+               uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+               uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+                               unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       uint32_t cntl_val,
+                                       uint32_t addr_hi,
+                                       uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+                                       uint32_t gfx_index_val,
+                                       uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+                                                       uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+       .init_gtt_mem_allocation = alloc_gtt_mem,
+       .free_gtt_mem = free_gtt_mem,
+       .get_vmem_size = get_vmem_size,
+       .get_gpu_clock_counter = get_gpu_clock_counter,
+       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+       .program_sh_mem_settings = kgd_program_sh_mem_settings,
+       .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+       .init_pipeline = kgd_init_pipeline,
+       .init_interrupts = kgd_init_interrupts,
+       .hqd_load = kgd_hqd_load,
+       .hqd_sdma_load = kgd_hqd_sdma_load,
+       .hqd_is_occupied = kgd_hqd_is_occupied,
+       .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+       .hqd_destroy = kgd_hqd_destroy,
+       .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+       .address_watch_disable = kgd_address_watch_disable,
+       .address_watch_execute = kgd_address_watch_execute,
+       .wave_control_execute = kgd_wave_control_execute,
+       .address_watch_get_offset = kgd_address_watch_get_offset,
+       .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+       .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+       .write_vmid_invalidate_request = write_vmid_invalidate_request,
+       .get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+{
+       return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+       return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+                       uint32_t queue, uint32_t vmid)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+       mutex_lock(&adev->srbm_mutex);
+       WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       WREG32(mmSRBM_GFX_CNTL, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+       unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+                                       uint32_t sh_mem_config,
+                                       uint32_t sh_mem_ape1_base,
+                                       uint32_t sh_mem_ape1_limit,
+                                       uint32_t sh_mem_bases)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+       WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+       WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+       WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+       unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       /*
+        * We have to assume that there is no outstanding mapping.
+        * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+        * a mapping is in progress or because a mapping finished and the
+        * SW cleared it. So the protocol is to always wait & clear.
+        */
+       uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+                       ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+       WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+       while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+               cpu_relax();
+       WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+       /* Mapping vmid to pasid also for IH block */
+       WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+       return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, 0, 0);
+       WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+       WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+       WREG32(mmCP_HPD_EOP_VMID, 0);
+       WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+       unlock_srbm(kgd);
+
+       return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t mec;
+       uint32_t pipe;
+
+       mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+       pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, 0, 0);
+
+       WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+                       CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+       unlock_srbm(kgd);
+
+       return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+       uint32_t retval;
+
+       retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+                       m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+       pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+       return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+       return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+       return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t wptr_shadow, is_wptr_shadow_valid;
+       struct cik_mqd *m;
+
+       m = get_mqd(mqd);
+
+       is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+       WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+       WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+       WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+       WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+       WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+       WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+       WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+       WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+       WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+       WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+       WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+       WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+       WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+       WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+       WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+       WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+       WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+       WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+                       m->cp_hqd_pq_rptr_report_addr_hi);
+
+       WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+       WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+       WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+       WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+       WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+       WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+       WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+       WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+       WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+       if (is_wptr_shadow_valid)
+               WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+       WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+       release_queue(kgd);
+
+       return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       struct cik_sdma_rlc_registers *m;
+       uint32_t sdma_base_addr;
+
+       m = get_sdma_mqd(mqd);
+       sdma_base_addr = get_sdma_base_addr(m);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+                       m->sdma_rlc_virtual_addr);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+                       m->sdma_rlc_rb_base);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+                       m->sdma_rlc_rb_base_hi);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+                       m->sdma_rlc_rb_rptr_addr_lo);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+                       m->sdma_rlc_rb_rptr_addr_hi);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+                       m->sdma_rlc_doorbell);
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+                       m->sdma_rlc_rb_cntl);
+
+       return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t act;
+       bool retval = false;
+       uint32_t low, high;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       act = RREG32(mmCP_HQD_ACTIVE);
+       if (act) {
+               low = lower_32_bits(queue_address >> 8);
+               high = upper_32_bits(queue_address >> 8);
+
+               if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+                               high == RREG32(mmCP_HQD_PQ_BASE_HI))
+                       retval = true;
+       }
+       release_queue(kgd);
+       return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       struct cik_sdma_rlc_registers *m;
+       uint32_t sdma_base_addr;
+       uint32_t sdma_rlc_rb_cntl;
+
+       m = get_sdma_mqd(mqd);
+       sdma_base_addr = get_sdma_base_addr(m);
+
+       sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+       if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+               return true;
+
+       return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t temp;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+       WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+       while (true) {
+               temp = RREG32(mmCP_HQD_ACTIVE);
+               if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+                       break;
+               if (timeout == 0) {
+                       pr_err("kfd: cp queue preemption timed out, pipe %u queue %u\n",
+                               pipe_id, queue_id);
+                       release_queue(kgd);
+                       return -ETIME;
+               }
+               msleep(20);
+               timeout -= 20;
+       }
+
+       release_queue(kgd);
+       return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+                               unsigned int timeout)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       struct cik_sdma_rlc_registers *m;
+       uint32_t sdma_base_addr;
+       uint32_t temp;
+
+       m = get_sdma_mqd(mqd);
+       sdma_base_addr = get_sdma_base_addr(m);
+
+       temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+       temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+       while (true) {
+               temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+               if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE_MASK)
+                       break;
+               if (timeout == 0)
+                       return -ETIME;
+               msleep(20);
+               timeout -= 20;
+       }
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+       return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       union TCP_WATCH_CNTL_BITS cntl;
+       unsigned int i;
+
+       cntl.u32All = 0;
+
+       cntl.bitfields.valid = 0;
+       cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+       cntl.bitfields.atc = 1;
+
+       /* Turn off all watch points until their registers are programmed */
+       for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+               WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+                       ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+       return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       uint32_t cntl_val,
+                                       uint32_t addr_hi,
+                                       uint32_t addr_lo)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       union TCP_WATCH_CNTL_BITS cntl;
+
+       cntl.u32All = cntl_val;
+
+       /* Turning off this watch point until we set all the registers */
+       cntl.bitfields.valid = 0;
+       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+               ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+               ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+               ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+       /* Enable the watch point */
+       cntl.bitfields.valid = 1;
+
+       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+               ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+       return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+                                       uint32_t gfx_index_val,
+                                       uint32_t sq_cmd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t data;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+
+       WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+       WREG32(mmSQ_CMD, sq_cmd);
+
+       /*  Restore the GRBM_GFX_INDEX register  */
+
+       data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+               GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+               GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+       WREG32(mmGRBM_GFX_INDEX, data);
+
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       unsigned int reg_offset)
+{
+       return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+                                                       uint8_t vmid)
+{
+       uint32_t reg;
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+       return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+                                                               uint8_t vmid)
+{
+       uint32_t reg;
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+       return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+       const union amdgpu_firmware_header *hdr;
+
+       BUG_ON(kgd == NULL);
+
+       switch (type) {
+       case KGD_ENGINE_PFP:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.pfp_fw->data;
+               break;
+
+       case KGD_ENGINE_ME:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.me_fw->data;
+               break;
+
+       case KGD_ENGINE_CE:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.ce_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC1:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.mec_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC2:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.mec2_fw->data;
+               break;
+
+       case KGD_ENGINE_RLC:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.rlc_fw->data;
+               break;
+
+       case KGD_ENGINE_SDMA1:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->sdma[0].fw->data;
+               break;
+
+       case KGD_ENGINE_SDMA2:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->sdma[1].fw->data;
+               break;
+
+       default:
+               return 0;
+       }
+
+       if (hdr == NULL)
+               return 0;
+
+       /* Only 12 bits in use */
+       return hdr->common.ucode_version;
+}
+
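
The MEC/pipe arithmetic in acquire_queue() above (including its
pre-increment quirk) is easy to misread; a worked example of the
mapping, purely for illustration:

        static void example_decode_pipe(uint32_t pipe_id)
        {
                /* same math as acquire_queue(): pipe_id is pre-incremented,
                 * and the MEC index is 1-based because ME 0 is the gfx ME */
                uint32_t mec  = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
                uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

                /* pipe_id 0 -> mec 1 pipe 1; pipe_id 3 -> mec 2 pipe 0 */
                pr_debug("kfd: pipe_id maps to mec %u, pipe %u\n", mec, pipe);
        }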
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644 (file)
index 0000000..dfd1d50
--- /dev/null
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC        (4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+               uint32_t sh_mem_config,
+               uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+               uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+               unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+               uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+               uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+               uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+                               unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       uint32_t cntl_val,
+                                       uint32_t addr_hi,
+                                       uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+                                       uint32_t gfx_index_val,
+                                       uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+               uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+               uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+       .init_gtt_mem_allocation = alloc_gtt_mem,
+       .free_gtt_mem = free_gtt_mem,
+       .get_vmem_size = get_vmem_size,
+       .get_gpu_clock_counter = get_gpu_clock_counter,
+       .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+       .program_sh_mem_settings = kgd_program_sh_mem_settings,
+       .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+       .init_pipeline = kgd_init_pipeline,
+       .init_interrupts = kgd_init_interrupts,
+       .hqd_load = kgd_hqd_load,
+       .hqd_sdma_load = kgd_hqd_sdma_load,
+       .hqd_is_occupied = kgd_hqd_is_occupied,
+       .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+       .hqd_destroy = kgd_hqd_destroy,
+       .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+       .address_watch_disable = kgd_address_watch_disable,
+       .address_watch_execute = kgd_address_watch_execute,
+       .wave_control_execute = kgd_wave_control_execute,
+       .address_watch_get_offset = kgd_address_watch_get_offset,
+       .get_atc_vmid_pasid_mapping_pasid =
+                       get_atc_vmid_pasid_mapping_pasid,
+       .get_atc_vmid_pasid_mapping_valid =
+                       get_atc_vmid_pasid_mapping_valid,
+       .write_vmid_invalidate_request = write_vmid_invalidate_request,
+       .get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+{
+       return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+       return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+                       uint32_t queue, uint32_t vmid)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+       mutex_lock(&adev->srbm_mutex);
+       WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       WREG32(mmSRBM_GFX_CNTL, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+       uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+       unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+                                       uint32_t sh_mem_config,
+                                       uint32_t sh_mem_ape1_base,
+                                       uint32_t sh_mem_ape1_limit,
+                                       uint32_t sh_mem_bases)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+       WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+       WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+       WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+       unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+                                       unsigned int vmid)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       /*
+        * We have to assume that there is no outstanding mapping.
+        * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+        * a mapping is in progress or because a mapping finished
+        * and the SW cleared it.
+        * So the protocol is to always wait & clear.
+        */
+       uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+                       ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+       WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+       while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+               cpu_relax();
+       WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+       /* Mapping vmid to pasid also for IH block */
+       WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+       return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+                               uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+       return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t mec;
+       uint32_t pipe;
+
+       mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+       pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+       lock_srbm(kgd, mec, pipe, 0, 0);
+
+       WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+       unlock_srbm(kgd);
+
+       return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+       return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+       return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+       return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+                       uint32_t queue_id, uint32_t __user *wptr)
+{
+       struct vi_mqd *m;
+       uint32_t shadow_wptr, valid_wptr;
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       m = get_mqd(mqd);
+
+       valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+       acquire_queue(kgd, pipe_id, queue_id);
+
+       WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+       WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+       WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+       WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+       WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+       WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+       WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+       WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+       WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+       WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+       WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+       WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+                       m->cp_hqd_pq_rptr_report_addr_hi);
+
+       /* copy_from_user() returns the number of bytes left uncopied,
+        * so a return of 0 means the shadow wptr was read successfully.
+        */
+       if (valid_wptr == 0)
+               WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+       WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+       WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+       WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+       WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+       WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+       WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+       WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+       WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+       WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+       WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+       WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+       WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+       WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+       WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+       WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+       WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+       WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+       WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+       WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+       WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+       WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+       release_queue(kgd);
+
+       return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+       return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+                               uint32_t pipe_id, uint32_t queue_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t act;
+       bool retval = false;
+       uint32_t low, high;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+       act = RREG32(mmCP_HQD_ACTIVE);
+       if (act) {
+               low = lower_32_bits(queue_address >> 8);
+               high = upper_32_bits(queue_address >> 8);
+
+               if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+                               high == RREG32(mmCP_HQD_PQ_BASE_HI))
+                       retval = true;
+       }
+       release_queue(kgd);
+       return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       struct cik_sdma_rlc_registers *m;
+       uint32_t sdma_base_addr;
+       uint32_t sdma_rlc_rb_cntl;
+
+       m = get_sdma_mqd(mqd);
+       sdma_base_addr = get_sdma_base_addr(m);
+
+       sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+       if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+               return true;
+
+       return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t temp;
+
+       acquire_queue(kgd, pipe_id, queue_id);
+
+       WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+       while (true) {
+               temp = RREG32(mmCP_HQD_ACTIVE);
+               if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+                       break;
+               if (timeout == 0) {
+                       pr_err("kfd: cp queue preemption time out\n");
+                       release_queue(kgd);
+                       return -ETIME;
+               }
+               msleep(20);
+               timeout -= 20;
+       }
+
+       release_queue(kgd);
+       return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+                               unsigned int timeout)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       struct cik_sdma_rlc_registers *m;
+       uint32_t sdma_base_addr;
+       uint32_t temp;
+
+       m = get_sdma_mqd(mqd);
+       sdma_base_addr = get_sdma_base_addr(m);
+
+       temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+       temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+       while (true) {
+               temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+               if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+                       break;
+               if (timeout == 0)
+                       return -ETIME;
+               msleep(20);
+               timeout -= 20;
+       }
+
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+       return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+                                                       uint8_t vmid)
+{
+       uint32_t reg;
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+       return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+                                                               uint8_t vmid)
+{
+       uint32_t reg;
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+       return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+       WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+       return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       uint32_t cntl_val,
+                                       uint32_t addr_hi,
+                                       uint32_t addr_lo)
+{
+       return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+                                       uint32_t gfx_index_val,
+                                       uint32_t sq_cmd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+       uint32_t data = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+
+       WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+       WREG32(mmSQ_CMD, sq_cmd);
+
+       data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+               INSTANCE_BROADCAST_WRITES, 1);
+       data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+               SH_BROADCAST_WRITES, 1);
+       data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+               SE_BROADCAST_WRITES, 1);
+
+       WREG32(mmGRBM_GFX_INDEX, data);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+                                       unsigned int watch_point_id,
+                                       unsigned int reg_offset)
+{
+       return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+       const union amdgpu_firmware_header *hdr;
+
+       BUG_ON(kgd == NULL);
+
+       switch (type) {
+       case KGD_ENGINE_PFP:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.pfp_fw->data;
+               break;
+
+       case KGD_ENGINE_ME:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.me_fw->data;
+               break;
+
+       case KGD_ENGINE_CE:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.ce_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC1:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.mec_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC2:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.mec2_fw->data;
+               break;
+
+       case KGD_ENGINE_RLC:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->gfx.rlc_fw->data;
+               break;
+
+       case KGD_ENGINE_SDMA1:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->sdma[0].fw->data;
+               break;
+
+       case KGD_ENGINE_SDMA2:
+               hdr = (const union amdgpu_firmware_header *)
+                                                       adev->sdma[1].fw->data;
+               break;
+
+       default:
+               return 0;
+       }
+
+       if (hdr == NULL)
+               return 0;
+
+       /* Only 12 bits in use */
+       return hdr->common.ucode_version;
+}
index 56da962231fc7b9810cb9bc8717862afefc2ff05..115906f5fda0a38e3a070ed160da25778725563b 100644 (file)
@@ -44,6 +44,8 @@
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * KMS wrapper.
  * - 3.0.0 - initial driver
@@ -527,12 +529,15 @@ static int __init amdgpu_init(void)
        driver->num_ioctls = amdgpu_max_kms_ioctl;
        amdgpu_register_atpx_handler();
 
+       amdgpu_amdkfd_init();
+
        /* let modprobe override vga console setting */
        return drm_pci_init(driver, pdriver);
 }
 
 static void __exit amdgpu_exit(void)
 {
+       amdgpu_amdkfd_fini();
        drm_pci_exit(driver, pdriver);
        amdgpu_unregister_atpx_handler();
 }
index db5422e65ec5f684b0d88ce5fa04bac986324653..fb44dd2231b1c5fa03590fd7793358f0c8782aa3 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
 
 /**
  * amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -199,6 +200,12 @@ restart_ih:
        rmb();
 
        while (adev->irq.ih.rptr != wptr) {
+               u32 ring_index = adev->irq.ih.rptr >> 2;
+
+               /* Before dispatching irq to IP blocks, send it to amdkfd */
+               amdgpu_amdkfd_interrupt(adev,
+                               (const void *) &adev->irq.ih.ring[ring_index]);
+
                amdgpu_ih_decode_iv(adev, &entry);
                adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
 
index 3bfe67de834904628e0e4e11677c706c4848fde7..93000af92283f3619114c4dfd1f0e916f7adeec8 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
 
 #if defined(CONFIG_VGA_SWITCHEROO)
 bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 
        pm_runtime_get_sync(dev->dev);
 
+       amdgpu_amdkfd_device_fini(adev);
+
        amdgpu_acpi_fini(adev);
 
        amdgpu_device_fini(adev);
@@ -118,6 +121,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
                                "Error during ACPI methods call\n");
        }
 
+       amdgpu_amdkfd_load_interface(adev);
+       amdgpu_amdkfd_device_probe(adev);
+       amdgpu_amdkfd_device_init(adev);
+
        if (amdgpu_device_is_px(dev)) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
index 341c566818419317a0c3d16a3d5b738840e30b46..b3b66a0d5ff7ce2d6f3fa36ae7b2cdb25cb468b8 100644 (file)
@@ -64,6 +64,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * Indirect registers accessor
  */
@@ -2448,14 +2450,21 @@ static int cik_common_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_amdkfd_suspend(adev);
+
        return cik_common_hw_fini(adev);
 }
 
 static int cik_common_resume(void *handle)
 {
+       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return cik_common_hw_init(adev);
+       r = cik_common_hw_init(adev);
+       if (r)
+               return r;
+
+       return amdgpu_amdkfd_resume(adev);
 }
 
 static bool cik_common_is_idle(void *handle)
index d19085a9706489a00a0e13306af0d6275587b88c..a3e3dfaa01a4330a276f604ae912b65af3d9c512 100644 (file)
 #define VCE_CMD_IB_AUTO                0x00000005
 #define VCE_CMD_SEMAPHORE      0x00000006
 
+/* if PTR32, these are the bases for scratch and lds */
+#define        PRIVATE_BASE(x) ((x) << 0) /* scratch */
+#define        SHARED_BASE(x)  ((x) << 16) /* LDS */
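+/* e.g. a 32-bit-mode base value of 0x40 for both apertures packs to
+ * PRIVATE_BASE(0x40) | SHARED_BASE(0x40) == 0x00400040
+ */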
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET      0x200
+
 /* valid for both DEFAULT_MTYPE and APE1_MTYPE */
 enum {
        MTYPE_CACHED = 0,
index 31bb89452e1245d5b78fc71015a6d2d021403c76..d98aa9d82fa193dc5e13a98a34df6ab87733e597 100644 (file)
 
 #define AMDGPU_NUM_OF_VMIDS                    8
 
+#define                PIPEID(x)                                       ((x) << 0)
+#define                MEID(x)                                         ((x) << 2)
+#define                VMID(x)                                         ((x) << 4)
+#define                QUEUEID(x)                                      ((x) << 8)
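+/* e.g. selecting MEC1, pipe 2, queue 3 for an SRBM_GFX_CNTL write packs to
+ * MEID(1) | PIPEID(2) | QUEUEID(3) == 0x00000306
+ */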
+
 #define RB_BITMAP_WIDTH_PER_SH     2
 
 #define MC_SEQ_MISC0__MT__MASK 0xf0000000
index 8dfac37ff32723bdf43b0d397ed64bfc0a45f08d..e13c67c8d2c0ed85ac4666a9d31ff929dc8e05b6 100644 (file)
@@ -4,6 +4,6 @@
 
 config HSA_AMD
        tristate "HSA kernel driver for AMD GPU devices"
-       depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+       depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
        help
          Enable this if you want to use HSA features on AMD GPU devices.
index 28551153ec6d0ea18031f361618a5158a537c88e..7fc9b0f444cbbada434ff2c5b9c793b32cb773d3 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for Heterogeneous System Architecture support for AMD GPU devices
 #
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/  \
+               -Idrivers/gpu/drm/amd/include/asic_reg
 
 amdkfd-y       := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
                kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
index 183be5b8414fd2e979e2e449aa047e4a512d41ce..48769d12dd7beea06c3b7d3e5cf0846314970cec 100644 (file)
 
 #define        AQL_ENABLE                                      1
 
-#define        SDMA_RB_VMID(x)                                 (x << 24)
-#define        SDMA_RB_ENABLE                                  (1 << 0)
-#define        SDMA_RB_SIZE(x)                                 ((x) << 1) /* log2 */
-#define        SDMA_RPTR_WRITEBACK_ENABLE                      (1 << 12)
-#define        SDMA_RPTR_WRITEBACK_TIMER(x)                    ((x) << 16) /* log2 */
-#define        SDMA_OFFSET(x)                                  (x << 0)
-#define        SDMA_DB_ENABLE                                  (1 << 28)
-#define        SDMA_ATC                                        (1 << 0)
-#define        SDMA_VA_PTR32                                   (1 << 4)
-#define        SDMA_VA_SHARED_BASE(x)                          (x << 8)
-
 #define GRBM_GFX_INDEX                                 0x30800
 
 #define        ATC_VMID_PASID_MAPPING_VALID                    (1U << 31)
index 75312c82969f3de1fe6f8049a3175fc800e5af22..3f95f7cb4019468b1011e0483fa50192d0d45f6a 100644 (file)
@@ -80,7 +80,12 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x1318, &kaveri_device_info },        /* Kaveri */
        { 0x131B, &kaveri_device_info },        /* Kaveri */
        { 0x131C, &kaveri_device_info },        /* Kaveri */
-       { 0x131D, &kaveri_device_info }         /* Kaveri */
+       { 0x131D, &kaveri_device_info },        /* Kaveri */
+       { 0x9870, &carrizo_device_info },       /* Carrizo */
+       { 0x9874, &carrizo_device_info },       /* Carrizo */
+       { 0x9875, &carrizo_device_info },       /* Carrizo */
+       { 0x9876, &carrizo_device_info },       /* Carrizo */
+       { 0x9877, &carrizo_device_info }        /* Carrizo */
 };
 
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
index 9ce8a20a7aff0760e57e8028b635abd21749546a..23ce774ff09d324662c511f14e0188e53f079496 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "kfd_device_queue_manager.h"
 #include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
                                   struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@ static int register_process_cik(struct device_queue_manager *dqm,
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
                                struct qcm_process_device *qpd)
 {
-       uint32_t value = SDMA_ATC;
+       uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
 
        if (q->process->is_32bit_user_mode)
-               value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+               value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+                               get_sh_mem_bases_32(qpd_to_pdd(qpd));
        else
-               value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
-                                                       qpd_to_pdd(qpd)));
+               value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+                               SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+                               SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
        q->properties.sdma_vm_addr = value;
 }
 
index 4c15212a38996ec6bde7ccf083d3a0f413b35465..44c38e8e54d303b0bbb3cbcab51a968882f3589c 100644 (file)
  */
 
 #include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
                                   struct qcm_process_device *qpd,
@@ -37,14 +41,40 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
 {
-       pr_warn("amdkfd: VI DQM is not currently supported\n");
-
        ops->set_cache_memory_policy = set_cache_memory_policy_vi;
        ops->register_process = register_process_vi;
        ops->initialize = initialize_cpsch_vi;
        ops->init_sdma_vm = init_sdma_vm;
 }
 
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+       /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+        * scratch and GPUVM apertures.
+        * The hardware fills in the remaining 59 bits according to the
+        * following pattern:
+        * LDS:         X0000000'00000000 - X0000001'00000000 (4GB)
+        * Scratch:     X0000001'00000000 - X0000002'00000000 (4GB)
+        * GPUVM:       Y0010000'00000000 - Y0020000'00000000 (1TB)
+        *
+        * (where X/Y is the configurable nybble with the low-bit 0)
+        *
+        * LDS and scratch will have the same top nybble programmed in the
+        * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+        * GPUVM can have a different top nybble programmed in the
+        * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+        * We don't bother to support different top nybbles
+        * for LDS/Scratch and GPUVM.
+        */
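+       /* e.g. top_address_nybble == 0x8 gives PRIVATE_BASE == 0x8000 and
+        * SHARED_BASE == 0x8000 << 16, i.e. SH_MEM_BASES == 0x80008000.
+        */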
+
+       BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+               top_address_nybble == 0);
+
+       return top_address_nybble << 12 |
+                       (top_address_nybble << 12) <<
+                       SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
+
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
                                   struct qcm_process_device *qpd,
                                   enum cache_policy default_policy,
@@ -52,18 +82,83 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
                                   void __user *alternate_aperture_base,
                                   uint64_t alternate_aperture_size)
 {
-       return false;
+       uint32_t default_mtype;
+       uint32_t ape1_mtype;
+
+       default_mtype = (default_policy == cache_policy_coherent) ?
+                       MTYPE_CC :
+                       MTYPE_NC;
+
+       ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+                       MTYPE_CC :
+                       MTYPE_NC;
+
+       qpd->sh_mem_config = (qpd->sh_mem_config &
+                       SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+               SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+                               SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+               default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+               ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+               SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+       return true;
 }
 
 static int register_process_vi(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
 {
-       return -1;
+       struct kfd_process_device *pdd;
+       unsigned int temp;
+
+       BUG_ON(!dqm || !qpd);
+
+       pdd = qpd_to_pdd(qpd);
+
+       /* check if sh_mem_config register already configured */
+       if (qpd->sh_mem_config == 0) {
+               qpd->sh_mem_config =
+                       SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+                               SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+                       MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+                       MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+                       SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+               qpd->sh_mem_ape1_limit = 0;
+               qpd->sh_mem_ape1_base = 0;
+       }
+
+       if (qpd->pqm->process->is_32bit_user_mode) {
+               temp = get_sh_mem_bases_32(pdd);
+               qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+               qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+                                       SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+       } else {
+               temp = get_sh_mem_bases_nybble_64(pdd);
+               qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+               qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+                       SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+       }
+
+       pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+               qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+       return 0;
 }
 
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
                                struct qcm_process_device *qpd)
 {
+       uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+       if (q->process->is_32bit_user_mode)
+               value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+                               get_sh_mem_bases_32(qpd_to_pdd(qpd));
+       else
+               value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+                               SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+                               SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+       q->properties.sdma_vm_addr = value;
 }
 
 static int initialize_cpsch_vi(struct device_queue_manager *dqm)
index 434979428fc01264647b6189d41aea235d4fda4b..d83de985e88cf882a150f2823a08632a7e2e5594 100644 (file)
@@ -27,6 +27,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static inline struct cik_mqd *get_mqd(void *mqd)
 {
@@ -214,17 +215,20 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        BUG_ON(!mm || !mqd || !q);
 
        m = get_sdma_mqd(mqd);
-       m->sdma_rlc_rb_cntl =
-               SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
-               SDMA_RB_VMID(q->vmid) |
-               SDMA_RPTR_WRITEBACK_ENABLE |
-               SDMA_RPTR_WRITEBACK_TIMER(6);
+       m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+                       SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+                       q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+                       1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+                       6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
 
        m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
        m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
        m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
-       m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+       m->sdma_rlc_doorbell = q->doorbell_off <<
+                       SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+                       1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+
        m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
 
        m->sdma_engine_id = q->sdma_engine_id;
@@ -234,7 +238,9 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        if (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0) {
-               m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+               m->sdma_rlc_rb_cntl |=
+                               1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
                q->is_active = true;
        }
 
index b3a7e3ba1e380cb270da2469fd9a3e53fbcd89a2..fa32c32fa1c2bc1fc29bc8f6baea120987a47685 100644 (file)
  */
 
 #include <linux/printk.h>
+#include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+       return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *q)
+{
+       int retval;
+       uint64_t addr;
+       struct vi_mqd *m;
+
+       retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+                       mqd_mem_obj);
+       if (retval != 0)
+               return -ENOMEM;
+
+       m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+       addr = (*mqd_mem_obj)->gpu_addr;
+
+       memset(m, 0, sizeof(struct vi_mqd));
+
+       m->header = 0xC0310800;
+       m->compute_pipelinestat_enable = 1;
+       m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+       m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+       m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+                       0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+       m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+                       MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+       m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+       m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+       m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+                       1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+                       10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+       m->cp_hqd_pipe_priority = 1;
+       m->cp_hqd_queue_priority = 15;
+
+       m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+       if (q->format == KFD_QUEUE_FORMAT_AQL)
+               m->cp_hqd_iq_rptr = 1;
+
+       *mqd = m;
+       if (gart_addr != NULL)
+               *gart_addr = addr;
+       retval = mm->update_mqd(mm, m, q);
+
+       return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+                       uint32_t pipe_id, uint32_t queue_id,
+                       uint32_t __user *wptr)
+{
+       return mm->dev->kfd2kgd->hqd_load
+               (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q, unsigned int mtype,
+                       unsigned int atc_bit)
+{
+       struct vi_mqd *m;
+
+       BUG_ON(!mm || !q || !mqd);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       m = get_mqd(mqd);
+
+       m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+                       atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+                       mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
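+       /* The HQD encodes the ring size as log2(size in dwords) - 1; ffs()
+        * is 1-based, hence the double decrement below.
+        */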
+       m->cp_hqd_pq_control |=
+                       ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+       pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+       m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+       m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+       m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+       m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+       m->cp_hqd_pq_doorbell_control =
+               1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+               q->doorbell_off <<
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+       pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+                       m->cp_hqd_pq_doorbell_control);
+
+       m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+                       mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+       m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+                       3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+                       mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+       m->cp_hqd_eop_control |=
+               ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+       m->cp_hqd_eop_base_addr_lo =
+                       lower_32_bits(q->eop_ring_buffer_address >> 8);
+       m->cp_hqd_eop_base_addr_hi =
+                       upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+       m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+                       mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+       m->cp_hqd_vmid = q->vmid;
+
+       if (q->format == KFD_QUEUE_FORMAT_AQL) {
+               m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+                               2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+       }
+
+       m->cp_hqd_active = 0;
+       q->is_active = false;
+       if (q->queue_size > 0 &&
+                       q->queue_address != 0 &&
+                       q->queue_percent > 0) {
+               m->cp_hqd_active = 1;
+               q->is_active = true;
+       }
+
+       return 0;
+}
+
+
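+/* Regular user queues are updated cache-coherent (MTYPE_CC) with the ATC
+ * bit set; update_mqd_hiq() below instead uses MTYPE_UC with ATC disabled.
+ */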
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+                       enum kfd_preempt_type type,
+                       unsigned int timeout, uint32_t pipe_id,
+                       uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_destroy
+               (mm->dev->kgd, type, timeout,
+               pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+                       struct kfd_mem_obj *mqd_mem_obj)
+{
+       BUG_ON(!mm || !mqd);
+       kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+                       uint64_t queue_address, uint32_t pipe_id,
+                       uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_is_occupied(
+               mm->dev->kgd, queue_address,
+               pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *q)
+{
+       struct vi_mqd *m;
+       int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+       if (retval != 0)
+               return retval;
+
+       m = get_mqd(*mqd);
+
+       m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+                       1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+       return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       struct vi_mqd *m;
+       int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+       if (retval != 0)
+               return retval;
+
+       m = get_mqd(mqd);
+       m->cp_hqd_vmid = q->vmid;
+       return retval;
+}
 
 struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
-                                       struct kfd_dev *dev)
+               struct kfd_dev *dev)
 {
-       pr_warn("amdkfd: VI MQD is not currently supported\n");
-       return NULL;
+       struct mqd_manager *mqd;
+
+       BUG_ON(!dev);
+       BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+       if (!mqd)
+               return NULL;
+
+       mqd->dev = dev;
+
+       switch (type) {
+       case KFD_MQD_TYPE_CP:
+       case KFD_MQD_TYPE_COMPUTE:
+               mqd->init_mqd = init_mqd;
+               mqd->uninit_mqd = uninit_mqd;
+               mqd->load_mqd = load_mqd;
+               mqd->update_mqd = update_mqd;
+               mqd->destroy_mqd = destroy_mqd;
+               mqd->is_occupied = is_occupied;
+               break;
+       case KFD_MQD_TYPE_HIQ:
+               mqd->init_mqd = init_mqd_hiq;
+               mqd->uninit_mqd = uninit_mqd;
+               mqd->load_mqd = load_mqd;
+               mqd->update_mqd = update_mqd_hiq;
+               mqd->destroy_mqd = destroy_mqd;
+               mqd->is_occupied = is_occupied;
+               break;
+       case KFD_MQD_TYPE_SDMA:
+               break;
+       default:
+               kfree(mqd);
+               return NULL;
+       }
+
+       return mqd;
 }
index 99b6d28a11c3e9030c734a58170ec4bb35bac56f..90f391434fa392b79d9fb2bc3bb41b6ea79bd746 100644 (file)
@@ -27,6 +27,7 @@
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
 #include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
 #include "kfd_pm4_opcodes.h"
 
 static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                                bool *over_subscription)
 {
        unsigned int process_count, queue_count;
+       unsigned int map_queue_size;
 
        BUG_ON(!pm || !rlib_size || !over_subscription);
 
@@ -69,9 +71,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                pr_debug("kfd: over subscribed runlist\n");
        }
 
+       map_queue_size =
+               (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+               sizeof(struct pm4_mes_map_queues) :
+               sizeof(struct pm4_map_queues);
        /* calculate run list ib allocation size */
        *rlib_size = process_count * sizeof(struct pm4_map_process) +
-                    queue_count * sizeof(struct pm4_map_queues);
+                    queue_count * map_queue_size;
 
        /*
         * Increase the allocation size in case we need a chained run list
@@ -176,6 +182,71 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
        return 0;
 }
 
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+               struct queue *q, bool is_static)
+{
+       struct pm4_mes_map_queues *packet;
+       bool use_static = is_static;
+
+       BUG_ON(!pm || !buffer || !q);
+
+       pr_debug("kfd: In func %s\n", __func__);
+
+       packet = (struct pm4_mes_map_queues *)buffer;
+       memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+       packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+                                       sizeof(struct pm4_mes_map_queues));
+       packet->bitfields2.alloc_format =
+               alloc_format__mes_map_queues__one_per_pipe_vi;
+       packet->bitfields2.num_queues = 1;
+       packet->bitfields2.queue_sel =
+               queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+       packet->bitfields2.engine_sel =
+               engine_sel__mes_map_queues__compute_vi;
+       packet->bitfields2.queue_type =
+               queue_type__mes_map_queues__normal_compute_vi;
+
+       switch (q->properties.type) {
+       case KFD_QUEUE_TYPE_COMPUTE:
+               if (use_static)
+                       packet->bitfields2.queue_type =
+               queue_type__mes_map_queues__normal_latency_static_queue_vi;
+               break;
+       case KFD_QUEUE_TYPE_DIQ:
+               packet->bitfields2.queue_type =
+                       queue_type__mes_map_queues__debug_interface_queue_vi;
+               break;
+       case KFD_QUEUE_TYPE_SDMA:
+               packet->bitfields2.engine_sel =
+                               engine_sel__mes_map_queues__sdma0_vi;
+               use_static = false; /* no static queues under SDMA */
+               break;
+       default:
+               pr_err("kfd: in %s queue type %d\n", __func__,
+                               q->properties.type);
+               BUG();
+               break;
+       }
+       packet->bitfields3.doorbell_offset =
+                       q->properties.doorbell_off;
+
+       packet->mqd_addr_lo =
+                       lower_32_bits(q->gart_mqd_addr);
+
+       packet->mqd_addr_hi =
+                       upper_32_bits(q->gart_mqd_addr);
+
+       packet->wptr_addr_lo =
+                       lower_32_bits((uint64_t)q->properties.write_ptr);
+
+       packet->wptr_addr_hi =
+                       upper_32_bits((uint64_t)q->properties.write_ptr);
+
+       return 0;
+}
+
 static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
                                struct queue *q, bool is_static)
 {
@@ -292,8 +363,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
                        pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
                                kq->queue->queue, qpd->is_debug);
 
-                       retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-                                               kq->queue, qpd->is_debug);
+                       if (pm->dqm->dev->device_info->asic_family ==
+                                       CHIP_CARRIZO)
+                               retval = pm_create_map_queue_vi(pm,
+                                               &rl_buffer[rl_wptr],
+                                               kq->queue,
+                                               qpd->is_debug);
+                       else
+                               retval = pm_create_map_queue(pm,
+                                               &rl_buffer[rl_wptr],
+                                               kq->queue,
+                                               qpd->is_debug);
                        if (retval != 0)
                                return retval;
 
@@ -309,8 +389,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
                        pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
                                q->queue, qpd->is_debug);
 
-                       retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-                                               q,  qpd->is_debug);
+                       if (pm->dqm->dev->device_info->asic_family ==
+                                       CHIP_CARRIZO)
+                               retval = pm_create_map_queue_vi(pm,
+                                               &rl_buffer[rl_wptr],
+                                               q,
+                                               qpd->is_debug);
+                       else
+                               retval = pm_create_map_queue(pm,
+                                               &rl_buffer[rl_wptr],
+                                               q,
+                                               qpd->is_debug);
 
                        if (retval != 0)
                                return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644 (file)
index 0000000..08c7219
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+       struct {
+               uint32_t reserved1 : 8; /* < reserved */
+               uint32_t opcode    : 8; /* < IT opcode */
+               uint32_t count     : 14;/* < number of DWORDs - 1 in the
+               information body. */
+               uint32_t type      : 2; /* < packet identifier.
+                                       It should be 3 for type 3 packets */
+       };
+       uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
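+/*
+ * e.g. a type-3 header for a packet with an n-dword information body can be
+ * composed as: (3u << 30) | ((n - 1) << 16) | (opcode << 8)
+ */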
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+       queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+       queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+       queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+       union {
+               union PM4_MES_TYPE_3_HEADER     header;         /* header */
+               uint32_t                        ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t vmid_mask:16;
+                       uint32_t unmap_latency:8;
+                       uint32_t reserved1:5;
+                       enum mes_set_resources_queue_type_enum queue_type:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       uint32_t queue_mask_lo;
+       uint32_t queue_mask_hi;
+       uint32_t gws_mask_lo;
+       uint32_t gws_mask_hi;
+
+       union {
+               struct {
+                       uint32_t oac_mask:16;
+                       uint32_t reserved2:16;
+               } bitfields7;
+               uint32_t ordinal7;
+       };
+
+       union {
+               struct {
+                       uint32_t gds_heap_base:6;
+                       uint32_t reserved3:5;
+                       uint32_t gds_heap_size:6;
+                       uint32_t reserved4:15;
+               } bitfields8;
+               uint32_t ordinal8;
+       };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+       union {
+           union PM4_MES_TYPE_3_HEADER   header;            /* header */
+           uint32_t            ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved1:2;
+                       uint32_t ib_base_lo:30;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t ib_base_hi:16;
+                       uint32_t reserved2:16;
+               } bitfields3;
+               uint32_t ordinal3;
+       };
+
+       union {
+               struct {
+                       uint32_t ib_size:20;
+                       uint32_t chain:1;
+                       uint32_t offload_polling:1;
+                       uint32_t reserved3:1;
+                       uint32_t valid:1;
+                       uint32_t reserved4:8;
+               } bitfields4;
+               uint32_t ordinal4;
+       };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+       union {
+               union PM4_MES_TYPE_3_HEADER   header;            /* header */
+               uint32_t            ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved1:8;
+                       uint32_t diq_enable:1;
+                       uint32_t process_quantum:7;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t page_table_base:28;
+                       uint32_t reserved2:4;
+               } bitfields3;
+               uint32_t ordinal3;
+       };
+
+       uint32_t sh_mem_bases;
+       uint32_t sh_mem_ape1_base;
+       uint32_t sh_mem_ape1_limit;
+       uint32_t sh_mem_config;
+       uint32_t gds_addr_lo;
+       uint32_t gds_addr_hi;
+
+       union {
+               struct {
+                       uint32_t num_gws:6;
+                       uint32_t reserved3:2;
+                       uint32_t num_oac:4;
+                       uint32_t reserved4:4;
+                       uint32_t gds_size:6;
+                       uint32_t num_queues:10;
+               } bitfields10;
+               uint32_t ordinal10;
+       };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+       queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+       queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+       queue_type__mes_map_queues__normal_compute_vi = 0,
+       queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+       queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+       queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+       alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+       alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+       engine_sel__mes_map_queues__compute_vi = 0,
+       engine_sel__mes_map_queues__sdma0_vi = 2,
+       engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+       union {
+               union PM4_MES_TYPE_3_HEADER   header;            /* header */
+               uint32_t            ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved1:4;
+                       enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+                       uint32_t reserved2:15;
+                       enum mes_map_queues_queue_type_vi_enum queue_type:3;
+                       enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+                       enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+                       uint32_t num_queues:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved3:1;
+                       uint32_t check_disable:1;
+                       uint32_t doorbell_offset:21;
+                       uint32_t reserved4:3;
+                       uint32_t queue:6;
+               } bitfields3;
+               uint32_t ordinal3;
+       };
+
+       uint32_t mqd_addr_lo;
+       uint32_t mqd_addr_hi;
+       uint32_t wptr_addr_lo;
+       uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+       interrupt_sel__mes_query_status__completion_status = 0,
+       interrupt_sel__mes_query_status__process_status = 1,
+       interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+       command__mes_query_status__interrupt_only = 0,
+       command__mes_query_status__fence_only_immediate = 1,
+       command__mes_query_status__fence_only_after_write_ack = 2,
+       command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+       engine_sel__mes_query_status__compute = 0,
+       engine_sel__mes_query_status__sdma0_queue = 2,
+       engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+       union {
+               union PM4_MES_TYPE_3_HEADER   header;            /* header */
+               uint32_t            ordinal1;
+       };
+
+       union {
+               struct {
+                       uint32_t context_id:28;
+                       enum mes_query_status_interrupt_sel_enum
+                               interrupt_sel:2;
+                       enum mes_query_status_command_enum command:2;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved1:16;
+               } bitfields3a;
+               struct {
+                       uint32_t reserved2:2;
+                       uint32_t doorbell_offset:21;
+                       uint32_t reserved3:2;
+                       enum mes_query_status_engine_sel_enum engine_sel:3;
+                       uint32_t reserved4:4;
+               } bitfields3b;
+               uint32_t ordinal3;
+       };
+
+       uint32_t addr_lo;
+       uint32_t addr_hi;
+       uint32_t data_lo;
+       uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+       action__mes_unmap_queues__preempt_queues = 0,
+       action__mes_unmap_queues__reset_queues = 1,
+       action__mes_unmap_queues__disable_process_queues = 2,
+       action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+       queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+       queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+       queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+       queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+       engine_sel__mes_unmap_queues__compute = 0,
+       engine_sel__mes_unmap_queues__sdma0 = 2,
+       engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_mes_unmap_queues {
+       union {
+               union PM4_MES_TYPE_3_HEADER   header;            /* header */
+               uint32_t            ordinal1;
+       };
+
+       union {
+               struct {
+                       enum mes_unmap_queues_action_enum action:2;
+                       uint32_t reserved1:2;
+                       enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+                       uint32_t reserved2:20;
+                       enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+                       uint32_t num_queues:3;
+               } bitfields2;
+               uint32_t ordinal2;
+       };
+
+       union {
+               struct {
+                       uint32_t pasid:16;
+                       uint32_t reserved3:16;
+               } bitfields3a;
+               struct {
+                       uint32_t reserved4:2;
+                       uint32_t doorbell_offset0:21;
+                       uint32_t reserved5:9;
+               } bitfields3b;
+               uint32_t ordinal3;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved6:2;
+                       uint32_t doorbell_offset1:21;
+                       uint32_t reserved7:9;
+               } bitfields4;
+               uint32_t ordinal4;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved8:2;
+                       uint32_t doorbell_offset2:21;
+                       uint32_t reserved9:9;
+               } bitfields5;
+               uint32_t ordinal5;
+       };
+
+       union {
+               struct {
+                       uint32_t reserved10:2;
+                       uint32_t doorbell_offset3:21;
+                       uint32_t reserved11:9;
+               } bitfields6;
+               uint32_t ordinal6;
+       };
+};
+#endif
+
+#endif
index c25728bc388a2be7134cb3e6b895a7a39d4189a2..74909e72a00929c8a87b73c9e25afb633fb50cf4 100644 (file)
@@ -1186,6 +1186,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
         * TODO: Retrieve max engine clock values from KGD
         */
 
+       if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+               dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+               pr_info("amdkfd: adding doorbell packet type capability\n");
+       }
+
        res = 0;
 
 err:
index 989624b3cd14841422b004028df7cbda9ee5be86..c3ddb9b95ff8de6a52196f581d9eee1c1ccd3cb0 100644 (file)
@@ -40,6 +40,7 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK    0x00000f00
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT   8
 #define HSA_CAP_RESERVED                       0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE           0x00001000
 
 struct kfd_node_properties {
        uint32_t cpu_cores_count;
index 9080daa116b60031e156f0d10e200d2017c2950e..888250b33ea8993683538abed3de77bda023e5a4 100644 (file)
@@ -52,7 +52,8 @@ enum kgd_engine_type {
        KGD_ENGINE_MEC1,
        KGD_ENGINE_MEC2,
        KGD_ENGINE_RLC,
-       KGD_ENGINE_SDMA,
+       KGD_ENGINE_SDMA1,
+       KGD_ENGINE_SDMA2,
        KGD_ENGINE_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644 (file)
index 0000000..65cfacd
--- /dev/null
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+       uint32_t sdmax_rlcx_rb_cntl;
+       uint32_t sdmax_rlcx_rb_base;
+       uint32_t sdmax_rlcx_rb_base_hi;
+       uint32_t sdmax_rlcx_rb_rptr;
+       uint32_t sdmax_rlcx_rb_wptr;
+       uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+       uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+       uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+       uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+       uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+       uint32_t sdmax_rlcx_ib_cntl;
+       uint32_t sdmax_rlcx_ib_rptr;
+       uint32_t sdmax_rlcx_ib_offset;
+       uint32_t sdmax_rlcx_ib_base_lo;
+       uint32_t sdmax_rlcx_ib_base_hi;
+       uint32_t sdmax_rlcx_ib_size;
+       uint32_t sdmax_rlcx_skip_cntl;
+       uint32_t sdmax_rlcx_context_status;
+       uint32_t sdmax_rlcx_doorbell;
+       uint32_t sdmax_rlcx_virtual_addr;
+       uint32_t sdmax_rlcx_ape1_cntl;
+       uint32_t sdmax_rlcx_doorbell_log;
+       uint32_t reserved_22;
+       uint32_t reserved_23;
+       uint32_t reserved_24;
+       uint32_t reserved_25;
+       uint32_t reserved_26;
+       uint32_t reserved_27;
+       uint32_t reserved_28;
+       uint32_t reserved_29;
+       uint32_t reserved_30;
+       uint32_t reserved_31;
+       uint32_t reserved_32;
+       uint32_t reserved_33;
+       uint32_t reserved_34;
+       uint32_t reserved_35;
+       uint32_t reserved_36;
+       uint32_t reserved_37;
+       uint32_t reserved_38;
+       uint32_t reserved_39;
+       uint32_t reserved_40;
+       uint32_t reserved_41;
+       uint32_t reserved_42;
+       uint32_t reserved_43;
+       uint32_t reserved_44;
+       uint32_t reserved_45;
+       uint32_t reserved_46;
+       uint32_t reserved_47;
+       uint32_t reserved_48;
+       uint32_t reserved_49;
+       uint32_t reserved_50;
+       uint32_t reserved_51;
+       uint32_t reserved_52;
+       uint32_t reserved_53;
+       uint32_t reserved_54;
+       uint32_t reserved_55;
+       uint32_t reserved_56;
+       uint32_t reserved_57;
+       uint32_t reserved_58;
+       uint32_t reserved_59;
+       uint32_t reserved_60;
+       uint32_t reserved_61;
+       uint32_t reserved_62;
+       uint32_t reserved_63;
+       uint32_t reserved_64;
+       uint32_t reserved_65;
+       uint32_t reserved_66;
+       uint32_t reserved_67;
+       uint32_t reserved_68;
+       uint32_t reserved_69;
+       uint32_t reserved_70;
+       uint32_t reserved_71;
+       uint32_t reserved_72;
+       uint32_t reserved_73;
+       uint32_t reserved_74;
+       uint32_t reserved_75;
+       uint32_t reserved_76;
+       uint32_t reserved_77;
+       uint32_t reserved_78;
+       uint32_t reserved_79;
+       uint32_t reserved_80;
+       uint32_t reserved_81;
+       uint32_t reserved_82;
+       uint32_t reserved_83;
+       uint32_t reserved_84;
+       uint32_t reserved_85;
+       uint32_t reserved_86;
+       uint32_t reserved_87;
+       uint32_t reserved_88;
+       uint32_t reserved_89;
+       uint32_t reserved_90;
+       uint32_t reserved_91;
+       uint32_t reserved_92;
+       uint32_t reserved_93;
+       uint32_t reserved_94;
+       uint32_t reserved_95;
+       uint32_t reserved_96;
+       uint32_t reserved_97;
+       uint32_t reserved_98;
+       uint32_t reserved_99;
+       uint32_t reserved_100;
+       uint32_t reserved_101;
+       uint32_t reserved_102;
+       uint32_t reserved_103;
+       uint32_t reserved_104;
+       uint32_t reserved_105;
+       uint32_t reserved_106;
+       uint32_t reserved_107;
+       uint32_t reserved_108;
+       uint32_t reserved_109;
+       uint32_t reserved_110;
+       uint32_t reserved_111;
+       uint32_t reserved_112;
+       uint32_t reserved_113;
+       uint32_t reserved_114;
+       uint32_t reserved_115;
+       uint32_t reserved_116;
+       uint32_t reserved_117;
+       uint32_t reserved_118;
+       uint32_t reserved_119;
+       uint32_t reserved_120;
+       uint32_t reserved_121;
+       uint32_t reserved_122;
+       uint32_t reserved_123;
+       uint32_t reserved_124;
+       uint32_t reserved_125;
+       uint32_t reserved_126;
+       uint32_t reserved_127;
+};
+
+struct vi_mqd {
+       uint32_t header;
+       uint32_t compute_dispatch_initiator;
+       uint32_t compute_dim_x;
+       uint32_t compute_dim_y;
+       uint32_t compute_dim_z;
+       uint32_t compute_start_x;
+       uint32_t compute_start_y;
+       uint32_t compute_start_z;
+       uint32_t compute_num_thread_x;
+       uint32_t compute_num_thread_y;
+       uint32_t compute_num_thread_z;
+       uint32_t compute_pipelinestat_enable;
+       uint32_t compute_perfcount_enable;
+       uint32_t compute_pgm_lo;
+       uint32_t compute_pgm_hi;
+       uint32_t compute_tba_lo;
+       uint32_t compute_tba_hi;
+       uint32_t compute_tma_lo;
+       uint32_t compute_tma_hi;
+       uint32_t compute_pgm_rsrc1;
+       uint32_t compute_pgm_rsrc2;
+       uint32_t compute_vmid;
+       uint32_t compute_resource_limits;
+       uint32_t compute_static_thread_mgmt_se0;
+       uint32_t compute_static_thread_mgmt_se1;
+       uint32_t compute_tmpring_size;
+       uint32_t compute_static_thread_mgmt_se2;
+       uint32_t compute_static_thread_mgmt_se3;
+       uint32_t compute_restart_x;
+       uint32_t compute_restart_y;
+       uint32_t compute_restart_z;
+       uint32_t compute_thread_trace_enable;
+       uint32_t compute_misc_reserved;
+       uint32_t compute_dispatch_id;
+       uint32_t compute_threadgroup_id;
+       uint32_t compute_relaunch;
+       uint32_t compute_wave_restore_addr_lo;
+       uint32_t compute_wave_restore_addr_hi;
+       uint32_t compute_wave_restore_control;
+       uint32_t reserved_39;
+       uint32_t reserved_40;
+       uint32_t reserved_41;
+       uint32_t reserved_42;
+       uint32_t reserved_43;
+       uint32_t reserved_44;
+       uint32_t reserved_45;
+       uint32_t reserved_46;
+       uint32_t reserved_47;
+       uint32_t reserved_48;
+       uint32_t reserved_49;
+       uint32_t reserved_50;
+       uint32_t reserved_51;
+       uint32_t reserved_52;
+       uint32_t reserved_53;
+       uint32_t reserved_54;
+       uint32_t reserved_55;
+       uint32_t reserved_56;
+       uint32_t reserved_57;
+       uint32_t reserved_58;
+       uint32_t reserved_59;
+       uint32_t reserved_60;
+       uint32_t reserved_61;
+       uint32_t reserved_62;
+       uint32_t reserved_63;
+       uint32_t reserved_64;
+       uint32_t compute_user_data_0;
+       uint32_t compute_user_data_1;
+       uint32_t compute_user_data_2;
+       uint32_t compute_user_data_3;
+       uint32_t compute_user_data_4;
+       uint32_t compute_user_data_5;
+       uint32_t compute_user_data_6;
+       uint32_t compute_user_data_7;
+       uint32_t compute_user_data_8;
+       uint32_t compute_user_data_9;
+       uint32_t compute_user_data_10;
+       uint32_t compute_user_data_11;
+       uint32_t compute_user_data_12;
+       uint32_t compute_user_data_13;
+       uint32_t compute_user_data_14;
+       uint32_t compute_user_data_15;
+       uint32_t cp_compute_csinvoc_count_lo;
+       uint32_t cp_compute_csinvoc_count_hi;
+       uint32_t reserved_83;
+       uint32_t reserved_84;
+       uint32_t reserved_85;
+       uint32_t cp_mqd_query_time_lo;
+       uint32_t cp_mqd_query_time_hi;
+       uint32_t cp_mqd_connect_start_time_lo;
+       uint32_t cp_mqd_connect_start_time_hi;
+       uint32_t cp_mqd_connect_end_time_lo;
+       uint32_t cp_mqd_connect_end_time_hi;
+       uint32_t cp_mqd_connect_end_wf_count;
+       uint32_t cp_mqd_connect_end_pq_rptr;
+       uint32_t cp_mqd_connect_end_pq_wptr;
+       uint32_t cp_mqd_connect_end_ib_rptr;
+       uint32_t reserved_96;
+       uint32_t reserved_97;
+       uint32_t cp_mqd_save_start_time_lo;
+       uint32_t cp_mqd_save_start_time_hi;
+       uint32_t cp_mqd_save_end_time_lo;
+       uint32_t cp_mqd_save_end_time_hi;
+       uint32_t cp_mqd_restore_start_time_lo;
+       uint32_t cp_mqd_restore_start_time_hi;
+       uint32_t cp_mqd_restore_end_time_lo;
+       uint32_t cp_mqd_restore_end_time_hi;
+       uint32_t reserved_106;
+       uint32_t reserved_107;
+       uint32_t gds_cs_ctxsw_cnt0;
+       uint32_t gds_cs_ctxsw_cnt1;
+       uint32_t gds_cs_ctxsw_cnt2;
+       uint32_t gds_cs_ctxsw_cnt3;
+       uint32_t reserved_112;
+       uint32_t reserved_113;
+       uint32_t cp_pq_exe_status_lo;
+       uint32_t cp_pq_exe_status_hi;
+       uint32_t cp_packet_id_lo;
+       uint32_t cp_packet_id_hi;
+       uint32_t cp_packet_exe_status_lo;
+       uint32_t cp_packet_exe_status_hi;
+       uint32_t gds_save_base_addr_lo;
+       uint32_t gds_save_base_addr_hi;
+       uint32_t gds_save_mask_lo;
+       uint32_t gds_save_mask_hi;
+       uint32_t ctx_save_base_addr_lo;
+       uint32_t ctx_save_base_addr_hi;
+       uint32_t reserved_126;
+       uint32_t reserved_127;
+       uint32_t cp_mqd_base_addr_lo;
+       uint32_t cp_mqd_base_addr_hi;
+       uint32_t cp_hqd_active;
+       uint32_t cp_hqd_vmid;
+       uint32_t cp_hqd_persistent_state;
+       uint32_t cp_hqd_pipe_priority;
+       uint32_t cp_hqd_queue_priority;
+       uint32_t cp_hqd_quantum;
+       uint32_t cp_hqd_pq_base_lo;
+       uint32_t cp_hqd_pq_base_hi;
+       uint32_t cp_hqd_pq_rptr;
+       uint32_t cp_hqd_pq_rptr_report_addr_lo;
+       uint32_t cp_hqd_pq_rptr_report_addr_hi;
+       uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+       uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+       uint32_t cp_hqd_pq_doorbell_control;
+       uint32_t cp_hqd_pq_wptr;
+       uint32_t cp_hqd_pq_control;
+       uint32_t cp_hqd_ib_base_addr_lo;
+       uint32_t cp_hqd_ib_base_addr_hi;
+       uint32_t cp_hqd_ib_rptr;
+       uint32_t cp_hqd_ib_control;
+       uint32_t cp_hqd_iq_timer;
+       uint32_t cp_hqd_iq_rptr;
+       uint32_t cp_hqd_dequeue_request;
+       uint32_t cp_hqd_dma_offload;
+       uint32_t cp_hqd_sema_cmd;
+       uint32_t cp_hqd_msg_type;
+       uint32_t cp_hqd_atomic0_preop_lo;
+       uint32_t cp_hqd_atomic0_preop_hi;
+       uint32_t cp_hqd_atomic1_preop_lo;
+       uint32_t cp_hqd_atomic1_preop_hi;
+       uint32_t cp_hqd_hq_status0;
+       uint32_t cp_hqd_hq_control0;
+       uint32_t cp_mqd_control;
+       uint32_t cp_hqd_hq_status1;
+       uint32_t cp_hqd_hq_control1;
+       uint32_t cp_hqd_eop_base_addr_lo;
+       uint32_t cp_hqd_eop_base_addr_hi;
+       uint32_t cp_hqd_eop_control;
+       uint32_t cp_hqd_eop_rptr;
+       uint32_t cp_hqd_eop_wptr;
+       uint32_t cp_hqd_eop_done_events;
+       uint32_t cp_hqd_ctx_save_base_addr_lo;
+       uint32_t cp_hqd_ctx_save_base_addr_hi;
+       uint32_t cp_hqd_ctx_save_control;
+       uint32_t cp_hqd_cntl_stack_offset;
+       uint32_t cp_hqd_cntl_stack_size;
+       uint32_t cp_hqd_wg_state_offset;
+       uint32_t cp_hqd_ctx_save_size;
+       uint32_t cp_hqd_gds_resource_state;
+       uint32_t cp_hqd_error;
+       uint32_t cp_hqd_eop_wptr_mem;
+       uint32_t cp_hqd_eop_dones;
+       uint32_t reserved_182;
+       uint32_t reserved_183;
+       uint32_t reserved_184;
+       uint32_t reserved_185;
+       uint32_t reserved_186;
+       uint32_t reserved_187;
+       uint32_t reserved_188;
+       uint32_t reserved_189;
+       uint32_t reserved_190;
+       uint32_t reserved_191;
+       uint32_t iqtimer_pkt_header;
+       uint32_t iqtimer_pkt_dw0;
+       uint32_t iqtimer_pkt_dw1;
+       uint32_t iqtimer_pkt_dw2;
+       uint32_t iqtimer_pkt_dw3;
+       uint32_t iqtimer_pkt_dw4;
+       uint32_t iqtimer_pkt_dw5;
+       uint32_t iqtimer_pkt_dw6;
+       uint32_t iqtimer_pkt_dw7;
+       uint32_t iqtimer_pkt_dw8;
+       uint32_t iqtimer_pkt_dw9;
+       uint32_t iqtimer_pkt_dw10;
+       uint32_t iqtimer_pkt_dw11;
+       uint32_t iqtimer_pkt_dw12;
+       uint32_t iqtimer_pkt_dw13;
+       uint32_t iqtimer_pkt_dw14;
+       uint32_t iqtimer_pkt_dw15;
+       uint32_t iqtimer_pkt_dw16;
+       uint32_t iqtimer_pkt_dw17;
+       uint32_t iqtimer_pkt_dw18;
+       uint32_t iqtimer_pkt_dw19;
+       uint32_t iqtimer_pkt_dw20;
+       uint32_t iqtimer_pkt_dw21;
+       uint32_t iqtimer_pkt_dw22;
+       uint32_t iqtimer_pkt_dw23;
+       uint32_t iqtimer_pkt_dw24;
+       uint32_t iqtimer_pkt_dw25;
+       uint32_t iqtimer_pkt_dw26;
+       uint32_t iqtimer_pkt_dw27;
+       uint32_t iqtimer_pkt_dw28;
+       uint32_t iqtimer_pkt_dw29;
+       uint32_t iqtimer_pkt_dw30;
+       uint32_t iqtimer_pkt_dw31;
+       uint32_t reserved_225;
+       uint32_t reserved_226;
+       uint32_t reserved_227;
+       uint32_t set_resources_header;
+       uint32_t set_resources_dw1;
+       uint32_t set_resources_dw2;
+       uint32_t set_resources_dw3;
+       uint32_t set_resources_dw4;
+       uint32_t set_resources_dw5;
+       uint32_t set_resources_dw6;
+       uint32_t set_resources_dw7;
+       uint32_t reserved_236;
+       uint32_t reserved_237;
+       uint32_t reserved_238;
+       uint32_t reserved_239;
+       uint32_t queue_doorbell_id0;
+       uint32_t queue_doorbell_id1;
+       uint32_t queue_doorbell_id2;
+       uint32_t queue_doorbell_id3;
+       uint32_t queue_doorbell_id4;
+       uint32_t queue_doorbell_id5;
+       uint32_t queue_doorbell_id6;
+       uint32_t queue_doorbell_id7;
+       uint32_t queue_doorbell_id8;
+       uint32_t queue_doorbell_id9;
+       uint32_t queue_doorbell_id10;
+       uint32_t queue_doorbell_id11;
+       uint32_t queue_doorbell_id12;
+       uint32_t queue_doorbell_id13;
+       uint32_t queue_doorbell_id14;
+       uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
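
Both structures are fixed-layout register images: vi_sdma_mqd spans 128 dwords and vi_mqd spans 256, with the reserved_* members padding later fields to their hardware offsets (counted from the field lists above). A compile-time sanity check, as a sketch rather than part of the header:

	/* Sketch: sizes derived by counting the dword fields above. */
	_Static_assert(sizeof(struct vi_sdma_mqd) == 128 * sizeof(uint32_t),
		       "vi_sdma_mqd must be exactly 128 dwords");
	_Static_assert(sizeof(struct vi_mqd) == 256 * sizeof(uint32_t),
		       "vi_mqd must be exactly 256 dwords");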
index 5ae5c69231280a5e1c23882289388f9ceb75a701..9f6e234e70296a9c35f4877cac98d5ff73b2c73c 100644 (file)
@@ -239,7 +239,8 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
        return atmel_hlcdc_plane_prepare_disc_area(s);
 }
 
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+                                         struct drm_crtc_state *old_s)
 {
        struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
@@ -253,7 +254,8 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
        }
 }
 
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+                                         struct drm_crtc_state *old_s)
 {
        /* TODO: write common plane control register if available */
 }
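
The atomic_begin/atomic_flush CRTC helper hooks now receive the old CRTC state, so drivers that need the pre-commit configuration no longer have to stash it themselves. A minimal stub adopting the new signature (the foo_ driver names are illustrative):

	/* Sketch: CRTC helper hooks with the new two-argument signature. */
	static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
	{
		/* e.g. quiesce hardware updates before planes are written */
	}

	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
	{
		/* e.g. arm the update, diffing against old_state if needed */
	}

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.atomic_begin = foo_crtc_atomic_begin,
		.atomic_flush = foo_crtc_atomic_flush,
	};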
index f6f2fb58eb37f583f6568964f9ad8aab8be75968..3efd91c0c6cb07b542fcd82296269b41729cdcad 100644 (file)
@@ -1063,7 +1063,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
         * Changed connectors are already in @state, so only need to look at the
         * current configuration.
         */
-       list_for_each_entry(connector, &config->connector_list, head) {
+       drm_for_each_connector(connector, state->dev) {
                if (connector->state->crtc != crtc)
                        continue;
 
@@ -1463,24 +1463,18 @@ retry:
 
                if (get_user(obj_id, objs_ptr + copied_objs)) {
                        ret = -EFAULT;
-                       goto fail;
+                       goto out;
                }
 
                obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
                if (!obj || !obj->properties) {
                        ret = -ENOENT;
-                       goto fail;
-               }
-
-               if (obj->type == DRM_MODE_OBJECT_PLANE) {
-                       plane = obj_to_plane(obj);
-                       plane_mask |= (1 << drm_plane_index(plane));
-                       plane->old_fb = plane->fb;
+                       goto out;
                }
 
                if (get_user(count_props, count_props_ptr + copied_objs)) {
                        ret = -EFAULT;
-                       goto fail;
+                       goto out;
                }
 
                copied_objs++;
@@ -1492,28 +1486,34 @@ retry:
 
                        if (get_user(prop_id, props_ptr + copied_props)) {
                                ret = -EFAULT;
-                               goto fail;
+                               goto out;
                        }
 
                        prop = drm_property_find(dev, prop_id);
                        if (!prop) {
                                ret = -ENOENT;
-                               goto fail;
+                               goto out;
                        }
 
                        if (copy_from_user(&prop_value,
                                           prop_values_ptr + copied_props,
                                           sizeof(prop_value))) {
                                ret = -EFAULT;
-                               goto fail;
+                               goto out;
                        }
 
                        ret = atomic_set_prop(state, obj, prop, prop_value);
                        if (ret)
-                               goto fail;
+                               goto out;
 
                        copied_props++;
                }
+
+               if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+                       plane = obj_to_plane(obj);
+                       plane_mask |= (1 << drm_plane_index(plane));
+                       plane->old_fb = plane->fb;
+               }
        }
 
        if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1523,7 @@ retry:
                        e = create_vblank_event(dev, file_priv, arg->user_data);
                        if (!e) {
                                ret = -ENOMEM;
-                               goto fail;
+                               goto out;
                        }
 
                        crtc_state->event = e;
@@ -1533,13 +1533,15 @@ retry:
        if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
                ret = drm_atomic_check_only(state);
                /* _check_only() does not free state, unlike _commit() */
-               drm_atomic_state_free(state);
+               if (!ret)
+                       drm_atomic_state_free(state);
        } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
                ret = drm_atomic_async_commit(state);
        } else {
                ret = drm_atomic_commit(state);
        }
 
+out:
        /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
         * locks (ie. while it is still safe to deref plane->state).  We
         * need to do this here because the driver entry points cannot
@@ -1552,41 +1554,35 @@ retry:
                                drm_framebuffer_reference(new_fb);
                        plane->fb = new_fb;
                        plane->crtc = plane->state->crtc;
-               } else {
-                       plane->old_fb = NULL;
-               }
-               if (plane->old_fb) {
-                       drm_framebuffer_unreference(plane->old_fb);
-                       plane->old_fb = NULL;
+
+                       if (plane->old_fb)
+                               drm_framebuffer_unreference(plane->old_fb);
                }
+               plane->old_fb = NULL;
        }
 
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       return ret;
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
 
-fail:
-       if (ret == -EDEADLK)
-               goto backoff;
+       if (ret) {
+               if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+                       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+                               if (!crtc_state->event)
+                                       continue;
 
-       if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               for_each_crtc_in_state(state, crtc, crtc_state, i) {
-                       destroy_vblank_event(dev, file_priv, crtc_state->event);
-                       crtc_state->event = NULL;
+                               destroy_vblank_event(dev, file_priv,
+                                                    crtc_state->event);
+                       }
                }
-       }
 
-       drm_atomic_state_free(state);
+               drm_atomic_state_free(state);
+       }
 
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
        return ret;
-
-backoff:
-       drm_atomic_state_clear(state);
-       drm_modeset_backoff(&ctx);
-
-       goto retry;
 }
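
The rewritten exit path collapses the old fail:/backoff: labels into one tail: every error jumps to out, -EDEADLK clears the state and retries after backing off, and only genuine failures destroy pending events and free the state. The locking idiom, reduced to a self-contained sketch in which foo_apply() stands in for the state building and commit steps:

	/* Sketch: the modeset-lock acquire/backoff pattern the ioctl
	 * now follows end to end; foo_apply() is hypothetical. */
	static int foo_atomic_op(struct drm_atomic_state *state,
				 struct drm_modeset_acquire_ctx *ctx)
	{
		int ret;

	retry:
		ret = foo_apply(state);		/* may return -EDEADLK */
		if (ret == -EDEADLK) {
			drm_atomic_state_clear(state);
			drm_modeset_backoff(ctx);
			goto retry;
		}
		if (ret)
			drm_atomic_state_free(state);	/* commit consumed it on success */

		drm_modeset_drop_locks(ctx);
		drm_modeset_acquire_fini(ctx);
		return ret;
	}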
index 9dcc7280e5720255baed2786ab7d8fc11554c845..cf27b6b605d8403270d348378e5720680852f24a 100644 (file)
@@ -89,7 +89,7 @@ get_current_crtc_for_encoder(struct drm_device *dev,
 
        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-       list_for_each_entry(connector, &config->connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                if (connector->state->best_encoder != encoder)
                        continue;
 
@@ -124,7 +124,7 @@ steal_encoder(struct drm_atomic_state *state,
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);
 
-       crtc_state->mode_changed = true;
+       crtc_state->connectors_changed = true;
 
        list_for_each_entry(connector, &config->connector_list, head) {
                if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                        idx = drm_crtc_index(connector->state->crtc);
 
                        crtc_state = state->crtc_states[idx];
-                       crtc_state->mode_changed = true;
+                       crtc_state->connectors_changed = true;
                }
 
                if (connector_state->crtc) {
                        idx = drm_crtc_index(connector_state->crtc);
 
                        crtc_state = state->crtc_states[idx];
-                       crtc_state->mode_changed = true;
+                       crtc_state->connectors_changed = true;
                }
        }
 
@@ -241,7 +241,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        idx = drm_crtc_index(connector_state->crtc);
 
        crtc_state = state->crtc_states[idx];
-       crtc_state->mode_changed = true;
+       crtc_state->connectors_changed = true;
 
        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
                         connector->base.id,
@@ -264,7 +264,8 @@ mode_fixup(struct drm_atomic_state *state)
        bool ret;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!crtc_state->mode_changed)
+               if (!crtc_state->mode_changed &&
+                   !crtc_state->connectors_changed)
                        continue;
 
                drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -320,7 +321,8 @@ mode_fixup(struct drm_atomic_state *state)
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;
 
-               if (!crtc_state->mode_changed)
+               if (!crtc_state->mode_changed &&
+                   !crtc_state->connectors_changed)
                        continue;
 
                funcs = crtc->helper_private;
@@ -346,9 +348,14 @@ mode_fixup(struct drm_atomic_state *state)
  *
  * Check the state object to see if the requested state is physically possible.
  * This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update, adds any additional connectors needed for full modesets, and calls
+ * down into the ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
  *
  * IMPORTANT:
  *
@@ -381,7 +388,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                if (crtc->state->enable != crtc_state->enable) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
                                         crtc->base.id);
+
+                       /*
+                        * For clarity this assignment is done here, but
+                        * enable == 0 is only true when there are no
+                        * connectors and a NULL mode.
+                        *
+                        * The other way around is true as well. enable != 0
+                        * iff connectors are attached and a mode is set.
+                        */
                        crtc_state->mode_changed = true;
+                       crtc_state->connectors_changed = true;
                }
        }
 
@@ -456,6 +473,9 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
  * This does all the plane update related checks by calling into the
  * ->atomic_check hooks provided by the driver.
  *
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
  * RETURNS
  * Zero for success or -errno
  */
@@ -648,15 +668,29 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
        struct drm_crtc_state *old_crtc_state;
        int i;
 
-       /* clear out existing links */
+       /* clear out existing links and update dpms */
        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
-               if (!connector->encoder)
-                       continue;
+               if (connector->encoder) {
+                       WARN_ON(!connector->encoder->crtc);
 
-               WARN_ON(!connector->encoder->crtc);
+                       connector->encoder->crtc = NULL;
+                       connector->encoder = NULL;
+               }
 
-               connector->encoder->crtc = NULL;
-               connector->encoder = NULL;
+               crtc = connector->state->crtc;
+               if ((!crtc && old_conn_state->crtc) ||
+                   (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+                       struct drm_property *dpms_prop =
+                               dev->mode_config.dpms_property;
+                       int mode = DRM_MODE_DPMS_OFF;
+
+                       if (crtc && crtc->state->active)
+                               mode = DRM_MODE_DPMS_ON;
+
+                       connector->dpms = mode;
+                       drm_object_property_set_value(&connector->base,
+                                                     dpms_prop, mode);
+               }
        }
 
        /* set new links */
@@ -673,10 +707,16 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
 
        /* set legacy state in the crtc structure */
        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               struct drm_plane *primary = crtc->primary;
+
                crtc->mode = crtc->state->mode;
                crtc->enabled = crtc->state->enable;
-               crtc->x = crtc->primary->state->src_x >> 16;
-               crtc->y = crtc->primary->state->src_y >> 16;
+
+               if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+                   primary->state->crtc == crtc) {
+                       crtc->x = primary->state->src_x >> 16;
+                       crtc->y = primary->state->src_y >> 16;
+               }
 
                if (crtc->state->enable)
                        drm_calc_timestamping_constants(crtc,
@@ -1146,7 +1186,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                if (!funcs || !funcs->atomic_begin)
                        continue;
 
-               funcs->atomic_begin(crtc);
+               funcs->atomic_begin(crtc, old_crtc_state);
        }
 
        for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1176,7 +1216,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                if (!funcs || !funcs->atomic_flush)
                        continue;
 
-               funcs->atomic_flush(crtc);
+               funcs->atomic_flush(crtc, old_crtc_state);
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1212,7 +1252,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 
        crtc_funcs = crtc->helper_private;
        if (crtc_funcs && crtc_funcs->atomic_begin)
-               crtc_funcs->atomic_begin(crtc);
+               crtc_funcs->atomic_begin(crtc, old_crtc_state);
 
        drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
                struct drm_plane_state *old_plane_state =
@@ -1235,7 +1275,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
        }
 
        if (crtc_funcs && crtc_funcs->atomic_flush)
-               crtc_funcs->atomic_flush(crtc);
+               crtc_funcs->atomic_flush(crtc, old_crtc_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
@@ -1923,10 +1963,6 @@ retry:
        if (ret != 0)
                goto fail;
 
-       /* TODO: ->page_flip is the only driver callback where the core
-        * doesn't update plane->fb. For now patch it up here. */
-       plane->fb = plane->state->fb;
-
        /* Driver takes ownership of state on successful async commit. */
        return 0;
 fail:
@@ -1960,9 +1996,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
  * implementing the legacy DPMS connector interface. It computes the new desired
  * ->active state for the corresponding CRTC (if the connector is enabled) and
  *  updates it.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
  */
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-                                     int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+                                    int mode)
 {
        struct drm_mode_config *config = &connector->dev->mode_config;
        struct drm_atomic_state *state;
@@ -1971,6 +2010,7 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
        struct drm_connector *tmp_connector;
        int ret;
        bool active = false;
+       int old_mode = connector->dpms;
 
        if (mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
@@ -1979,22 +2019,23 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
        crtc = connector->state->crtc;
 
        if (!crtc)
-               return;
+               return 0;
 
-       /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
        state = drm_atomic_state_alloc(connector->dev);
        if (!state)
-               return;
+               return -ENOMEM;
 
        state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
        crtc_state = drm_atomic_get_crtc_state(state, crtc);
-       if (IS_ERR(crtc_state))
-               return;
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
 
        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-       list_for_each_entry(tmp_connector, &config->connector_list, head) {
+       drm_for_each_connector(tmp_connector, connector->dev) {
                if (tmp_connector->state->crtc != crtc)
                        continue;
 
@@ -2009,17 +2050,16 @@ retry:
        if (ret != 0)
                goto fail;
 
-       /* Driver takes ownership of state on successful async commit. */
-       return;
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
 fail:
        if (ret == -EDEADLK)
                goto backoff;
 
+       connector->dpms = old_mode;
        drm_atomic_state_free(state);
 
-       WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
-       return;
+       return ret;
 backoff:
        drm_atomic_state_clear(state);
        drm_atomic_legacy_backoff(state);
@@ -2080,6 +2120,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
        state->mode_changed = false;
        state->active_changed = false;
        state->planes_changed = false;
+       state->connectors_changed = false;
        state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
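
With ->dpms now able to fail, the helper returns the error (restoring connector->dpms first) instead of WARNing. Atomic drivers wire it straight into their connector funcs; a sketch:

	/* Sketch: legacy DPMS forwarded to the atomic helper, whose
	 * return value now propagates back through the ioctl. */
	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms = drm_atomic_helper_connector_dpms,
		/* .detect, .fill_modes, .destroy, ... as before */
	};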
index 9b23525c0ed043f0220760010c2584e86ed163f6..192a5f9eeb74213610fdf8f4a10529fdf88555ea 100644 (file)
@@ -53,6 +53,10 @@ struct drm_ctx_list {
  */
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        mutex_lock(&dev->struct_mutex);
        idr_remove(&dev->ctx_idr, ctx_handle);
        mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        idr_init(&dev->ctx_idr);
-       return 0;
 }
 
 /**
@@ -101,6 +108,10 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
  */
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        mutex_lock(&dev->struct_mutex);
        idr_destroy(&dev->ctx_idr);
        mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_ctx_list *pos, *tmp;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        mutex_lock(&dev->ctxlist_mutex);
 
        list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map;
        struct drm_map_list *_entry;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        mutex_lock(&dev->struct_mutex);
 
        map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list = NULL;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map
@@ -305,6 +328,10 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
        struct drm_ctx ctx;
        int i;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (res->count >= DRM_RESERVED_CONTEXTS) {
                memset(&ctx, 0, sizeof(ctx));
                for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
        struct drm_ctx_list *ctx_entry;
        struct drm_ctx *ctx = data;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        ctx->handle = drm_legacy_ctxbitmap_next(dev);
        if (ctx->handle == DRM_KERNEL_CONTEXT) {
                /* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        /* This is 0, because we don't handle any context flags */
        ctx->flags = 0;
 
@@ -400,6 +435,10 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -420,6 +459,10 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -442,6 +485,10 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+           drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        if (ctx->handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_dtor)
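
Each legacy context entry point now bails out when the driver is KMS-only and has not opted into DRIVER_KMS_LEGACY_CONTEXT. The repeated condition is easier to read as a predicate; a hypothetical wrapper (not part of the patch):

	/* Sketch: the guard repeated in every ioctl above, inverted. */
	static bool drm_legacy_ctx_supported(struct drm_device *dev)
	{
		return drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) ||
		       !drm_core_check_feature(dev, DRIVER_MODESET);
	}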
index fed748311b928cc534f781505a37686ec7a46ba3..ca077657604edb7e9acb6a33785ee2b9b092b1c8 100644 (file)
@@ -615,7 +615,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
        if (atomic_read(&fb->refcount.refcount) > 1) {
                drm_modeset_lock_all(dev);
                /* remove from any CRTC */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               drm_for_each_crtc(crtc, dev) {
                        if (crtc->primary->fb == fb) {
                                /* should turn off the crtc */
                                memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
                        }
                }
 
-               list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               drm_for_each_plane(plane, dev) {
                        if (plane->fb == fb)
                                drm_plane_force_disable(plane);
                }
@@ -736,7 +736,7 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
        unsigned int index = 0;
        struct drm_crtc *tmp;
 
-       list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(tmp, crtc->dev) {
                if (tmp == crtc)
                        return index;
 
@@ -988,7 +988,7 @@ unsigned int drm_connector_index(struct drm_connector *connector)
 
        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-       list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+       drm_for_each_connector(tmp, connector->dev) {
                if (tmp == connector)
                        return index;
 
@@ -1054,7 +1054,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
 {
        struct drm_connector *connector;
 
-       /* taking the mode config mutex ends up in a clash with sysfs */
+       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                drm_connector_unregister(connector);
 
@@ -1280,7 +1280,7 @@ unsigned int drm_plane_index(struct drm_plane *plane)
        unsigned int index = 0;
        struct drm_plane *tmp;
 
-       list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+       drm_for_each_plane(tmp, plane->dev) {
                if (tmp == plane)
                        return index;
 
@@ -1305,7 +1305,7 @@ drm_plane_from_index(struct drm_device *dev, int idx)
        struct drm_plane *plane;
        unsigned int i = 0;
 
-       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+       drm_for_each_plane(plane, dev) {
                if (i == idx)
                        return plane;
                i++;
@@ -1679,70 +1679,6 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
-       uint32_t total_objects = 0;
-
-       total_objects += dev->mode_config.num_crtc;
-       total_objects += dev->mode_config.num_connector;
-       total_objects += dev->mode_config.num_encoder;
-
-       group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
-       if (!group->id_list)
-               return -ENOMEM;
-
-       group->num_crtcs = 0;
-       group->num_connectors = 0;
-       group->num_encoders = 0;
-       return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
-       kfree(group->id_list);
-       group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
-                                    struct drm_mode_group *group)
-{
-       struct drm_crtc *crtc;
-       struct drm_encoder *encoder;
-       struct drm_connector *connector;
-       int ret;
-
-       ret = drm_mode_group_init(dev, group);
-       if (ret)
-               return ret;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-               group->id_list[group->num_crtcs++] = crtc->base.id;
-
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-               group->id_list[group->num_crtcs + group->num_encoders++] =
-               encoder->base.id;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               group->id_list[group->num_crtcs + group->num_encoders +
-                              group->num_connectors++] = connector->base.id;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
-       drm_modeset_lock_all(dev);
-       drm_mode_group_destroy(&dev->primary->mode_group);
-       drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
-       drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
 /**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        int crtc_count = 0;
        int fb_count = 0;
        int encoder_count = 0;
-       int copied = 0, i;
+       int copied = 0;
        uint32_t __user *fb_id;
        uint32_t __user *crtc_id;
        uint32_t __user *connector_id;
        uint32_t __user *encoder_id;
-       struct drm_mode_group *mode_group;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -1809,24 +1744,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        /* mode_config.mutex protects the connector list against e.g. DP MST
         * connector hot-adding. CRTC/Plane lists are invariant. */
        mutex_lock(&dev->mode_config.mutex);
-       if (!drm_is_primary_client(file_priv)) {
-
-               mode_group = NULL;
-               list_for_each(lh, &dev->mode_config.crtc_list)
-                       crtc_count++;
-
-               list_for_each(lh, &dev->mode_config.connector_list)
-                       connector_count++;
+       drm_for_each_crtc(crtc, dev)
+               crtc_count++;
 
-               list_for_each(lh, &dev->mode_config.encoder_list)
-                       encoder_count++;
-       } else {
+       drm_for_each_connector(connector, dev)
+               connector_count++;
 
-               mode_group = &file_priv->master->minor->mode_group;
-               crtc_count = mode_group->num_crtcs;
-               connector_count = mode_group->num_connectors;
-               encoder_count = mode_group->num_encoders;
-       }
+       drm_for_each_encoder(encoder, dev)
+               encoder_count++;
 
        card_res->max_height = dev->mode_config.max_height;
        card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        if (card_res->count_crtcs >= crtc_count) {
                copied = 0;
                crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-               if (!mode_group) {
-                       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-                                           head) {
-                               DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-                               if (put_user(crtc->base.id, crtc_id + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
-                       }
-               } else {
-                       for (i = 0; i < mode_group->num_crtcs; i++) {
-                               if (put_user(mode_group->id_list[i],
-                                            crtc_id + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
+               drm_for_each_crtc(crtc, dev) {
+                       DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+                       if (put_user(crtc->base.id, crtc_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
                        }
+                       copied++;
                }
        }
        card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        if (card_res->count_encoders >= encoder_count) {
                copied = 0;
                encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-               if (!mode_group) {
-                       list_for_each_entry(encoder,
-                                           &dev->mode_config.encoder_list,
-                                           head) {
-                               DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-                                               encoder->name);
-                               if (put_user(encoder->base.id, encoder_id +
-                                            copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
-                       }
-               } else {
-                       for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
-                               if (put_user(mode_group->id_list[i],
-                                            encoder_id + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
+               drm_for_each_encoder(encoder, dev) {
+                       DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+                                       encoder->name);
+                       if (put_user(encoder->base.id, encoder_id +
+                                    copied)) {
+                               ret = -EFAULT;
+                               goto out;
                        }
-
+                       copied++;
                }
        }
        card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        if (card_res->count_connectors >= connector_count) {
                copied = 0;
                connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-               if (!mode_group) {
-                       list_for_each_entry(connector,
-                                           &dev->mode_config.connector_list,
-                                           head) {
-                               DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                                       connector->base.id,
-                                       connector->name);
-                               if (put_user(connector->base.id,
-                                            connector_id + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
-                       }
-               } else {
-                       int start = mode_group->num_crtcs +
-                               mode_group->num_encoders;
-                       for (i = start; i < start + mode_group->num_connectors; i++) {
-                               if (put_user(mode_group->id_list[i],
-                                            connector_id + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
+               drm_for_each_connector(connector, dev) {
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                               connector->base.id,
+                               connector->name);
+                       if (put_user(connector->base.id,
+                                    connector_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
                        }
+                       copied++;
                }
        }
        card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
 
        /* For atomic drivers only state objects are synchronously updated and
         * protected by modeset locks, so check those first. */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                if (!connector->state)
                        continue;
 
@@ -2291,7 +2175,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
                plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
                /* Plane lists are invariant, no locking needed. */
-               list_for_each_entry(plane, &config->plane_list, head) {
+               drm_for_each_plane(plane, dev) {
                        /*
                         * Unless userspace set the 'universal planes'
                         * capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
         * connectors from it), hence we need to refcount the fbs across all
         * crtcs. Atomic modeset will have saner semantics ...
         */
-       list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+       drm_for_each_crtc(tmp, crtc->dev)
                tmp->primary->old_fb = tmp->primary->fb;
 
        fb = set->fb;
@@ -2607,7 +2491,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
                crtc->primary->fb = fb;
        }
 
-       list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(tmp, crtc->dev) {
                if (tmp->primary->fb)
                        drm_framebuffer_reference(tmp->primary->fb);
                if (tmp->primary->old_fb)
@@ -4301,7 +4185,6 @@ void drm_property_unreference_blob(struct drm_property_blob *blob)
                mutex_unlock(&dev->mode_config.blob_lock);
        else
                might_lock(&dev->mode_config.blob_lock);
-
 }
 EXPORT_SYMBOL(drm_property_unreference_blob);
 
@@ -4472,9 +4355,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
                        goto err_created;
        }
 
-       if (old_blob)
-               drm_property_unreference_blob(old_blob);
-
+       drm_property_unreference_blob(old_blob);
        *replace = new_blob;
 
        return 0;
@@ -4872,9 +4753,9 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 
        /* Do DPMS ourselves */
        if (property == connector->dev->mode_config.dpms_property) {
-               if (connector->funcs->dpms)
-                       (*connector->funcs->dpms)(connector, (int)value);
                ret = 0;
+               if (connector->funcs->dpms)
+                       ret = (*connector->funcs->dpms)(connector, (int)value);
        } else if (connector->funcs->set_property)
                ret = connector->funcs->set_property(connector, property, value);
 
@@ -5349,13 +5230,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
                /* Keep the old fb, don't unref it. */
                crtc->primary->old_fb = NULL;
        } else {
-               /*
-                * Warn if the driver hasn't properly updated the crtc->fb
-                * field to reflect that the new framebuffer is now used.
-                * Failing to do so will screw with the reference counting
-                * on framebuffers.
-                */
-               WARN_ON(crtc->primary->fb != fb);
+               crtc->primary->fb = fb;
                /* Unref only the old framebuffer. */
                fb = NULL;
        }
@@ -5386,19 +5261,19 @@ void drm_mode_config_reset(struct drm_device *dev)
        struct drm_encoder *encoder;
        struct drm_connector *connector;
 
-       list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+       drm_for_each_plane(plane, dev)
                if (plane->funcs->reset)
                        plane->funcs->reset(plane);
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+       drm_for_each_crtc(crtc, dev)
                if (crtc->funcs->reset)
                        crtc->funcs->reset(crtc);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+       drm_for_each_encoder(encoder, dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev)
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
 }
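
The open-coded list_for_each_entry() walks over the mode_config lists give way throughout to the drm_for_each_crtc/plane/encoder/connector iterators, which hide the list heads behind one obvious spelling. Typical usage, as a sketch:

	/* Sketch: walking mode objects with the new iterator macros. */
	static void foo_dump_pipeline(struct drm_device *dev)
	{
		struct drm_crtc *crtc;
		struct drm_connector *connector;

		drm_for_each_crtc(crtc, dev)
			DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

		/* connector walks still require the locking noted above */
		drm_for_each_connector(connector, dev)
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
				      connector->base.id, connector->name);
	}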
index 393114df88a3dbeb5326db27c7a16465fcf1550b..ef534758a02c6f946061107aaa0262527c8a25fb 100644 (file)
@@ -121,7 +121,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
                WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        }
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev)
                if (connector->encoder == encoder)
                        return true;
        return false;
@@ -151,7 +151,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
        if (!oops_in_progress)
                WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+       drm_for_each_encoder(encoder, dev)
                if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
                        return true;
        return false;
@@ -180,7 +180,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
 
        drm_warn_on_modeset_not_all_locked(dev);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
                if (!drm_helper_encoder_in_use(encoder)) {
                        drm_encoder_disable(encoder);
                        /* disconnect encoder from any connector */
@@ -188,7 +188,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
                }
        }
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(crtc, dev) {
                const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
                crtc->enabled = drm_helper_crtc_in_use(crtc);
                if (!crtc->enabled) {
@@ -230,7 +230,7 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
        const struct drm_encoder_helper_funcs *encoder_funcs;
        struct drm_encoder *encoder;
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
                encoder_funcs = encoder->helper_private;
                /* Disable unused encoders */
                if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
 
                if (encoder->crtc != crtc)
                        continue;
@@ -334,7 +334,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        crtc->hwmode = *adjusted_mode;
 
        /* Prepare the encoders and CRTCs before setting the mode. */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
 
                if (encoder->crtc != crtc)
                        continue;
@@ -359,7 +359,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        if (!ret)
            goto done;
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
 
                if (encoder->crtc != crtc)
                        continue;
@@ -376,7 +376,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        crtc_funcs->commit(crtc);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
 
                if (encoder->crtc != crtc)
                        continue;
@@ -418,11 +418,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
        struct drm_encoder *encoder;
 
        /* Decouple all encoders and their attached connectors from this crtc */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
                if (encoder->crtc != crtc)
                        continue;
 
-               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               drm_for_each_connector(connector, dev) {
                        if (connector->encoder != encoder)
                                continue;
 
@@ -519,12 +519,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         * restored, not the driver's personal bookkeeping.
         */
        count = 0;
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
                save_encoders[count++] = *encoder;
        }
 
        count = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                save_connectors[count++] = *connector;
        }
 
@@ -562,7 +562,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
        /* a) traverse passed in connector list and get encoders for them */
        count = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                const struct drm_connector_helper_funcs *connector_funcs =
                        connector->helper_private;
                new_encoder = connector->encoder;
@@ -602,7 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        }
 
        count = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                if (!connector->encoder)
                        continue;
 
@@ -685,12 +685,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 fail:
        /* Restore all previous data. */
        count = 0;
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       drm_for_each_encoder(encoder, dev) {
                *encoder = save_encoders[count++];
        }
 
        count = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
                *connector = save_connectors[count++];
        }
 
@@ -712,7 +712,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
        struct drm_connector *connector;
        struct drm_device *dev = encoder->dev;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev)
                if (connector->encoder == encoder)
                        if (connector->dpms < dpms)
                                dpms = connector->dpms;
@@ -746,7 +746,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
        struct drm_connector *connector;
        struct drm_device *dev = crtc->dev;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev)
                if (connector->encoder && connector->encoder->crtc == crtc)
                        if (connector->dpms < dpms)
                                dpms = connector->dpms;
@@ -762,15 +762,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
  * implementing the DPMS connector attribute. It computes the new desired DPMS
  * state for all encoders and crtcs in the output mesh and calls the ->dpms()
  * callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
  */
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
        struct drm_encoder *encoder = connector->encoder;
        struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
        int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
        if (mode == connector->dpms)
-               return;
+               return 0;
 
        old_dpms = connector->dpms;
        connector->dpms = mode;
@@ -802,7 +805,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
                }
        }
 
-       return;
+       return 0;
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
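
With the helper returning int, drivers can plug it straight into the ->dpms hook, whose signature now propagates an error code up through drm_mode_connector_set_obj_prop() as seen above. A hypothetical hookup (the foo_* names are illustrative, not from this series):

static const struct drm_connector_funcs foo_connector_funcs = {
        .dpms = drm_helper_connector_dpms,      /* now returns int */
        .detect = foo_connector_detect,         /* hypothetical driver hook */
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
};
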
 
@@ -862,7 +865,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
        bool ret;
 
        drm_modeset_lock_all(dev);
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(crtc, dev) {
 
                if (!crtc->enabled)
                        continue;
@@ -876,7 +879,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 
                /* Turn off outputs that were already powered off */
                if (drm_helper_choose_crtc_dpms(crtc)) {
-                       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       drm_for_each_encoder(encoder, dev) {
 
                                if(encoder->crtc != crtc)
                                        continue;
@@ -928,15 +931,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
        if (crtc->funcs->atomic_duplicate_state)
                crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
        else {
-               crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-               if (!crtc_state)
-                       return -ENOMEM;
-               if (crtc->state)
-                       __drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
-               else
-                       crtc_state->crtc = crtc;
+               if (!crtc->state)
+                       drm_atomic_helper_crtc_reset(crtc);
+
+               crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
        }
 
+       if (!crtc_state)
+               return -ENOMEM;
+
        crtc_state->planes_changed = true;
        crtc_state->mode_changed = true;
        ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
        ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
 out:
-       if (crtc->funcs->atomic_destroy_state)
-               crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-       else {
-               __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-               kfree(crtc_state);
+       if (crtc_state) {
+               if (crtc->funcs->atomic_destroy_state)
+                       crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+               else
+                       drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
        }
 
        return ret;
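
The rewrite leans on drm_atomic_helper_crtc_duplicate_state() returning NULL both on allocation failure and when crtc->state has never been set, which is why the code resets the CRTC first and then needs only the single NULL check. Roughly (a sketch; the exact in-tree body may differ):

struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct drm_crtc_state *state;

        if (WARN_ON(!crtc->state))
                return NULL;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (state)
                __drm_atomic_helper_crtc_duplicate_state(crtc, state);

        return state;
}
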
index b7bf4ce8c012bfb68442307b2b6f9f5658d0df48..53d09a19f7e13cb8c138f97da613df42e7170491 100644 (file)
@@ -285,7 +285,6 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
        if (!minor)
                return;
 
-       drm_mode_group_destroy(&minor->mode_group);
        put_device(minor->kdev);
 
        spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
        if (drm_ht_create(&dev->map_hash, 12))
                goto err_minors;
 
-       ret = drm_legacy_ctxbitmap_init(dev);
-       if (ret) {
-               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-               goto err_ht;
-       }
+       drm_legacy_ctxbitmap_init(dev);
 
        if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 
 err_ctxbitmap:
        drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
        drm_ht_remove(&dev->map_hash);
 err_minors:
        drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
                        goto err_minors;
        }
 
-       /* setup grouping for legacy outputs */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_mode_group_init_legacy_group(dev,
-                               &dev->primary->mode_group);
-               if (ret)
-                       goto err_unload;
-       }
-
        ret = 0;
        goto out_unlock;
 
-err_unload:
-       if (dev->driver->unload)
-               dev->driver->unload(dev);
 err_minors:
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
index 7087da37dae0bd6490831ae7a4cfc56700da1596..e6e05bb75a7715f71d894962ed435e36c9667dc8 100644 (file)
@@ -3413,7 +3413,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev)
                if (connector->encoder == encoder && connector->eld[0])
                        return connector;
 
index 5c1aca443e54f851cf7abeb9679962f3ec96ef29..f01dc25df2dcb61477f9cb4bbb6537516dda2b58 100644 (file)
@@ -209,23 +209,11 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_framebuffer *fb;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-       if (ret)
-               return ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret) {
-               mutex_unlock(&dev->mode_config.mutex);
-               return ret;
-       }
 
-       list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+       mutex_lock(&dev->mode_config.fb_lock);
+       drm_for_each_fb(fb, dev)
                drm_fb_cma_describe(fb, m);
-
-       mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->mode_config.mutex);
+       mutex_unlock(&dev->mode_config.fb_lock);
 
        return 0;
 }
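
The interruptible double-mutex dance could be dropped because framebuffer list lifetime is governed by mode_config.fb_lock alone; drm_for_each_fb presumably asserts exactly that, roughly:

#define drm_for_each_fb(fb, dev) \
        for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),      \
             fb = list_first_entry(&(dev)->mode_config.fb_list,           \
                                   struct drm_framebuffer, head);         \
             &fb->head != &(dev)->mode_config.fb_list;                    \
             fb = list_next_entry(fb, head))
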
index cac422916c7afbbaf38095354784fa3d81632294..73f90f7e2f74407d2b346ce8d37f410624459314 100644 (file)
@@ -89,8 +89,9 @@ static LIST_HEAD(kernel_fb_helper_list);
  * connectors to the fbdev, e.g. if some are reserved for special purposes or
  * not adequate to be used for the fbcon.
  *
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
  */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -98,7 +99,8 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
        struct drm_connector *connector;
        int i;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       mutex_lock(&dev->mode_config.mutex);
+       drm_for_each_connector(connector, dev) {
                struct drm_fb_helper_connector *fb_helper_connector;
 
                fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
                fb_helper_connector->connector = connector;
                fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
        }
+       mutex_unlock(&dev->mode_config.mutex);
        return 0;
 fail:
        for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@ fail:
                fb_helper->connector_info[i] = NULL;
        }
        fb_helper->connector_count = 0;
+       mutex_unlock(&dev->mode_config.mutex);
+
        return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -269,7 +274,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *c;
 
-       list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(c, dev) {
                if (crtc->base.id == c->base.id)
                        return c->primary->fb;
        }
@@ -321,7 +326,7 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 
        drm_warn_on_modeset_not_all_locked(dev);
 
-       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+       drm_for_each_plane(plane, dev) {
                if (plane->type != DRM_PLANE_TYPE_PRIMARY)
                        drm_plane_force_disable(plane);
 
@@ -429,24 +434,6 @@ static bool drm_fb_helper_force_kernel_mode(void)
        return error;
 }
 
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
-                       void *panic_str)
-{
-       /*
-        * It's a waste of time and effort to switch back to text console
-        * if the kernel should reboot before panic messages can be seen.
-        */
-       if (panic_timeout < 0)
-               return 0;
-
-       pr_err("panic occurred, switching back to text console\n");
-       return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
-       .notifier_call = drm_fb_helper_panic,
-};
-
 static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
@@ -458,7 +445,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
        if (dev->primary->master)
                return false;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(crtc, dev) {
                if (crtc->primary->fb)
                        crtcs_bound++;
                if (crtc->primary->fb == fb_helper->fb)
@@ -655,7 +642,7 @@ int drm_fb_helper_init(struct drm_device *dev,
        }
 
        i = 0;
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(crtc, dev) {
                fb_helper->crtc_info[i].mode_set.crtc = crtc;
                i++;
        }
@@ -672,9 +659,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
        if (!list_empty(&fb_helper->kernel_fb_list)) {
                list_del(&fb_helper->kernel_fb_list);
                if (list_empty(&kernel_fb_helper_list)) {
-                       pr_info("drm: unregistered panic notifier\n");
-                       atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                        &paniced);
                        unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
                }
        }
@@ -1109,12 +1093,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
                        info->node, info->fix.id);
 
-       /* Switch back to kernel console on panic */
-       /* multi card linked list maybe */
        if (list_empty(&kernel_fb_helper_list)) {
-               dev_info(fb_helper->dev->dev, "registered panic notifier\n");
-               atomic_notifier_chain_register(&panic_notifier_list,
-                                              &paniced);
                register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
        }
 
index 16a1647707136cb7207a83967af7235367e1ee73..27a4228b43431b05da2fcd3138a9c33ab861d73a 100644 (file)
@@ -778,22 +778,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
 
        drm_gem_object_reference(obj);
-
-       mutex_lock(&obj->dev->struct_mutex);
-       drm_vm_open_locked(obj->dev, vma);
-       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
-       struct drm_device *dev = obj->dev;
 
-       mutex_lock(&dev->struct_mutex);
-       drm_vm_close_locked(obj->dev, vma);
-       drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
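
With the legacy drm_vm_open_locked()/drm_vm_close_locked() bookkeeping gone, the GEM open/close helpers reduce to pure reference counting and no longer touch dev->struct_mutex. A typical driver hookup would look like this (foo_gem_fault is a hypothetical fault handler, not part of this series):

static const struct vm_operations_struct foo_gem_vm_ops = {
        .fault = foo_gem_fault,         /* hypothetical driver fault handler */
        .open = drm_gem_vm_open,        /* now only takes a GEM reference */
        .close = drm_gem_vm_close,      /* drops it without dev->struct_mutex */
};
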
 
@@ -850,7 +842,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
         */
        drm_gem_object_reference(obj);
 
-       drm_vm_open_locked(dev, vma);
        return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
index bd75f303da63d9c638c027b89fac17cdc2c00cc9..9edad11dca9852f551138b180b4eddbdc32038f2 100644 (file)
@@ -381,11 +381,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
                          struct seq_file *m)
 {
        struct drm_gem_object *obj = &cma_obj->base;
-       struct drm_device *dev = obj->dev;
        uint64_t off;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
        off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
index 9cfcd0aef0dfacf8389407d78981b78c8c85ddf8..ddfa6014c2c2e2cb0e9aa9b99a8fa41c7eb926f2 100644 (file)
@@ -95,7 +95,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        version = compat_alloc_user_space(sizeof(*version));
-       if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+       if (!version)
                return -EFAULT;
        if (__put_user(v32.name_len, &version->name_len)
            || __put_user((void __user *)(unsigned long)v32.name,
@@ -142,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        u = compat_alloc_user_space(sizeof(*u));
-       if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+       if (!u)
                return -EFAULT;
        if (__put_user(uq32.unique_len, &u->unique_len)
            || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -170,7 +170,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        u = compat_alloc_user_space(sizeof(*u));
-       if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+       if (!u)
                return -EFAULT;
        if (__put_user(uq32.unique_len, &u->unique_len)
            || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -202,7 +202,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        map = compat_alloc_user_space(sizeof(*map));
-       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+       if (!map)
                return -EFAULT;
        if (__put_user(idx, &map->offset))
                return -EFAULT;
@@ -239,7 +239,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        map = compat_alloc_user_space(sizeof(*map));
-       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+       if (!map)
                return -EFAULT;
        if (__put_user(m32.offset, &map->offset)
            || __put_user(m32.size, &map->size)
@@ -279,7 +279,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        map = compat_alloc_user_space(sizeof(*map));
-       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+       if (!map)
                return -EFAULT;
        if (__put_user((void *)(unsigned long)handle, &map->handle))
                return -EFAULT;
@@ -308,7 +308,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        client = compat_alloc_user_space(sizeof(*client));
-       if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+       if (!client)
                return -EFAULT;
        if (__put_user(idx, &client->idx))
                return -EFAULT;
@@ -347,7 +347,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
        int i, err;
 
        stats = compat_alloc_user_space(sizeof(*stats));
-       if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+       if (!stats)
                return -EFAULT;
 
        err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -384,8 +384,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
        unsigned long agp_start;
 
        buf = compat_alloc_user_space(sizeof(*buf));
-       if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
-           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+       if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
                return -EFAULT;
 
        if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -416,7 +415,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        buf = compat_alloc_user_space(sizeof(*buf));
-       if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+       if (!buf)
                return -EFAULT;
 
        if (__put_user(b32.size, &buf->size)
@@ -457,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
 
        nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
        request = compat_alloc_user_space(nbytes);
-       if (!access_ok(VERIFY_WRITE, request, nbytes))
+       if (!request)
                return -EFAULT;
        list = (struct drm_buf_desc *) (request + 1);
 
@@ -518,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
                return -EINVAL;
        nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
        request = compat_alloc_user_space(nbytes);
-       if (!access_ok(VERIFY_WRITE, request, nbytes))
+       if (!request)
                return -EFAULT;
        list = (struct drm_buf_pub *) (request + 1);
 
@@ -565,7 +564,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+       if (!request)
                return -EFAULT;
        if (__put_user(req32.count, &request->count)
            || __put_user((int __user *)(unsigned long)req32.list,
@@ -591,7 +590,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+       if (!request)
                return -EFAULT;
        if (__put_user(req32.ctx_id, &request->ctx_id)
            || __put_user((void *)(unsigned long)req32.handle,
@@ -615,7 +614,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+       if (!request)
                return -EFAULT;
        if (__put_user(ctx_id, &request->ctx_id))
                return -EFAULT;
@@ -648,7 +647,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        res = compat_alloc_user_space(sizeof(*res));
-       if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+       if (!res)
                return -EFAULT;
        if (__put_user(res32.count, &res->count)
            || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -691,7 +690,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        d = compat_alloc_user_space(sizeof(*d));
-       if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+       if (!d)
                return -EFAULT;
 
        if (__put_user(d32.context, &d->context)
@@ -766,7 +765,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
        int err;
 
        info = compat_alloc_user_space(sizeof(*info));
-       if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+       if (!info)
                return -EFAULT;
 
        err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -809,7 +808,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+       if (!request
            || __put_user(req32.size, &request->size)
            || __put_user(req32.type, &request->type))
                return -EFAULT;
@@ -836,7 +835,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
        u32 handle;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+       if (!request
            || get_user(handle, &argp->handle)
            || __put_user(handle, &request->handle))
                return -EFAULT;
@@ -860,7 +859,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+       if (!request
            || __put_user(req32.handle, &request->handle)
            || __put_user(req32.offset, &request->offset))
                return -EFAULT;
@@ -876,7 +875,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
        u32 handle;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+       if (!request
            || get_user(handle, &argp->handle)
            || __put_user(handle, &request->handle))
                return -EFAULT;
@@ -899,8 +898,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
        unsigned long x;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+       if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
            || __get_user(x, &argp->size)
            || __put_user(x, &request->size))
                return -EFAULT;
@@ -925,8 +923,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
        unsigned long x;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+       if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
            || __get_user(x, &argp->handle)
            || __put_user(x << PAGE_SHIFT, &request->handle))
                return -EFAULT;
@@ -954,7 +951,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+       if (!request ||
            __put_user(update32.handle, &request->handle) ||
            __put_user(update32.type, &request->type) ||
            __put_user(update32.num, &request->num) ||
@@ -996,7 +993,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+       if (!request
            || __put_user(req32.request.type, &request->request.type)
            || __put_user(req32.request.sequence, &request->request.sequence)
            || __put_user(req32.request.signal, &request->request.signal))
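
The pattern throughout this file relies on the generic compat_alloc_user_space() performing the access_ok() check on the carved-out user-stack area itself and returning NULL on failure, so every call site shrinks to:

        request = compat_alloc_user_space(sizeof(*request));
        if (!request)
                return -EFAULT;

Note that access_ok() is kept where it still guards the caller-supplied argp pointer (e.g. compat_drm_addbufs and the sg ioctls above), since the __get_user()/__copy_in_user() accessors skip that check.
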
index b50fa0afd9071f6c64c36de23253a2ee22ce7480..ee14324522cece2b9316860be6955287caa5ffe1 100644 (file)
@@ -1267,7 +1267,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
 
 /**
  * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
  *
  * Drivers can use this function to reset the vblank state to off at load time.
  * Drivers should use this together with the drm_crtc_vblank_off() and
index c1dc61473db58b1a518c65eae09f25f00fa6fffc..9b731786e4db2d90b95ff6020e253b0c6f883ee7 100644 (file)
@@ -42,7 +42,7 @@ struct drm_file;
 #define DRM_KERNEL_CONTEXT             0
 #define DRM_RESERVED_CONTEXTS          1
 
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
 void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
 void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
index f861361a635e05c3ab91e82fdde52b0f7f0e07e2..4924d381b6642f51a4b98698994e6b2ab3c0744e 100644 (file)
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
        struct drm_master *master = file_priv->master;
        int ret = 0;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        ++file_priv->lock_count;
 
        if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
        struct drm_lock *lock = data;
        struct drm_master *master = file_priv->master;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          task_pid_nr(current), lock->context);
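
drm_core_check_feature() is a plain feature-flag test on the driver, so the two new guards make the legacy lock ioctls fail fast for every KMS driver instead of touching the master lock state. As defined in drmP.h (roughly):

static inline int drm_core_check_feature(struct drm_device *dev, int feature)
{
        return ((dev->driver->driver_features & feature) ? 1 : 0);
}
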
index c0a5cd8c52621301d0cb848e485bbe26cb1190bc..744dfbc6a329aef96dfcab631c2f55fae61de7db 100644 (file)
@@ -276,7 +276,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
        if (oops_in_progress)
                return;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+       drm_for_each_crtc(crtc, dev)
                WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +464,17 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
                struct drm_modeset_acquire_ctx *ctx)
 {
-       struct drm_mode_config *config = &dev->mode_config;
        struct drm_crtc *crtc;
        struct drm_plane *plane;
        int ret = 0;
 
-       list_for_each_entry(crtc, &config->crtc_list, head) {
+       drm_for_each_crtc(crtc, dev) {
                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        return ret;
        }
 
-       list_for_each_entry(plane, &config->plane_list, head) {
+       drm_for_each_plane(plane, dev) {
                ret = drm_modeset_lock(&plane->mutex, ctx);
                if (ret)
                        return ret;
index aaa130736bf8d303bd949d0e5588b413e137b23d..be3884073ea4df9661903544f28358bb5b55359f 100644 (file)
@@ -19,7 +19,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
        unsigned int index = 0;
        struct drm_crtc *tmp;
 
-       list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+       drm_for_each_crtc(tmp, dev) {
                if (tmp->port == port)
                        return 1 << index;
 
index 2f0ed11024eb8322676e000066a238e55b0dcb9b..5e5a07af02c85c4297df213847759bd620a82390 100644 (file)
@@ -91,13 +91,14 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
         */
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       drm_for_each_connector(connector, dev) {
                if (connector->encoder && connector->encoder->crtc == crtc) {
                        if (connector_list != NULL && count < num_connectors)
                                *(connector_list++) = connector;
 
                        count++;
                }
+       }
 
        return count;
 }
@@ -436,7 +437,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
 
        for (i = 0; i < 2; i++) {
                if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
-                       crtc_funcs[i]->atomic_begin(crtc[i]);
+                       crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
        }
 
        /*
@@ -451,7 +452,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
 
        for (i = 0; i < 2; i++) {
                if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
-                       crtc_funcs[i]->atomic_flush(crtc[i]);
+                       crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
        }
 
        /*
@@ -525,10 +526,12 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
        if (plane->funcs->atomic_duplicate_state)
                plane_state = plane->funcs->atomic_duplicate_state(plane);
-       else if (plane->state)
+       else {
+               if (!plane->state)
+                       drm_atomic_helper_plane_reset(plane);
+
                plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-       else
-               plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       }
        if (!plane_state)
                return -ENOMEM;
        plane_state->plane = plane;
@@ -572,10 +575,12 @@ int drm_plane_helper_disable(struct drm_plane *plane)
 
        if (plane->funcs->atomic_duplicate_state)
                plane_state = plane->funcs->atomic_duplicate_state(plane);
-       else if (plane->state)
+       else {
+               if (!plane->state)
+                       drm_atomic_helper_plane_reset(plane);
+
                plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-       else
-               plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       }
        if (!plane_state)
                return -ENOMEM;
        plane_state->plane = plane;
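
This is the same reset-then-duplicate pattern as the CRTC path in drm_crtc_helper.c above: drm_atomic_helper_plane_reset() leaves plane->state pointing at a zeroed software state, so the subsequent duplicate never sees a NULL state and the old kzalloc fallback disappears. A sketch of the reset helper (details may differ from the in-tree version):

void drm_atomic_helper_plane_reset(struct drm_plane *plane)
{
        if (plane->state && plane->state->fb)
                drm_framebuffer_unreference(plane->state->fb);

        kfree(plane->state);
        plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);

        if (plane->state)
                plane->state->plane = plane;    /* back-pointer, also set by callers above */
}
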
index 04203c0d2ecbe0c2a73bc0a5909f9ef85f533baa..d734780b31c0fdcd67d2d622333b0a78c8098e10 100644 (file)
@@ -93,6 +93,27 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
        return 1;
 }
 
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+       bool poll = false;
+       struct drm_connector *connector;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+       if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+               return;
+
+       drm_for_each_connector(connector, dev) {
+               if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+                                        DRM_CONNECTOR_POLL_DISCONNECT))
+                       poll = true;
+       }
+
+       if (poll)
+               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
                                                              uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -153,7 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               drm_kms_helper_poll_enable(dev);
+               __drm_kms_helper_poll_enable(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -295,7 +316,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
        struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@ static void output_poll_execute(struct work_struct *work)
                goto out;
 
        mutex_lock(&dev->mode_config.mutex);
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
 
                /* Ignore forced connectors. */
                if (connector->force)
@@ -407,20 +427,9 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-       bool poll = false;
-       struct drm_connector *connector;
-
-       if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
-               return;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-                                        DRM_CONNECTOR_POLL_DISCONNECT))
-                       poll = true;
-       }
-
-       if (poll)
-               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+       mutex_lock(&dev->mode_config.mutex);
+       __drm_kms_helper_poll_enable(dev);
+       mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
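
The split follows the usual kernel double-underscore convention: __drm_kms_helper_poll_enable() asserts that mode_config.mutex is already held, while the exported drm_kms_helper_poll_enable() takes and releases the mutex around the call. This is what lets drm_helper_probe_single_connector_modes_merge_bits(), which already runs under the mutex, re-enable polling without recursing on the lock.
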
 
@@ -495,7 +504,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
                return false;
 
        mutex_lock(&dev->mode_config.mutex);
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_for_each_connector(connector, dev) {
 
                /* Only handle HPD capable connectors. */
                if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
index 644b4b76e07176e3df75b0415227eac162fb3d26..1610757230a5f2545f7dbb7b77367cf2c5d6cc56 100644 (file)
@@ -80,7 +80,8 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
                exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_crtc_state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
@@ -90,7 +91,8 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
        }
 }
 
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_crtc_state)
 {
 }
 
index 74acca9bcd9dc5211fc7f552eee4bb2199f28f08..eb87e2538861506409ad7651a6a5b7738e771fa4 100644 (file)
@@ -36,15 +36,6 @@ config DRM_I915
          i810 driver instead, and the Atom z5xx series has an entirely
          different implementation.
 
-config DRM_I915_KMS
-       bool "Enable modesetting on intel by default"
-       depends on DRM_I915
-       default y
-       help
-         Choose this option if you want kernel modesetting enabled by default.
-
-         If in doubt, say "Y".
-
 config DRM_I915_FBDEV
        bool "Enable legacy fbdev support for the modesetting intel driver"
        depends on DRM_I915
index b7ddf48e1d758de11771ed3e6910e06a8a69cb60..41fb8a9c5bef4e1b6f9c2b248860e0408133cc87 100644 (file)
@@ -6,12 +6,13 @@
 
 # core driver code
 i915-y := i915_drv.o \
+         i915_irq.o \
          i915_params.o \
           i915_suspend.o \
          i915_sysfs.o \
+         intel_csr.o \
          intel_pm.o \
-         intel_runtime_pm.o \
-         intel_csr.o
+         intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 i915-y += i915_cmd_parser.o \
          i915_gem_batch_pool.o \
          i915_gem_context.o \
-         i915_gem_render_state.o \
          i915_gem_debug.o \
          i915_gem_dmabuf.o \
          i915_gem_evict.o \
          i915_gem_execbuffer.o \
+         i915_gem_fence.o \
          i915_gem_gtt.o \
          i915_gem.o \
+         i915_gem_render_state.o \
          i915_gem_shrinker.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
          i915_gem_userptr.o \
          i915_gpu_error.o \
-         i915_irq.o \
          i915_trace_points.o \
          intel_lrc.o \
+         intel_mocs.o \
          intel_ringbuffer.o \
          intel_uncore.o
 
@@ -46,11 +48,14 @@ i915-y += intel_renderstate_gen6.o \
 
 # modesetting core code
 i915-y += intel_audio.o \
+         intel_atomic.o \
+         intel_atomic_plane.o \
          intel_bios.o \
          intel_display.o \
          intel_fbc.o \
          intel_fifo_underrun.o \
          intel_frontbuffer.o \
+         intel_hotplug.o \
          intel_modes.o \
          intel_overlay.o \
          intel_psr.o \
@@ -66,15 +71,13 @@ i915-y += dvo_ch7017.o \
          dvo_ns2501.o \
          dvo_sil164.o \
          dvo_tfp410.o \
-         intel_atomic.o \
-         intel_atomic_plane.o \
          intel_crt.o \
          intel_ddi.o \
-         intel_dp.o \
          intel_dp_mst.o \
+         intel_dp.o \
          intel_dsi.o \
-         intel_dsi_pll.o \
          intel_dsi_panel_vbt.o \
+         intel_dsi_pll.o \
          intel_dvo.o \
          intel_hdmi.o \
          intel_i2c.o \
index 89b08a896d20caacc2c3490dacf4dd32ab81ca8b..732ce8785945dd555557f8c6a383bfe36d20ee8e 100644 (file)
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Eric Anholt <eric@anholt.net>
+ *    Thomas Richter <thor@math.tu-berlin.de>
  *
  * Minor modifications (Dithering enable):
  *    Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
 /*
  * LCD Vertical Display Size
  */
-#define VR21   0x20
+#define VR21   0x21
 
 /*
  * Panel power down status
 # define VR8F_POWER_MASK               (0x3c)
 # define VR8F_POWER_POS                        (2)
 
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+       0x11, 0x12,
+       0x18, 0x19, 0x1a, 0x1f,
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+       0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+       0x8e, 0x8f,
+       0x10            /* this must come last */
+};
+
 
 struct ivch_priv {
        bool quiet;
 
        uint16_t width, height;
+
+       /* Register backup */
+
+       uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
 /**
  * Reads a register on the ivch.
  *
@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 {
        struct ivch_priv *priv;
        uint16_t temp;
+       int i;
 
        priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
        if (priv == NULL)
@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
        ivch_read(dvo, VR20, &priv->width);
        ivch_read(dvo, VR21, &priv->height);
 
+       /* Make a backup of the registers to be able to restore them
+        * upon suspend.
+        */
+       for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+               ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+       ivch_dump_regs(dvo);
+
        return true;
 
 out:
@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
        return MODE_OK;
 }
 
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+       int i;
+
+       DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+       ivch_write(dvo, VR10, 0x0000);
+
+       for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+               ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
 /** Sets the power state of the panel connected to the ivch */
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
        int i;
        uint16_t vr01, vr30, backlight;
 
+       ivch_reset(dvo);
+
        /* Set the new power state of the panel. */
        if (!ivch_read(dvo, VR01, &vr01))
                return;
@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
                backlight = 1;
        else
                backlight = 0;
+
        ivch_write(dvo, VR80, backlight);
 
        if (enable)
@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
 {
        uint16_t vr01;
 
+       ivch_reset(dvo);
+
        /* Set the new power state of the panel. */
        if (!ivch_read(dvo, VR01, &vr01))
                return false;
@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
                          struct drm_display_mode *mode,
                          struct drm_display_mode *adjusted_mode)
 {
+       struct ivch_priv *priv = dvo->dev_priv;
        uint16_t vr40 = 0;
        uint16_t vr01 = 0;
        uint16_t vr10;
 
-       ivch_read(dvo, VR10, &vr10);
+       ivch_reset(dvo);
+
+       vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
        /* Enable dithering for 18 bpp pipelines */
        vr10 &= VR10_INTERFACE_DEPTH_MASK;
        if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
                uint16_t x_ratio, y_ratio;
 
                vr01 |= VR01_PANEL_FIT_ENABLE;
-               vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+               vr40 |= VR40_CLOCK_GATING_ENABLE;
                x_ratio = (((mode->hdisplay - 1) << 16) /
                           (adjusted_mode->hdisplay - 1)) >> 2;
                y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
        ivch_write(dvo, VR01, vr01);
        ivch_write(dvo, VR40, vr40);
-
-       ivch_dump_regs(dvo);
 }
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
index 306d9e4e5cf376bc57901d665622a8b16fdce4d9..237ff6884a2227bc9b7520ed4bcaabe75d924f74 100644 (file)
@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
-       CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
+       CMD(  MI_LOAD_REGISTER_MEM(1),             SMI,   !F,  0xFF,   W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
@@ -151,8 +151,8 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
        CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
-       CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+       CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
        CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
@@ -564,7 +564,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
-                               &table->table[i];
+                               &table->table[j];
                        u32 curr = desc->cmd.value & desc->cmd.mask;
 
                        if (curr < previous) {
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                         * only MI_LOAD_REGISTER_IMM commands.
                         */
                        if (reg_addr == OACONTROL) {
-                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                        return false;
                                }
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                         * allowed mask/value pair given in the whitelist entry.
                         */
                        if (reg->mask) {
-                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
index 82bbe3f2a7e1e6c7b1a2c56d2f066377f7254ceb..d1c643a82267d22053e6322b967b6b634e774fa2 100644 (file)
@@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
        return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+       u64 size = 0;
+       struct i915_vma *vma;
+
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (i915_is_ggtt(vma->vm) &&
+                   drm_mm_node_allocated(&vma->node))
+                       size += vma->node.size;
+       }
+
+       return size;
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (!i915_is_ggtt(vma->vm))
-                       seq_puts(m, " (pp");
+               seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+                          i915_is_ggtt(vma->vm) ? "g" : "pp",
+                          vma->node.start, vma->node.size);
+               if (i915_is_ggtt(vma->vm))
+                       seq_printf(m, ", type: %u)", vma->ggtt_view.type);
                else
-                       seq_puts(m, " (g");
-               seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
-                          vma->node.start, vma->node.size,
-                          vma->ggtt_view.type);
+                       seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *vm = &dev_priv->gtt.base;
        struct i915_vma *vma;
-       size_t total_obj_size, total_gtt_size;
+       u64 total_obj_size, total_gtt_size;
        int count, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        }
        mutex_unlock(&dev->struct_mutex);
 
-       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+       seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
 }
@@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
-       size_t total_obj_size, total_gtt_size;
+       u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
        int count, ret;
 
@@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
                list_add(&obj->obj_exec_link, &stolen);
 
                total_obj_size += obj->base.size;
-               total_gtt_size += i915_gem_obj_ggtt_size(obj);
+               total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
                count++;
        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
        }
        mutex_unlock(&dev->struct_mutex);
 
-       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+       seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
 }
 
 #define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
-               size += i915_gem_obj_ggtt_size(obj); \
+               size += i915_gem_obj_total_ggtt_size(obj); \
                ++count; \
                if (obj->map_and_fenceable) { \
                        mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 
 struct file_stats {
        struct drm_i915_file_private *file_priv;
-       int count;
-       size_t total, unbound;
-       size_t global, shared;
-       size_t active, inactive;
+       unsigned long count;
+       u64 total, unbound;
+       u64 global, shared;
+       u64 active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
        if (stats.count) \
-               seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
@@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
 
 #define count_vmas(list, member) do { \
        list_for_each_entry(vma, list, member) { \
-               size += i915_gem_obj_ggtt_size(vma->obj); \
+               size += i915_gem_obj_total_ggtt_size(vma->obj); \
                ++count; \
                if (vma->obj->map_and_fenceable) { \
                        mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 count, mappable_count, purgeable_count;
-       size_t size, mappable_size, purgeable_size;
+       u64 size, mappable_size, purgeable_size;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_file *file;
@@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.bound_list, global_list);
-       seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+       seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
        count_vmas(&vm->active_list, mm_list);
-       seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+       seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
        count_vmas(&vm->inactive_list, mm_list);
-       seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+       seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                if (obj->madv == I915_MADV_DONTNEED)
                        purgeable_size += obj->base.size, ++purgeable_count;
        }
-       seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+       seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                        ++purgeable_count;
                }
        }
-       seq_printf(m, "%u purgeable objects, %zu bytes\n",
+       seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
-       seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+       seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
                   mappable_count, mappable_size);
-       seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+       seq_printf(m, "%u fault mappable objects, %llu bytes\n",
                   count, size);
 
-       seq_printf(m, "%zu [%lu] gtt total\n",
+       seq_printf(m, "%llu [%llu] gtt total\n",
                   dev_priv->gtt.base.total,
-                  dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+                  (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
-       size_t total_obj_size, total_gtt_size;
+       u64 total_obj_size, total_gtt_size;
        int count, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
-               total_gtt_size += i915_gem_obj_ggtt_size(obj);
+               total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
                count++;
        }
 
        mutex_unlock(&dev->struct_mutex);
 
-       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+       seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
 
        return 0;
@@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
                   IS_BROADWELL(dev) || IS_GEN9(dev)) {
-               u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-               u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-               u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+               u32 rp_state_limits;
+               u32 gt_perf_status;
+               u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;
 
+               rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+               if (IS_BROXTON(dev)) {
+                       rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+                       gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+               } else {
+                       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+                       gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+               }
+
                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
@@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Down threshold: %d%%\n",
                           dev_priv->rps.down_threshold);
 
-               max_freq = (rp_state_cap & 0xff0000) >> 16;
+               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+                           rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
 
-               max_freq = rp_state_cap & 0xff;
+               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+                           rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
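
Broxton packs the RPN/RP0 fields of RP_STATE_CAP in the opposite byte positions from the other gen6+ platforms, which is all the IS_BROXTON() ternaries in these two hunks encode. A standalone decode sketch (helper names invented for illustration):

	#include <stdint.h>

	/* Non-BXT layout: RPN (lowest) in bits 23:16, RP0 (max) in bits 7:0;
	 * Broxton swaps the two. */
	static unsigned rpn_field(uint32_t rp_state_cap, int is_broxton)
	{
		return (is_broxton ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff;
	}

	static unsigned rp0_field(uint32_t rp_state_cap, int is_broxton)
	{
		return (is_broxton ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff;
	}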
@@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
                return ironlake_drpc_info(m);
 }
 
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+                  dev_priv->fb_tracking.busy_bits);
+
+       seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+                  dev_priv->fb_tracking.flip_bits);
+
+       return 0;
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        }
 
        intel_runtime_pm_get(dev_priv);
+       mutex_lock(&dev_priv->fbc.lock);
 
-       if (intel_fbc_enabled(dev)) {
+       if (intel_fbc_enabled(dev_priv))
                seq_puts(m, "FBC enabled\n");
-       } else {
-               seq_puts(m, "FBC disabled: ");
-               switch (dev_priv->fbc.no_fbc_reason) {
-               case FBC_OK:
-                       seq_puts(m, "FBC actived, but currently disabled in hardware");
-                       break;
-               case FBC_UNSUPPORTED:
-                       seq_puts(m, "unsupported by this chipset");
-                       break;
-               case FBC_NO_OUTPUT:
-                       seq_puts(m, "no outputs");
-                       break;
-               case FBC_STOLEN_TOO_SMALL:
-                       seq_puts(m, "not enough stolen memory");
-                       break;
-               case FBC_UNSUPPORTED_MODE:
-                       seq_puts(m, "mode not supported");
-                       break;
-               case FBC_MODE_TOO_LARGE:
-                       seq_puts(m, "mode too large");
-                       break;
-               case FBC_BAD_PLANE:
-                       seq_puts(m, "FBC unsupported on plane");
-                       break;
-               case FBC_NOT_TILED:
-                       seq_puts(m, "scanout buffer not tiled");
-                       break;
-               case FBC_MULTIPLE_PIPES:
-                       seq_puts(m, "multiple pipes are enabled");
-                       break;
-               case FBC_MODULE_PARAM:
-                       seq_puts(m, "disabled per module param (default off)");
-                       break;
-               case FBC_CHIP_DEFAULT:
-                       seq_puts(m, "disabled per chip default");
-                       break;
-               default:
-                       seq_puts(m, "unknown reason");
-               }
-               seq_putc(m, '\n');
-       }
+       else
+               seq_printf(m, "FBC disabled: %s\n",
+                         intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+       if (INTEL_INFO(dev_priv)->gen >= 7)
+               seq_printf(m, "Compressing: %s\n",
+                          yesno(I915_READ(FBC_STATUS2) &
+                                FBC_COMPRESSION_MASK));
 
+       mutex_unlock(&dev_priv->fbc.lock);
        intel_runtime_pm_put(dev_priv);
 
        return 0;
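
The big switch over no_fbc_reason is replaced by a call to intel_no_fbc_reason_str(), which keeps the strings with the rest of the FBC code in intel_fbc.c. A self-contained sketch of that helper's assumed shape, abridged and reusing strings deleted above:

	static const char *no_fbc_reason_str(int reason)
	{
		switch (reason) {
		case FBC_UNSUPPORTED:      return "unsupported by this chipset";
		case FBC_NO_OUTPUT:        return "no outputs";
		case FBC_STOLEN_TOO_SMALL: return "not enough stolen memory";
		case FBC_MODULE_PARAM:     return "disabled per module param";
		default:                   return "unknown reason";
		}
	}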
@@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
                return -ENODEV;
 
-       drm_modeset_lock_all(dev);
        *val = dev_priv->fbc.false_color;
-       drm_modeset_unlock_all(dev);
 
        return 0;
 }
@@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
                return -ENODEV;
 
-       drm_modeset_lock_all(dev);
+       mutex_lock(&dev_priv->fbc.lock);
 
        reg = I915_READ(ILK_DPFC_CONTROL);
        dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
                   (reg | FBC_CTL_FALSE_COLOR) :
                   (reg & ~FBC_CTL_FALSE_COLOR));
 
-       drm_modeset_unlock_all(dev);
+       mutex_unlock(&dev_priv->fbc.lock);
        return 0;
 }
 
@@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;
        int gpu_freq, ia_freq;
+       unsigned int max_gpu_freq, min_gpu_freq;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+       if (!HAS_CORE_RING_FREQ(dev)) {
                seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }
@@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        if (ret)
                goto out;
 
+       if (IS_SKYLAKE(dev)) {
+               /* Convert GT frequency to 50 HZ units */
+               min_gpu_freq =
+                       dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+               max_gpu_freq =
+                       dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+       } else {
+               min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+               max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+       }
+
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-       for (gpu_freq = dev_priv->rps.min_freq_softlimit;
-            gpu_freq <= dev_priv->rps.max_freq_softlimit;
-            gpu_freq++) {
+       for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-                          intel_gpu_freq(dev_priv, gpu_freq),
+                          intel_gpu_freq(dev_priv, (gpu_freq *
+                               (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }
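
Skylake keeps its RPS soft limits in ~16.66 MHz units while the pcode ring/IA table walks 50 MHz steps, hence the divide by GEN9_FREQ_SCALER before the loop and the multiply when printing. A worked example, assuming GEN9_FREQ_SCALER == 3 (its value in this era's i915_drv.h):

	#include <stdio.h>

	#define GEN9_FREQ_SCALER 3  /* assumed, per i915_drv.h of this series */

	int main(void)
	{
		int softlimit = 36;  /* 36 * 16.66 MHz = 600 MHz */
		int gpu_freq = softlimit / GEN9_FREQ_SCALER;  /* 12, in 50 MHz units */

		/* The table row for gpu_freq 12 is printed as 12 * 50 = 600 MHz. */
		printf("pcode row %d -> %d MHz\n", gpu_freq, gpu_freq * 50);
		return 0;
	}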
@@ -1848,6 +1866,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct intel_fbdev *ifbdev = NULL;
        struct intel_framebuffer *fb;
+       struct drm_framebuffer *drm_fb;
 
 #ifdef CONFIG_DRM_I915_FBDEV
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1867,7 +1886,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 #endif
 
        mutex_lock(&dev->mode_config.fb_lock);
-       list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+       drm_for_each_fb(drm_fb, dev) {
+               fb = to_intel_framebuffer(drm_fb);
                if (ifbdev && &fb->base == ifbdev->helper.fb)
                        continue;
 
@@ -2248,7 +2268,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
                seq_puts(m, "aliasing PPGTT:\n");
-               seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+               seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
 
                ppgtt->debug_dump(ppgtt, m);
        }
@@ -2479,13 +2499,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
        return 0;
 }
 
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+       if (!HAS_RUNTIME_PM(dev)) {
                seq_puts(m, "not supported\n");
                return 0;
        }
@@ -2493,6 +2513,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
        seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+       seq_printf(m, "Usage count: %d\n",
+                  atomic_read(&dev->dev->power.usage_count));
+#else
+       seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
 
        return 0;
 }
@@ -2780,13 +2806,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
                bool active;
+               struct intel_crtc_state *pipe_config;
                int x, y;
 
+               pipe_config = to_intel_crtc_state(crtc->base.state);
+
                seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
                           crtc->base.base.id, pipe_name(crtc->pipe),
-                          yesno(crtc->active), crtc->config->pipe_src_w,
-                          crtc->config->pipe_src_h);
-               if (crtc->active) {
+                          yesno(pipe_config->base.active),
+                          pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+               if (pipe_config->base.active) {
                        intel_crtc_info(m, crtc);
 
                        active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3056,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
        seq_puts(m, "\n\n");
 
-       if (intel_crtc->config->has_drrs) {
+       if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;
 
                mutex_lock(&drrs->mutex);
@@ -3079,7 +3108,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
        for_each_intel_crtc(dev, intel_crtc) {
                drm_modeset_lock(&intel_crtc->base.mutex, NULL);
 
-               if (intel_crtc->active) {
+               if (intel_crtc->base.state->active) {
                        active_crtc_cnt++;
                        seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
@@ -3616,53 +3645,40 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
        return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+       struct intel_crtc_state *pipe_config;
+       struct drm_atomic_state *state;
+       int ret = 0;
 
        drm_modeset_lock_all(dev);
-       /*
-        * If we use the eDP transcoder we need to make sure that we don't
-        * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-        * relevant on hsw with pipe A when using the always-on power well
-        * routing.
-        */
-       if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
-           !crtc->config->pch_pfit.enabled) {
-               crtc->config->pch_pfit.force_thru = true;
-
-               intel_display_power_get(dev_priv,
-                                       POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
-               intel_crtc_reset(crtc);
+       state = drm_atomic_state_alloc(dev);
+       if (!state) {
+               ret = -ENOMEM;
+               goto out;
        }
-       drm_modeset_unlock_all(dev);
-}
-
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc =
-               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
 
-       drm_modeset_lock_all(dev);
-       /*
-        * If we use the eDP transcoder we need to make sure that we don't
-        * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-        * relevant on hsw with pipe A when using the always-on power well
-        * routing.
-        */
-       if (crtc->config->pch_pfit.force_thru) {
-               crtc->config->pch_pfit.force_thru = false;
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+       pipe_config = intel_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(pipe_config)) {
+               ret = PTR_ERR(pipe_config);
+               goto out;
+       }
 
-               intel_crtc_reset(crtc);
+       pipe_config->pch_pfit.force_thru = enable;
+       if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+           pipe_config->pch_pfit.enabled != enable)
+               pipe_config->base.connectors_changed = true;
 
-               intel_display_power_put(dev_priv,
-                                       POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-       }
+       ret = drm_atomic_commit(state);
+out:
        drm_modeset_unlock_all(dev);
+       WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+       if (ret)
+               drm_atomic_state_free(state);
 }
 
 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3698,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
                break;
        case INTEL_PIPE_CRC_SOURCE_PF:
                if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_trans_edp_pipe_A_crc_wa(dev);
+                       hsw_trans_edp_pipe_A_crc_wa(dev, true);
 
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
                break;
@@ -3776,7 +3792,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                                 pipe_name(pipe));
 
                drm_modeset_lock(&crtc->base.mutex, NULL);
-               if (crtc->active)
+               if (crtc->base.state->active)
                        intel_wait_for_vblank(dev, pipe);
                drm_modeset_unlock(&crtc->base.mutex);
 
@@ -3794,7 +3810,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                else if (IS_VALLEYVIEW(dev))
                        vlv_undo_pipe_scramble_reset(dev, pipe);
                else if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+                       hsw_trans_edp_pipe_A_crc_wa(dev, false);
 
                hsw_enable_ips(crtc);
        }
@@ -3980,24 +3996,14 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 {
        char *input_buffer;
        int status = 0;
-       struct seq_file *m;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct list_head *connector_list;
        struct intel_dp *intel_dp;
        int val = 0;
 
-       m = file->private_data;
-       if (!m) {
-               status = -ENODEV;
-               return status;
-       }
-       dev = m->private;
+       dev = ((struct seq_file *)file->private_data)->private;
 
-       if (!dev) {
-               status = -ENODEV;
-               return status;
-       }
        connector_list = &dev->mode_config.connector_list;
 
        if (len == 0)
@@ -4021,9 +4027,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
 
-               if (connector->connector_type ==
-                   DRM_MODE_CONNECTOR_DisplayPort &&
-                   connector->status == connector_status_connected &&
+               if (connector->status == connector_status_connected &&
                    connector->encoder != NULL) {
                        intel_dp = enc_to_intel_dp(connector->encoder);
                        status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4059,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
        struct list_head *connector_list = &dev->mode_config.connector_list;
        struct intel_dp *intel_dp;
 
-       if (!dev)
-               return -ENODEV;
-
        list_for_each_entry(connector, connector_list, head) {
 
                if (connector->connector_type !=
@@ -4102,9 +4103,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
        struct list_head *connector_list = &dev->mode_config.connector_list;
        struct intel_dp *intel_dp;
 
-       if (!dev)
-               return -ENODEV;
-
        list_for_each_entry(connector, connector_list, head) {
 
                if (connector->connector_type !=
@@ -4144,9 +4142,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
        struct list_head *connector_list = &dev->mode_config.connector_list;
        struct intel_dp *intel_dp;
 
-       if (!dev)
-               return -ENODEV;
-
        list_for_each_entry(connector, connector_list, head) {
 
                if (connector->connector_type !=
@@ -4183,8 +4178,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
        struct drm_device *dev = m->private;
-       int num_levels = ilk_wm_max_level(dev) + 1;
        int level;
+       int num_levels;
+
+       if (IS_CHERRYVIEW(dev))
+               num_levels = 3;
+       else if (IS_VALLEYVIEW(dev))
+               num_levels = 1;
+       else
+               num_levels = ilk_wm_max_level(dev) + 1;
 
        drm_modeset_lock_all(dev);
 
@@ -4193,9 +4195,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 
                /*
                 * - WM1+ latency values in 0.5us units
-                * - latencies are in us on gen9
+                * - latencies are in us on gen9/vlv/chv
                 */
-               if (INTEL_INFO(dev)->gen >= 9)
+               if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;
@@ -4259,7 +4261,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev))
+       if (INTEL_INFO(dev)->gen < 5)
                return -ENODEV;
 
        return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4293,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
        uint16_t new[8] = { 0 };
-       int num_levels = ilk_wm_max_level(dev) + 1;
+       int num_levels;
        int level;
        int ret;
        char tmp[32];
 
+       if (IS_CHERRYVIEW(dev))
+               num_levels = 3;
+       else if (IS_VALLEYVIEW(dev))
+               num_levels = 1;
+       else
+               num_levels = ilk_wm_max_level(dev) + 1;
+
        if (len >= sizeof(tmp))
                return -EINVAL;
 
@@ -5027,6 +5036,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
+       {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_ips_status", i915_ips_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5052,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_sink_crc_eDP1", i915_sink_crc, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
-       {"i915_pc8_status", i915_pc8_status, 0},
+       {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
        {"i915_power_domain_info", i915_power_domain_info, 0},
        {"i915_display_info", i915_display_info, 0},
        {"i915_semaphore_status", i915_semaphore_status, 0},
index d2df321ba6349832d300a9551fe863806b2eddce..ab37d1121be8277728bff5d25a0cb4a4599de0aa 100644 (file)
@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
                if (!value)
                        return -ENODEV;
                break;
+       case I915_PARAM_HAS_GPU_RESET:
+               value = i915.enable_hangcheck &&
+                       intel_has_gpu_reset(dev);
+               break;
+       case I915_PARAM_HAS_RESOURCE_STREAMER:
+               value = HAS_RESOURCE_STREAMER(dev);
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
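
Both new parameters are immediately visible to userspace through the standard getparam ioctl. A hedged userspace-side sketch of querying one of them (the device path is only an example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_GPU_RESET,
			.value = &value,
		};

		if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
			printf("GPU reset usable: %d\n", value);
		return 0;
	}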
@@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
        info = (struct intel_device_info *)&dev_priv->info;
 
+       /*
+        * Skylake and Broxton currently don't expose the topmost plane, since
+        * its use is mutually exclusive with the legacy cursor and we only
+        * want to expose one of those, not both. Until we can safely expose
+        * the topmost plane as a DRM_PLANE_TYPE_CURSOR with all its features
+        * supported, we don't expose it at all, to prevent ABI breakage down
+        * the line.
+        */
        if (IS_BROXTON(dev)) {
-               info->num_sprites[PIPE_A] = 3;
-               info->num_sprites[PIPE_B] = 3;
-               info->num_sprites[PIPE_C] = 2;
-       } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+               info->num_sprites[PIPE_A] = 2;
+               info->num_sprites[PIPE_B] = 2;
+               info->num_sprites[PIPE_C] = 1;
+       } else if (IS_VALLEYVIEW(dev))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
@@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_mtrrfree;
        }
 
-       dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->dp_wq == NULL) {
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL) {
                DRM_ERROR("Failed to create our dp workqueue.\n");
                ret = -ENOMEM;
                goto out_freewq;
@@ -1029,7 +1044,7 @@ out_gem_unload:
        pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 out_freedpwq:
-       destroy_workqueue(dev_priv->dp_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_freewq:
        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
@@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
+       intel_fbc_cleanup_cfb(dev_priv);
        i915_gem_cleanup_stolen(dev);
 
        intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
-       destroy_workqueue(dev_priv->dp_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp.  For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
-       return 1;
-}
index 884b4f9b81c4abb163ec181deebb44c3cfc6abce..1d887459e37fd717992ddbfb52228d8550e55a09 100644 (file)
@@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
 };
 
 static const struct intel_device_info intel_skylake_info = {
-       .is_preliminary = 1,
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-       .is_preliminary = 1,
        .is_skylake = 1,
        .gen = 9, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = {           /* aka */
        {0, 0, 0}
 };
 
-#if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
 
 void intel_detect_pch(struct drm_device *dev)
 {
@@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        return true;
 }
 
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       dev_priv->long_hpd_port_mask = 0;
-       dev_priv->short_hpd_port_mask = 0;
-       dev_priv->hpd_event_bits = 0;
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       cancel_work_sync(&dev_priv->dig_port_work);
-       cancel_work_sync(&dev_priv->hotplug_work);
-       cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
 void i915_firmware_load_error_print(const char *fw_path, int err)
 {
        DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
        pci_power_t opregion_target_state;
        int error;
 
@@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
         * for _thaw. Also, power gate the CRTC power wells.
         */
        drm_modeset_lock_all(dev);
-       for_each_crtc(dev, crtc)
-               intel_crtc_control(crtc, false);
+       intel_display_suspend(dev);
        drm_modeset_unlock_all(dev);
 
        intel_dp_mst_suspend(dev);
@@ -760,7 +739,7 @@ static int i915_drm_resume(struct drm_device *dev)
        spin_unlock_irq(&dev_priv->irq_lock);
 
        drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev, true);
+       intel_display_resume(dev);
        drm_modeset_unlock_all(dev);
 
        intel_dp_mst_resume(dev);
@@ -865,9 +844,6 @@ int i915_reset(struct drm_device *dev)
        bool simulated;
        int ret;
 
-       if (!i915.reset)
-               return 0;
-
        intel_reset_gt_powersave(dev);
 
        mutex_lock(&dev->struct_mutex);
@@ -959,8 +935,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
 
-       driver.driver_features &= ~(DRIVER_USE_AGP);
-
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1515,7 +1489,15 @@ static int intel_runtime_suspend(struct device *device)
         * FIXME: We really should find a document that references the arguments
         * used below!
         */
-       if (IS_HASWELL(dev)) {
+       if (IS_BROADWELL(dev)) {
+               /*
+                * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+                * being detected, and the call we do at intel_runtime_resume()
+                * won't be able to restore them. Since PCI_D3hot matches the
+                * actual specification and appears to be working, use it.
+                */
+               intel_opregion_notify_adapter(dev, PCI_D3hot);
+       } else {
                /*
                 * current versions of firmware which depend on this opregion
                 * notification have repurposed the D1 definition to mean
@@ -1524,16 +1506,6 @@ static int intel_runtime_suspend(struct device *device)
                 * the suspend path.
                 */
                intel_opregion_notify_adapter(dev, PCI_D1);
-       } else {
-               /*
-                * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-                * being detected, and the call we do at intel_runtime_resume()
-                * won't be able to restore them. Since PCI_D3hot matches the
-                * actual specification and appears to be working, use it. Let's
-                * assume the other non-Haswell platforms will stay the same as
-                * Broadwell.
-                */
-               intel_opregion_notify_adapter(dev, PCI_D3hot);
        }
 
        assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1645,6 @@ static struct drm_driver driver = {
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER,
        .load = i915_driver_load,
@@ -1688,7 +1659,6 @@ static struct drm_driver driver = {
        .suspend = i915_suspend_legacy,
        .resume = i915_resume_legacy,
 
-       .device_is_agp = i915_driver_device_is_agp,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1697,14 @@ static int __init i915_init(void)
        driver.num_ioctls = i915_max_ioctl;
 
        /*
-        * If CONFIG_DRM_I915_KMS is set, default to KMS unless
-        * explicitly disabled with the module pararmeter.
-        *
-        * Otherwise, just follow the parameter (defaulting to off).
-        *
-        * Allow optional vga_text_mode_force boot option to override
-        * the default behavior.
+        * Enable KMS by default, unless explicitly overridden by
+        * either the i915.modeset parameter or the
+        * vga_text_mode_force boot option.
         */
-#if defined(CONFIG_DRM_I915_KMS)
-       if (i915.modeset != 0)
-               driver.driver_features |= DRIVER_MODESET;
-#endif
-       if (i915.modeset == 1)
-               driver.driver_features |= DRIVER_MODESET;
+       driver.driver_features |= DRIVER_MODESET;
+
+       if (i915.modeset == 0)
+               driver.driver_features &= ~DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1723,7 @@ static int __init i915_init(void)
         * to the atomic ioctl and the atomic properties.  Only plane operations on
         * a single CRTC will actually work.
         */
-       if (i915.nuclear_pageflip)
+       if (driver.driver_features & DRIVER_MODESET)
                driver.driver_features |= DRIVER_ATOMIC;
 
        return drm_pci_init(&driver, &i915_pci_driver);
index fd1de451c8c6bae13f42eaae5e9cfe572039ea18..01abe13e98e010066576c9173e8cd200ebde0268 100644 (file)
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20150522"
+#define DRIVER_DATE            "20150731"
 
 #undef WARN_ON
 /* Many gcc versions seem to not see through this and fall over :( */
@@ -206,17 +206,50 @@ enum intel_display_power_domain {
 
 enum hpd_pin {
        HPD_NONE = 0,
-       HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
        HPD_CRT,
        HPD_SDVO_B,
        HPD_SDVO_C,
+       HPD_PORT_A,
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_NUM_PINS
 };
 
+#define for_each_hpd_pin(__pin) \
+       for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+       struct work_struct hotplug_work;
+
+       struct {
+               unsigned long last_jiffies;
+               int count;
+               enum {
+                       HPD_ENABLED = 0,
+                       HPD_DISABLED = 1,
+                       HPD_MARK_DISABLED = 2
+               } state;
+       } stats[HPD_NUM_PINS];
+       u32 event_bits;
+       struct delayed_work reenable_work;
+
+       struct intel_digital_port *irq_port[I915_MAX_PORTS];
+       u32 long_port_mask;
+       u32 short_port_mask;
+       struct work_struct dig_port_work;
+
+       /*
+        * If we get an HPD irq from DP and an HPD irq from non-DP,
+        * the non-DP HPD could block the workqueue on acquiring a
+        * mode config mutex that userspace may have taken. However,
+        * userspace is waiting on the DP workqueue to run, which is
+        * blocked behind the non-DP one.
+        */
+       struct workqueue_struct *dp_wq;
+};
+
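
A sketch of how the new iterator and the consolidated state are meant to be used together (caller assumed; roughly what intel_hpd_init() does when re-arming storm detection):

	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		dev_priv->hotplug.stats[pin].count = 0;
		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}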
 #define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +276,12 @@ enum hpd_pin {
                            &dev->mode_config.plane_list,       \
                            base.head)
 
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)     \
+       list_for_each_entry(intel_plane,                                \
+                           &(dev)->mode_config.plane_list,             \
+                           base.head)                                  \
+               if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
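
Usage sketch for the new per-CRTC plane iterator (caller assumed). Since the macro expands to a loop ending in a bare if, the body must be a single statement or a braced block:

	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, plane) {
		DRM_DEBUG_KMS("plane %d sits on pipe %c\n",
			      plane->base.base.id,
			      pipe_name(intel_crtc->pipe));
	}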
 #define for_each_intel_crtc(dev, intel_crtc) \
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
@@ -333,7 +372,8 @@ struct intel_dpll_hw_state {
        uint32_t cfgcr1, cfgcr2;
 
        /* bxt */
-       uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+       uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+                pcsdw12;
 };
 
 struct intel_shared_dpll_config {
@@ -343,7 +383,6 @@ struct intel_shared_dpll_config {
 
 struct intel_shared_dpll {
        struct intel_shared_dpll_config config;
-       struct intel_shared_dpll_config *new_config;
 
        int active; /* count of number of active CRTCs (i.e. DPMS on) */
        bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +484,7 @@ struct drm_i915_error_state {
        struct timeval time;
 
        char error_msg[128];
+       int iommu;
        u32 reset_count;
        u32 suspend_count;
 
@@ -559,9 +599,6 @@ struct intel_limit;
 struct dpll;
 
 struct drm_i915_display_funcs {
-       bool (*fbc_enabled)(struct drm_device *dev);
-       void (*enable_fbc)(struct drm_crtc *crtc);
-       void (*disable_fbc)(struct drm_device *dev);
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        /**
@@ -587,7 +624,8 @@ struct drm_i915_display_funcs {
                                 struct drm_crtc *crtc,
                                 uint32_t sprite_width, uint32_t sprite_height,
                                 int pixel_size, bool enable, bool scaled);
-       void (*modeset_global_resources)(struct drm_atomic_state *state);
+       int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+       void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +636,6 @@ struct drm_i915_display_funcs {
                                  struct intel_crtc_state *crtc_state);
        void (*crtc_enable)(struct drm_crtc *crtc);
        void (*crtc_disable)(struct drm_crtc *crtc);
-       void (*off)(struct drm_crtc *crtc);
        void (*audio_codec_enable)(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   struct drm_display_mode *mode);
@@ -608,7 +645,7 @@ struct drm_i915_display_funcs {
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_i915_gem_object *obj,
-                         struct intel_engine_cs *ring,
+                         struct drm_i915_gem_request *req,
                          uint32_t flags);
        void (*update_primary_plane)(struct drm_crtc *crtc,
                                     struct drm_framebuffer *fb,
@@ -706,7 +743,7 @@ enum csr_state {
 
 struct intel_csr {
        const char *fw_path;
-       __be32 *dmc_payload;
+       uint32_t *dmc_payload;
        uint32_t dmc_fw_size;
        uint32_t mmio_count;
        uint32_t mmioaddr[8];
@@ -805,11 +842,15 @@ struct i915_ctx_hang_stats {
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
  * struct intel_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
+ * @flags: context-specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
  * @file_priv: filp associated with this context (NULL for global default
  *            context).
  * @hang_stats: information about the role of this context in possible GPU
@@ -827,6 +868,7 @@ struct intel_context {
        int user_handle;
        uint8_t remap_slice;
        struct drm_i915_private *i915;
+       int flags;
        struct drm_i915_file_private *file_priv;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_hw_ppgtt *ppgtt;
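
CONTEXT_NO_ZEROMAP is driven from userspace via the context setparam ioctl. A hedged sketch (fd and ctx_id assumed to exist; the parameter name matches the uapi added alongside this flag):

	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param  = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value  = 1,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);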
@@ -853,9 +895,13 @@ enum fb_op_origin {
        ORIGIN_CPU,
        ORIGIN_CS,
        ORIGIN_FLIP,
+       ORIGIN_DIRTYFB,
 };
 
 struct i915_fbc {
+       /* This is always the inner lock when overlapping with struct_mutex and
+        * it's the outer lock when overlapping with stolen_lock. */
+       struct mutex lock;
        unsigned long uncompressed_size;
        unsigned threshold;
        unsigned int fb_id;
@@ -875,7 +921,7 @@ struct i915_fbc {
 
        struct intel_fbc_work {
                struct delayed_work work;
-               struct drm_crtc *crtc;
+               struct intel_crtc *crtc;
                struct drm_framebuffer *fb;
        } *fbc_work;
 
@@ -891,7 +937,13 @@ struct i915_fbc {
                FBC_MULTIPLE_PIPES, /* more than one pipe active */
                FBC_MODULE_PARAM,
                FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+               FBC_ROTATION, /* rotation is not supported */
+               FBC_IN_DBG_MASTER, /* kernel debugger is active */
        } no_fbc_reason;
+
+       bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+       void (*enable_fbc)(struct intel_crtc *crtc);
+       void (*disable_fbc)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1201,6 +1253,10 @@ struct intel_l3_parity {
 struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
+       /** Protects the usage of the GTT stolen memory allocator. This is
+        * always the inner lock when overlapping with struct_mutex. */
+       struct mutex stolen_lock;
+
        /** List of all objects in gtt_space. Used to restore gtt
         * mappings on resume */
        struct list_head bound_list;
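
Read together with the fbc.lock comment above, the documented ordering is struct_mutex -> fbc.lock -> stolen_lock, outermost first. Illustrative nesting only; no single path necessarily takes all three:

	mutex_lock(&dev->struct_mutex);
	mutex_lock(&dev_priv->fbc.lock);
	mutex_lock(&dev_priv->mm.stolen_lock);
	/* e.g. carve a compressed framebuffer out of stolen memory */
	mutex_unlock(&dev_priv->mm.stolen_lock);
	mutex_unlock(&dev_priv->fbc.lock);
	mutex_unlock(&dev->struct_mutex);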
@@ -1354,6 +1410,11 @@ enum modeset_restore {
        MODESET_SUSPENDED,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
 struct ddi_vbt_port_info {
        /*
         * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1366,6 +1427,8 @@ struct ddi_vbt_port_info {
        uint8_t supports_dvi:1;
        uint8_t supports_hdmi:1;
        uint8_t supports_dp:1;
+
+       uint8_t alternate_aux_channel;
 };
 
 enum psr_lines_to_wait {
@@ -1461,23 +1524,27 @@ struct ilk_wm_values {
        enum intel_ddb_partitioning partitioning;
 };
 
-struct vlv_wm_values {
-       struct {
-               uint16_t primary;
-               uint16_t sprite[2];
-               uint8_t cursor;
-       } pipe[3];
+struct vlv_pipe_wm {
+       uint16_t primary;
+       uint16_t sprite[2];
+       uint8_t cursor;
+};
 
-       struct {
-               uint16_t plane;
-               uint8_t cursor;
-       } sr;
+struct vlv_sr_wm {
+       uint16_t plane;
+       uint8_t cursor;
+};
 
+struct vlv_wm_values {
+       struct vlv_pipe_wm pipe[3];
+       struct vlv_sr_wm sr;
        struct {
                uint8_t cursor;
                uint8_t sprite[2];
                uint8_t primary;
        } ddl[3];
+       uint8_t level;
+       bool cxsr;
 };
 
 struct skl_ddb_entry {
@@ -1611,6 +1678,18 @@ struct i915_virtual_gpu {
        bool active;
 };
 
+struct i915_execbuffer_params {
+       struct drm_device               *dev;
+       struct drm_file                 *file;
+       uint32_t                        dispatch_flags;
+       uint32_t                        args_batch_start_offset;
+       uint32_t                        batch_obj_vm_offset;
+       struct intel_engine_cs          *ring;
+       struct drm_i915_gem_object      *batch_obj;
+       struct intel_context            *ctx;
+       struct drm_i915_gem_request     *request;
+};
+
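
The execbuffer arguments that used to be passed individually are collected into this block so the legacy-ring and execlists backends share one submit signature. Assumed caller shape (abridged; the vtable field name 'gt' is taken from the full header, and args/eb come from the surrounding execbuffer code):

	struct i915_execbuffer_params params = {
		.dev            = dev,
		.file           = file,
		.ring           = ring,
		.ctx            = ctx,
		.batch_obj      = batch_obj,
		.dispatch_flags = dispatch_flags,
	};

	ret = dev_priv->gt.execbuf_submit(&params, args, &eb->vmas);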
 struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *objects;
@@ -1680,19 +1759,7 @@ struct drm_i915_private {
        u32 pm_rps_events;
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
-       struct work_struct hotplug_work;
-       struct {
-               unsigned long hpd_last_jiffies;
-               int hpd_cnt;
-               enum {
-                       HPD_ENABLED = 0,
-                       HPD_DISABLED = 1,
-                       HPD_MARK_DISABLED = 2
-               } hpd_mark;
-       } hpd_stats[HPD_NUM_PINS];
-       u32 hpd_event_bits;
-       struct delayed_work hotplug_reenable_work;
-
+       struct i915_hotplug hotplug;
        struct i915_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
@@ -1718,7 +1785,7 @@ struct drm_i915_private {
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_boot_cdclk;
-       unsigned int cdclk_freq;
+       unsigned int cdclk_freq, max_cdclk_freq;
        unsigned int hpll_freq;
 
        /**
@@ -1769,9 +1836,6 @@ struct drm_i915_private {
 
        /* Reclocking support */
        bool render_reclock_avail;
-       bool lvds_downclock_avail;
-       /* indicates the reduced downclock for LVDS*/
-       int lvds_downclock;
 
        struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1858,29 +1922,11 @@ struct drm_i915_private {
 
        struct i915_runtime_pm pm;
 
-       struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-       u32 long_hpd_port_mask;
-       u32 short_hpd_port_mask;
-       struct work_struct dig_port_work;
-
-       /*
-        * if we get a HPD irq from DP and a HPD irq from non-DP
-        * the non-DP HPD could block the workqueue on a mode config
-        * mutex getting, that userspace may have taken. However
-        * userspace is waiting on the DP workqueue to run which is
-        * blocked behind the non-DP one.
-        */
-       struct workqueue_struct *dp_wq;
-
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
-               int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
-                                     struct intel_engine_cs *ring,
-                                     struct intel_context *ctx,
+               int (*execbuf_submit)(struct i915_execbuffer_params *params,
                                      struct drm_i915_gem_execbuffer2 *args,
-                                     struct list_head *vmas,
-                                     struct drm_i915_gem_object *batch_obj,
-                                     u64 exec_start, u32 flags);
+                                     struct list_head *vmas);
                int (*init_rings)(struct drm_device *dev);
                void (*cleanup_ring)(struct intel_engine_cs *ring);
                void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2148,7 +2194,8 @@ struct drm_i915_gem_request {
        struct intel_context *ctx;
        struct intel_ringbuffer *ringbuf;
 
-       /** Batch buffer related to this request if any */
+       /** Batch buffer related to this request if any (used for
+           error state dump only) */
        struct drm_i915_gem_object *batch_obj;
 
        /** Time at which this request was emitted, in jiffies. */
@@ -2186,8 +2233,12 @@ struct drm_i915_gem_request {
 };
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx);
+                          struct intel_context *ctx,
+                          struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file);
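
i915_gem_request_alloc() now returns an error code and hands the request back through req_out, with i915_gem_request_cancel() covering failures after allocation but before submission. Sketch of the implied caller flow (error-handling shape assumed; ring and ctx come from the surrounding code):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	/* ... emit commands into the request ... */
	if (ret) {
		i915_gem_request_cancel(req);
		return ret;
	}
	i915_add_request(req);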
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2391,6 +2442,9 @@ struct drm_i915_cmd_table {
                                 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
                                 (INTEL_DEVID(dev) & 0xf) == 0xb ||     \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)                (IS_BROADWELL(dev) && \
+                                (INTEL_DEVID(dev) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
@@ -2400,6 +2454,14 @@ struct drm_i915_cmd_table {
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev)                (INTEL_DEVID(dev) == 0x0A0E || \
                                 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)                (INTEL_DEVID(dev) == 0x1906 || \
+                                INTEL_DEVID(dev) == 0x1913 || \
+                                INTEL_DEVID(dev) == 0x1916 || \
+                                INTEL_DEVID(dev) == 0x1921 || \
+                                INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)                (INTEL_DEVID(dev) == 0x190E || \
+                                INTEL_DEVID(dev) == 0x1915 || \
+                                INTEL_DEVID(dev) == 0x191E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0           (0x0)
@@ -2466,9 +2528,6 @@ struct drm_i915_cmd_table {
  */
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
                                                      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)  (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)  (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
 
@@ -2494,6 +2553,12 @@ struct drm_i915_cmd_table {
 
 #define HAS_CSR(dev)   (IS_SKYLAKE(dev))
 
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+                                   INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)        (INTEL_INFO(dev)->gen >= 6 && \
+                                !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE           0x1c00
@@ -2533,7 +2598,6 @@ struct i915_params {
        int modeset;
        int panel_ignore_lid;
        int semaphores;
-       unsigned int lvds_downclock;
        int lvds_channel_mode;
        int panel_use_ssc;
        int vbt_sdvo_panel_type;
@@ -2555,10 +2619,11 @@ struct i915_params {
        bool reset;
        bool disable_display;
        bool disable_vtd_wa;
+       bool enable_guc_submission;
+       int guc_log_level;
        int use_mmio_flip;
        int mmio_debug;
        bool verbose_state_checks;
-       bool nuclear_pageflip;
        int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2572,21 +2637,27 @@ extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
                                  struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
 #endif
 extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 void i915_firmware_load_error_print(const char *fw_path, int err);
 
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2594,7 +2665,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
                       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
@@ -2661,19 +2731,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-                                       struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-                                        struct drm_file *file,
-                                        struct intel_engine_cs *ring,
-                                        struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
-                                  struct drm_file *file,
-                                  struct intel_engine_cs *ring,
-                                  struct intel_context *ctx,
+                                       struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                                   struct drm_i915_gem_execbuffer2 *args,
-                                  struct list_head *vmas,
-                                  struct drm_i915_gem_object *batch_obj,
-                                  u64 exec_start, u32 flags);
+                                  struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2706,6 +2768,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+               struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
                  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2780,9 +2844,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                        struct intel_engine_cs *to);
+                        struct intel_engine_cs *to,
+                        struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring);
+                            struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
@@ -2811,11 +2876,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2824,7 +2884,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2859,16 +2918,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
-                      struct drm_file *file,
-                      struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
-       __i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+                       struct drm_i915_gem_object *batch_obj,
+                       bool flush_caches);
+#define i915_add_request(req) \
+       __i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+       __i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
@@ -2888,6 +2949,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_engine_cs *pipelined,
+                                    struct drm_i915_gem_request **pipelined_request,
                                     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
                                              const struct i915_ggtt_view *view);
@@ -2911,8 +2973,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                              const struct i915_ggtt_view *view);
@@ -3007,15 +3067,27 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
        i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
-                       struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
@@ -3065,9 +3137,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 }
 
 /* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+                               struct drm_mm_node *node, u64 size,
+                               unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+                                struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3097,10 +3172,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
                obj->tiling_mode != I915_TILING_NONE;
 }
 
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
 /* i915_gem_debug.c */
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
@@ -3222,8 +3293,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
-                                        bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
index 52b446b27b4d08359ce50577f53176629e323a64..84f91bcc12f7946de23d71632cd3fbf66f62312e 100644 (file)
@@ -46,11 +46,6 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-                                        struct drm_i915_fence_reg *fence,
-                                        bool enable);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
@@ -66,18 +61,6 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
        return obj->pin_display;
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-       if (obj->tiling_mode)
-               i915_gem_release_mmap(obj);
-
-       /* As we do not have an associated fence register, we will force
-        * a tiling change if we ever need to acquire one.
-        */
-       obj->fence_dirty = false;
-       obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
@@ -149,14 +132,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
-       struct drm_i915_gem_object *obj;
+       struct i915_gtt *ggtt = &dev_priv->gtt;
+       struct i915_vma *vma;
        size_t pinned;
 
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (i915_gem_obj_is_pinned(obj))
-                       pinned += i915_gem_obj_ggtt_size(obj);
+       list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+               if (vma->pin_count)
+                       pinned += vma->node.size;
+       list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+               if (vma->pin_count)
+                       pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
        args->aper_size = dev_priv->gtt.base.total;
@@ -347,7 +334,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
 
@@ -368,7 +355,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        i915_gem_chipset_flush(dev);
 
 out:
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
 }
 
@@ -801,7 +788,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 
        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
        while (remain > 0) {
                /* Operation in this page
@@ -832,7 +819,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        }
 
 out_flush:
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
        i915_gem_object_ggtt_unpin(obj);
 out:
@@ -945,7 +932,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        if (ret)
                return ret;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
        i915_gem_object_pin_pages(obj);
 
@@ -1025,7 +1012,7 @@ out:
        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
 }
 
@@ -1146,23 +1133,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
        return 0;
 }
 
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
-       int ret;
-
-       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
-       ret = 0;
-       if (req == req->ring->outstanding_lazy_request)
-               ret = i915_add_request(req->ring);
-
-       return ret;
-}
-
 static void fake_irq(unsigned long data)
 {
        wake_up_process((struct task_struct *)data);
@@ -1334,6 +1304,33 @@ out:
        return ret;
 }
 
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file)
+{
+       struct drm_i915_private *dev_private;
+       struct drm_i915_file_private *file_priv;
+
+       WARN_ON(!req || !file || req->file_priv);
+
+       if (!req || !file)
+               return -EINVAL;
+
+       if (req->file_priv)
+               return -EINVAL;
+
+       dev_private = req->ring->dev->dev_private;
+       file_priv = file->driver_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       req->file_priv = file_priv;
+       list_add_tail(&req->client_list, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
+
+       req->pid = get_pid(task_pid(current));
+
+       return 0;
+}
+
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
@@ -1346,6 +1343,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
+
+       put_pid(request->pid);
+       request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1365,8 +1365,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
-       put_pid(request->pid);
-
        i915_gem_request_unreference(request);
 }
 
@@ -1415,10 +1413,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(req);
-       if (ret)
-               return ret;
-
        ret = __i915_wait_request(req,
                                  atomic_read(&dev_priv->gpu_error.reset_counter),
                                  interruptible, NULL, NULL);
@@ -1518,10 +1512,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                if (req == NULL)
                        return 0;
 
-               ret = i915_gem_check_olr(req);
-               if (ret)
-                       goto err;
-
                requests[n++] = i915_gem_request_reference(req);
        } else {
                for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1531,10 +1521,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                        if (req == NULL)
                                continue;
 
-                       ret = i915_gem_check_olr(req);
-                       if (ret)
-                               goto err;
-
                        requests[n++] = i915_gem_request_reference(req);
                }
        }
@@ -1545,7 +1531,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                          NULL, rps);
        mutex_lock(&dev->struct_mutex);
 
-err:
        for (i = 0; i < n; i++) {
                if (ret == 0)
                        i915_gem_object_retire_request(obj, requests[i]);
@@ -1613,6 +1598,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        else
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
+       if (write_domain != 0)
+               intel_fb_obj_invalidate(obj,
+                                       write_domain == I915_GEM_DOMAIN_GTT ?
+                                       ORIGIN_GTT : ORIGIN_CPU);
+
 unref:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -2349,9 +2339,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring)
+                            struct drm_i915_gem_request *req)
 {
        struct drm_i915_gem_object *obj = vma->obj;
+       struct intel_engine_cs *ring;
+
+       ring = i915_gem_request_get_ring(req);
 
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
@@ -2359,8 +2352,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        obj->active |= intel_ring_flag(ring);
 
        list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-       i915_gem_request_assign(&obj->last_read_req[ring->id],
-                               intel_ring_get_request(ring));
+       i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2372,7 +2364,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
        RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
-       intel_fb_obj_flush(obj, true);
+       intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
@@ -2393,6 +2385,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
        if (obj->active)
                return;
 
+       /* Bump our place on the bound list to keep it roughly in LRU order
+        * so that we don't steal from recently used but inactive objects
+        * (unless we are forced to, of course!)
+        */
+       list_move_tail(&obj->global_list,
+                      &to_i915(obj->base.dev)->mm.bound_list);
+
        list_for_each_entry(vma, &obj->vma_list, vma_link) {
                if (!list_empty(&vma->mm_list))
                        list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2472,24 +2471,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
        return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
-                      struct drm_file *file,
-                      struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+                       struct drm_i915_gem_object *obj,
+                       bool flush_caches)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_request *request;
+       struct intel_engine_cs *ring;
+       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        int ret;
 
-       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
-               return -ENOMEM;
+               return;
 
-       if (i915.enable_execlists) {
-               ringbuf = request->ctx->engine[ring->id].ringbuf;
-       } else
-               ringbuf = ring->buffer;
+       ring = request->ring;
+       dev_priv = ring->dev->dev_private;
+       ringbuf = request->ringbuf;
+
+       /*
+        * To ensure that this call will not fail, space for its emissions
+        * should already have been reserved in the ring buffer. Let the ring
+        * know that it is time to use that space up.
+        */
+       intel_ring_reserved_space_use(ringbuf);
 
        request_start = intel_ring_get_tail(ringbuf);
        /*
@@ -2499,14 +2508,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       if (i915.enable_execlists) {
-               ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-               if (ret)
-                       return ret;
-       } else {
-               ret = intel_ring_flush_all_caches(ring);
-               if (ret)
-                       return ret;
+       if (flush_caches) {
+               if (i915.enable_execlists)
+                       ret = logical_ring_flush_all_caches(request);
+               else
+                       ret = intel_ring_flush_all_caches(request);
+               /* Not allowed to fail! */
+               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
 
        /* Record the position of the start of the request so that
@@ -2516,17 +2524,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->postfix = intel_ring_get_tail(ringbuf);
 
-       if (i915.enable_execlists) {
-               ret = ring->emit_request(ringbuf, request);
-               if (ret)
-                       return ret;
-       } else {
-               ret = ring->add_request(ring);
-               if (ret)
-                       return ret;
+       if (i915.enable_execlists) {
+               ret = ring->emit_request(request);
+       } else {
+               ret = ring->add_request(request);
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
+       /* Not allowed to fail! */
+       WARN(ret, "emit|add_request failed: %d!\n", ret);
 
        request->head = request_start;
 
@@ -2538,34 +2544,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->batch_obj = obj;
 
-       if (!i915.enable_execlists) {
-               /* Hold a reference to the current context so that we can inspect
-                * it later in case a hangcheck error event fires.
-                */
-               request->ctx = ring->last_context;
-               if (request->ctx)
-                       i915_gem_context_reference(request->ctx);
-       }
-
        request->emitted_jiffies = jiffies;
        ring->last_submitted_seqno = request->seqno;
        list_add_tail(&request->list, &ring->request_list);
-       request->file_priv = NULL;
-
-       if (file) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-
-               spin_lock(&file_priv->mm.lock);
-               request->file_priv = file_priv;
-               list_add_tail(&request->client_list,
-                             &file_priv->mm.request_list);
-               spin_unlock(&file_priv->mm.lock);
-
-               request->pid = get_pid(task_pid(current));
-       }
 
        trace_i915_gem_request_add(request);
-       ring->outstanding_lazy_request = NULL;
 
        i915_queue_hangcheck(ring->dev);
 
@@ -2574,7 +2557,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
                           round_jiffies_up_relative(HZ));
        intel_mark_busy(dev_priv->dev);
 
-       return 0;
+       /* Sanity check that the reserved size was large enough. */
+       intel_ring_reserved_space_end(ringbuf);
 }
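With allocation now reserving ring space up front and __i915_add_request() forbidden to fail, callers are expected to follow an alloc/cancel/add lifecycle; a sketch of the pattern, where do_work() is a stand-in for whatever commands the caller emits:

        struct drm_i915_gem_request *req;
        int ret;

        /* Allocation also reserves ring space, so the final add cannot fail. */
        ret = i915_gem_request_alloc(ring, ring->default_context, &req);
        if (ret)
                return ret;

        ret = do_work(req);             /* emit commands against the request */
        if (ret) {
                /* Nothing was submitted: drop the reservation and the reference. */
                i915_gem_request_cancel(req);
                return ret;
        }

        i915_add_request(req);          /* void: queues and tracks the request */

The same shape appears below in the rewritten i915_gpu_idle() and i915_gem_init_hw().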
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2628,12 +2612,13 @@ void i915_gem_request_free(struct kref *req_ref)
                                                 typeof(*req), ref);
        struct intel_context *ctx = req->ctx;
 
+       if (req->file_priv)
+               i915_gem_request_remove_from_client(req);
+
        if (ctx) {
                if (i915.enable_execlists) {
-                       struct intel_engine_cs *ring = req->ring;
-
-                       if (ctx != ring->default_context)
-                               intel_lr_context_unpin(ring, ctx);
+                       if (ctx != req->ring->default_context)
+                               intel_lr_context_unpin(req);
                }
 
                i915_gem_context_unreference(ctx);
@@ -2643,36 +2628,63 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx)
+                          struct intel_context *ctx,
+                          struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
        struct drm_i915_gem_request *req;
        int ret;
 
-       if (ring->outstanding_lazy_request)
-               return 0;
+       if (!req_out)
+               return -EINVAL;
+
+       *req_out = NULL;
 
        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;
 
-       kref_init(&req->ref);
-       req->i915 = dev_priv;
-
        ret = i915_gem_get_seqno(ring->dev, &req->seqno);
        if (ret)
                goto err;
 
+       kref_init(&req->ref);
+       req->i915 = dev_priv;
        req->ring = ring;
+       req->ctx  = ctx;
+       i915_gem_context_reference(req->ctx);
 
        if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req, ctx);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
                ret = intel_ring_alloc_request_extras(req);
-       if (ret)
+       if (ret) {
+               i915_gem_context_unreference(req->ctx);
                goto err;
+       }
+
+       /*
+        * Reserve space in the ring buffer for all the commands required to
+        * eventually emit this request. This is to guarantee that the
+        * i915_add_request() call can't fail. Note that the reserve may need
+        * to be redone if the request is not actually submitted straight
+        * away, e.g. because a GPU scheduler has deferred it.
+        */
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_reserve_space(req);
+       else
+               ret = intel_ring_reserve_space(req);
+       if (ret) {
+               /*
+                * At this point, the request is fully allocated even if not
+                * fully prepared. Thus it can be cleaned up using the proper
+                * free code.
+                */
+               i915_gem_request_cancel(req);
+               return ret;
+       }
 
-       ring->outstanding_lazy_request = req;
+       *req_out = req;
        return 0;
 
 err:
@@ -2680,6 +2692,13 @@ err:
        return ret;
 }
 
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+       intel_ring_reserved_space_cancel(req->ringbuf);
+
+       i915_gem_request_unreference(req);
+}
+
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
@@ -2741,7 +2760,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                list_del(&submit_req->execlist_link);
 
                if (submit_req->ctx != ring->default_context)
-                       intel_lr_context_unpin(ring, submit_req->ctx);
+                       intel_lr_context_unpin(submit_req);
 
                i915_gem_request_unreference(submit_req);
        }
@@ -2762,30 +2781,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                i915_gem_request_retire(request);
        }
-
-       /* This may not have been flushed before the reset, so clean it now */
-       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-
-       for (i = 0; i < dev_priv->num_fence_regs; i++) {
-               struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-               /*
-                * Commit delayed tiling changes if we have an object still
-                * attached to the fence, otherwise just clear the fence.
-                */
-               if (reg->obj) {
-                       i915_gem_object_update_fence(reg->obj, reg,
-                                                    reg->obj->tiling_mode);
-               } else {
-                       i915_gem_write_fence(dev, i, NULL);
-               }
-       }
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2947,7 +2942,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-       int ret, i;
+       int i;
 
        if (!obj->active)
                return 0;
@@ -2962,10 +2957,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (list_empty(&req->list))
                        goto retire;
 
-               ret = i915_gem_check_olr(req);
-               if (ret)
-                       return ret;
-
                if (i915_gem_request_completed(req, true)) {
                        __i915_gem_request_retire__upto(req);
 retire:
@@ -3068,25 +3059,22 @@ out:
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                       struct intel_engine_cs *to,
-                      struct drm_i915_gem_request *req)
+                      struct drm_i915_gem_request *from_req,
+                      struct drm_i915_gem_request **to_req)
 {
        struct intel_engine_cs *from;
        int ret;
 
-       from = i915_gem_request_get_ring(req);
+       from = i915_gem_request_get_ring(from_req);
        if (to == from)
                return 0;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(from_req, true))
                return 0;
 
-       ret = i915_gem_check_olr(req);
-       if (ret)
-               return ret;
-
        if (!i915_semaphore_is_enabled(obj->base.dev)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               ret = __i915_wait_request(req,
+               ret = __i915_wait_request(from_req,
                                          atomic_read(&i915->gpu_error.reset_counter),
                                          i915->mm.interruptible,
                                          NULL,
@@ -3094,16 +3082,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                if (ret)
                        return ret;
 
-               i915_gem_object_retire_request(obj, req);
+               i915_gem_object_retire_request(obj, from_req);
        } else {
                int idx = intel_ring_sync_index(from, to);
-               u32 seqno = i915_gem_request_get_seqno(req);
+               u32 seqno = i915_gem_request_get_seqno(from_req);
+
+               WARN_ON(!to_req);
 
                if (seqno <= from->semaphore.sync_seqno[idx])
                        return 0;
 
-               trace_i915_gem_ring_sync_to(from, to, req);
-               ret = to->semaphore.sync_to(to, from, seqno);
+               if (*to_req == NULL) {
+                       ret = i915_gem_request_alloc(to, to->default_context, to_req);
+                       if (ret)
+                               return ret;
+               }
+
+               trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+               ret = to->semaphore.sync_to(*to_req, from, seqno);
                if (ret)
                        return ret;
 
@@ -3123,11 +3119,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  *
  * @obj: object which may be in use on another ring.
  * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
  * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
  * into a buffer at any time, but multiple readers. To ensure each has
  * a coherent view of memory, we must:
  *
@@ -3138,11 +3137,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
+ * For CPU synchronisation (to == NULL) no request is required. For syncing with
+ * rings, to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_engine_cs *to)
+                    struct intel_engine_cs *to,
+                    struct drm_i915_gem_request **to_req)
 {
        const bool readonly = obj->base.pending_write_domain == 0;
        struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3164,7 +3174,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                                req[n++] = obj->last_read_req[i];
        }
        for (i = 0; i < n; i++) {
-               ret = __i915_gem_object_sync(obj, to, req[i]);
+               ret = __i915_gem_object_sync(obj, to, req[i], to_req);
                if (ret)
                        return ret;
        }
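The contract documented above implies a caller pattern along these lines (a sketch; obj and ring stand for the caller's object and target engine):

        struct drm_i915_gem_request *req = NULL;
        int ret;

        ret = i915_gem_object_sync(obj, ring, &req);
        if (ret)
                return ret;

        /* A request comes back only if semaphore commands were actually
         * emitted; if so, submitting it is the caller's responsibility. */
        if (req)
                i915_add_request(req);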
@@ -3275,354 +3285,27 @@ int i915_gpu_idle(struct drm_device *dev)
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
                if (!i915.enable_execlists) {
-                       ret = i915_switch_context(ring, ring->default_context);
+                       struct drm_i915_gem_request *req;
+
+                       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
                        if (ret)
                                return ret;
-               }
 
-               ret = intel_ring_idle(ring);
-               if (ret)
-                       return ret;
-       }
-
-       WARN_ON(i915_verify_lists(dev));
-       return 0;
-}
-
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int fence_reg;
-       int fence_pitch_shift;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               fence_reg = FENCE_REG_SANDYBRIDGE_0;
-               fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
-       } else {
-               fence_reg = FENCE_REG_965_0;
-               fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
-       }
-
-       fence_reg += reg * 8;
+                       ret = i915_switch_context(req);
+                       if (ret) {
+                               i915_gem_request_cancel(req);
+                               return ret;
+                       }
 
-       /* To w/a incoherency with non-atomic 64-bit register updates,
-        * we split the 64-bit update into two 32-bit writes. In order
-        * for a partial fence not to be evaluated between writes, we
-        * precede the update with write to turn off the fence register,
-        * and only enable the fence as the last step.
-        *
-        * For extra levels of paranoia, we make sure each step lands
-        * before applying the next step.
-        */
-       I915_WRITE(fence_reg, 0);
-       POSTING_READ(fence_reg);
-
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
-               uint64_t val;
-
-               /* Adjust fence size to match tiled area */
-               if (obj->tiling_mode != I915_TILING_NONE) {
-                       uint32_t row_size = obj->stride *
-                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-                       size = (size / row_size) * row_size;
+                       i915_add_request_no_flush(req);
                }
 
-               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-                                0xfffff000) << 32;
-               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-               val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-               val |= I965_FENCE_REG_VALID;
-
-               I915_WRITE(fence_reg + 4, val >> 32);
-               POSTING_READ(fence_reg + 4);
-
-               I915_WRITE(fence_reg + 0, val);
-               POSTING_READ(fence_reg);
-       } else {
-               I915_WRITE(fence_reg + 4, 0);
-               POSTING_READ(fence_reg + 4);
-       }
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val;
-
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
-               int pitch_val;
-               int tile_width;
-
-               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-                    (size & -size) != size ||
-                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
-               if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-                       tile_width = 128;
-               else
-                       tile_width = 512;
-
-               /* Note: pitch better be a power of two tile widths */
-               pitch_val = obj->stride / tile_width;
-               pitch_val = ffs(pitch_val) - 1;
-
-               val = i915_gem_obj_ggtt_offset(obj);
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-               val |= I915_FENCE_SIZE_BITS(size);
-               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-               val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
-
-       if (reg < 8)
-               reg = FENCE_REG_830_0 + reg * 4;
-       else
-               reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-                               struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t val;
-
-       if (obj) {
-               u32 size = i915_gem_obj_ggtt_size(obj);
-               uint32_t pitch_val;
-
-               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-                    (size & -size) != size ||
-                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
-                    i915_gem_obj_ggtt_offset(obj), size);
-
-               pitch_val = obj->stride / 128;
-               pitch_val = ffs(pitch_val) - 1;
-
-               val = i915_gem_obj_ggtt_offset(obj);
-               if (obj->tiling_mode == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-               val |= I830_FENCE_SIZE_BITS(size);
-               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-               val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
-
-       I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-       POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-       return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* Ensure that all CPU reads are completed before installing a fence
-        * and all writes before removing the fence.
-        */
-       if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-               mb();
-
-       WARN(obj && (!obj->stride || !obj->tiling_mode),
-            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-            obj->stride, obj->tiling_mode);
-
-       if (IS_GEN2(dev))
-               i830_write_fence_reg(dev, reg, obj);
-       else if (IS_GEN3(dev))
-               i915_write_fence_reg(dev, reg, obj);
-       else if (INTEL_INFO(dev)->gen >= 4)
-               i965_write_fence_reg(dev, reg, obj);
-
-       /* And similarly be paranoid that no direct access to this region
-        * is reordered to before the fence is installed.
-        */
-       if (i915_gem_object_needs_mb(obj))
-               mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-                              struct drm_i915_fence_reg *fence)
-{
-       return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-                                        struct drm_i915_fence_reg *fence,
-                                        bool enable)
-{
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       int reg = fence_number(dev_priv, fence);
-
-       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-       if (enable) {
-               obj->fence_reg = reg;
-               fence->obj = obj;
-               list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-       } else {
-               obj->fence_reg = I915_FENCE_REG_NONE;
-               fence->obj = NULL;
-               list_del_init(&fence->lru_list);
-       }
-       obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->last_fenced_req) {
-               int ret = i915_wait_request(obj->last_fenced_req);
-               if (ret)
-                       return ret;
-
-               i915_gem_request_assign(&obj->last_fenced_req, NULL);
-       }
-
-       return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct drm_i915_fence_reg *fence;
-       int ret;
-
-       ret = i915_gem_object_wait_fence(obj);
-       if (ret)
-               return ret;
-
-       if (obj->fence_reg == I915_FENCE_REG_NONE)
-               return 0;
-
-       fence = &dev_priv->fence_regs[obj->fence_reg];
-
-       if (WARN_ON(fence->pin_count))
-               return -EBUSY;
-
-       i915_gem_object_fence_lost(obj);
-       i915_gem_object_update_fence(obj, fence, false);
-
-       return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_fence_reg *reg, *avail;
-       int i;
-
-       /* First try to find a free reg */
-       avail = NULL;
-       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-               reg = &dev_priv->fence_regs[i];
-               if (!reg->obj)
-                       return reg;
-
-               if (!reg->pin_count)
-                       avail = reg;
-       }
-
-       if (avail == NULL)
-               goto deadlock;
-
-       /* None available, try to steal one or wait for a user to finish */
-       list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-               if (reg->pin_count)
-                       continue;
-
-               return reg;
-       }
-
-deadlock:
-       /* Wait for completion of pending flips which consume fences */
-       if (intel_has_pending_fb_unpin(dev))
-               return ERR_PTR(-EAGAIN);
-
-       return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       bool enable = obj->tiling_mode != I915_TILING_NONE;
-       struct drm_i915_fence_reg *reg;
-       int ret;
-
-       /* Have we updated the tiling parameters upon the object and so
-        * will need to serialise the write to the associated fence register?
-        */
-       if (obj->fence_dirty) {
-               ret = i915_gem_object_wait_fence(obj);
+               ret = intel_ring_idle(ring);
                if (ret)
                        return ret;
        }
 
-       /* Just update our place in the LRU if our fence is getting reused. */
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               reg = &dev_priv->fence_regs[obj->fence_reg];
-               if (!obj->fence_dirty) {
-                       list_move_tail(&reg->lru_list,
-                                      &dev_priv->mm.fence_list);
-                       return 0;
-               }
-       } else if (enable) {
-               if (WARN_ON(!obj->map_and_fenceable))
-                       return -EINVAL;
-
-               reg = i915_find_fence_reg(dev);
-               if (IS_ERR(reg))
-                       return PTR_ERR(reg);
-
-               if (reg->obj) {
-                       struct drm_i915_gem_object *old = reg->obj;
-
-                       ret = i915_gem_object_wait_fence(old);
-                       if (ret)
-                               return ret;
-
-                       i915_gem_object_fence_lost(old);
-               }
-       } else
-               return 0;
-
-       i915_gem_object_update_fence(obj, reg, enable);
-
+       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
@@ -3673,9 +3356,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
-       unsigned long start =
+       u64 start =
                flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-       unsigned long end =
+       u64 end =
                flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;
@@ -3731,7 +3414,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
                          ggtt_view ? ggtt_view->type : 0,
                          size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3853,7 +3536,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
@@ -3875,7 +3558,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
@@ -3937,9 +3620,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                obj->dirty = 1;
        }
 
-       if (write)
-               intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@ -4094,12 +3774,13 @@ int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_engine_cs *pipelined,
+                                    struct drm_i915_gem_request **pipelined_request,
                                     const struct i915_ggtt_view *view)
 {
        u32 old_read_domains, old_write_domain;
        int ret;
 
-       ret = i915_gem_object_sync(obj, pipelined);
+       ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
        if (ret)
                return ret;
 
@@ -4210,9 +3891,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
-       if (write)
-               intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@ -4253,6 +3931,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
+               /*
+                * Note that the request might not have been submitted yet,
+                * in which case emitted_jiffies will be zero.
+                */
+               if (!request->emitted_jiffies)
+                       continue;
+
                target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4423,32 +4108,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
        --vma->pin_count;
 }
 
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-               struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-               WARN_ON(!ggtt_vma ||
-                       dev_priv->fence_regs[obj->fence_reg].pin_count >
-                       ggtt_vma->pin_count);
-               dev_priv->fence_regs[obj->fence_reg].pin_count++;
-               return true;
-       } else
-               return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-               WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-               dev_priv->fence_regs[obj->fence_reg].pin_count--;
-       }
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
@@ -4810,8 +4469,9 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4821,7 +4481,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
        if (!HAS_L3_DPF(dev) || !remap_info)
                return 0;
 
-       ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
        if (ret)
                return ret;
 
@@ -4967,7 +4627,7 @@ i915_gem_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
-       int ret, i;
+       int ret, i, j;
 
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
@@ -5004,27 +4664,55 @@ i915_gem_init_hw(struct drm_device *dev)
         */
        init_unused_rings(dev);
 
+       BUG_ON(!dev_priv->ring[RCS].default_context);
+
+       ret = i915_ppgtt_init_hw(dev);
+       if (ret) {
+               DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+               goto out;
+       }
+
+       /* Need to do basic initialisation of all rings first: */
        for_each_ring(ring, dev_priv, i) {
                ret = ring->init_hw(ring);
                if (ret)
                        goto out;
        }
 
-       for (i = 0; i < NUM_L3_SLICES(dev); i++)
-               i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+       /* Now it is safe to go back round and do everything else: */
+       for_each_ring(ring, dev_priv, i) {
+               struct drm_i915_gem_request *req;
 
-       ret = i915_ppgtt_init_hw(dev);
-       if (ret && ret != -EIO) {
-               DRM_ERROR("PPGTT enable failed %d\n", ret);
-               i915_gem_cleanup_ringbuffer(dev);
-       }
+               WARN_ON(!ring->default_context);
+
+               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+               if (ret) {
+                       i915_gem_cleanup_ringbuffer(dev);
+                       goto out;
+               }
 
-       ret = i915_gem_context_enable(dev_priv);
-       if (ret && ret != -EIO) {
-               DRM_ERROR("Context enable failed %d\n", ret);
-               i915_gem_cleanup_ringbuffer(dev);
+               if (ring->id == RCS) {
+                       for (j = 0; j < NUM_L3_SLICES(dev); j++)
+                               i915_gem_l3_remap(req, j);
+               }
 
-               goto out;
+               ret = i915_ppgtt_init_ring(req);
+               if (ret && ret != -EIO) {
+                       DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+                       i915_gem_request_cancel(req);
+                       i915_gem_cleanup_ringbuffer(dev);
+                       goto out;
+               }
+
+               ret = i915_gem_context_enable(req);
+               if (ret && ret != -EIO) {
+                       DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+                       i915_gem_request_cancel(req);
+                       i915_gem_cleanup_ringbuffer(dev);
+                       goto out;
+               }
+
+               i915_add_request_no_flush(req);
        }
 
 out:
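
The rework above makes the request lifecycle explicit: allocate a request, emit work against it, then either submit it or cancel it on error. A minimal sketch of that contract, using only functions visible in this hunk (emit_work() is a hypothetical placeholder):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = emit_work(req);			/* hypothetical emit helper */
	if (ret) {
		i915_gem_request_cancel(req);	/* never submitted */
		return ret;
	}

	i915_add_request_no_flush(req);		/* hardware owns it now */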
@@ -5111,6 +4799,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 
        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.cleanup_ring(ring);
+
+       if (i915.enable_execlists)
+               /*
+                * Neither the BIOS, ourselves nor any other kernel
+                * expects the system to be in execlists mode on startup,
+                * so we need to reset the GPU back to legacy mode.
+                */
+               intel_gpu_reset(dev);
 }
 
 static void
@@ -5388,3 +5084,42 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
        return false;
 }
 
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+                                const void *data, size_t size)
+{
+       struct drm_i915_gem_object *obj;
+       struct sg_table *sg;
+       size_t bytes;
+       int ret;
+
+       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR_OR_NULL(obj))
+               return obj;
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, true);
+       if (ret)
+               goto fail;
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               goto fail;
+
+       i915_gem_object_pin_pages(obj);
+       sg = obj->pages;
+       bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+       i915_gem_object_unpin_pages(obj);
+
+       if (WARN_ON(bytes != size)) {
+               DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
+               ret = -EFAULT;
+               goto fail;
+       }
+
+       return obj;
+
+fail:
+       drm_gem_object_unreference(&obj->base);
+       return ERR_PTR(ret);
+}
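
A hedged usage sketch for the new helper; fw_data and fw_size stand in for any caller-supplied blob (e.g. firmware), and the usual struct_mutex locking rules are assumed:

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, fw_data, fw_size);
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOMEM;

	/* obj now holds a CPU-domain copy of the supplied data */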
index 48afa777e94aa849e13890be64b3d4303283eee1..8e893b354bccdfb5205dea4b4e3559d02225fe54 100644 (file)
@@ -287,6 +287,7 @@ err_unpin:
        if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
                i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+       idr_remove(&file_priv->context_idr, ctx->user_handle);
        i915_gem_context_unreference(ctx);
        return ERR_PTR(ret);
 }
@@ -407,32 +408,23 @@ void i915_gem_context_fini(struct drm_device *dev)
        i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring;
-       int ret, i;
-
-       BUG_ON(!dev_priv->ring[RCS].default_context);
+       struct intel_engine_cs *ring = req->ring;
+       int ret;
 
        if (i915.enable_execlists) {
-               for_each_ring(ring, dev_priv, i) {
-                       if (ring->init_context) {
-                               ret = ring->init_context(ring,
-                                               ring->default_context);
-                               if (ret) {
-                                       DRM_ERROR("ring init context: %d\n",
-                                                       ret);
-                                       return ret;
-                               }
-                       }
-               }
+               if (ring->init_context == NULL)
+                       return 0;
 
+               ret = ring->init_context(req);
        } else
-               for_each_ring(ring, dev_priv, i) {
-                       ret = i915_switch_context(ring, ring->default_context);
-                       if (ret)
-                               return ret;
-               }
+               ret = i915_switch_context(req);
+
+       if (ret) {
+               DRM_ERROR("ring init context: %d\n", ret);
+               return ret;
+       }
 
        return 0;
 }
@@ -485,10 +477,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-              struct intel_context *new_context,
-              u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
@@ -503,13 +494,15 @@ mi_set_context(struct intel_engine_cs *ring,
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(ring->dev)) {
-               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
 
        /* These flags are for resource streamer on HSW+ */
-       if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+       if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+               flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+       else if (INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
@@ -517,7 +510,7 @@ mi_set_context(struct intel_engine_cs *ring,
        if (INTEL_INFO(ring->dev)->gen >= 7)
                len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-       ret = intel_ring_begin(ring, len);
+       ret = intel_ring_begin(req, len);
        if (ret)
                return ret;
 
@@ -540,7 +533,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -621,9 +614,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
        return false;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-                    struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
 {
+       struct intel_context *to = req->ctx;
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_context *from = ring->last_context;
        u32 hw_flags = 0;
@@ -659,7 +653,7 @@ static int do_switch(struct intel_engine_cs *ring,
                 * Register Immediate commands in Ring Buffer before submitting
                 * a context."*/
                trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        goto unpin_out;
 
@@ -701,7 +695,7 @@ static int do_switch(struct intel_engine_cs *ring,
        WARN_ON(needs_pd_load_pre(ring, to) &&
                needs_pd_load_post(ring, to, hw_flags));
 
-       ret = mi_set_context(ring, to, hw_flags);
+       ret = mi_set_context(req, hw_flags);
        if (ret)
                goto unpin_out;
 
@@ -710,7 +704,7 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        if (needs_pd_load_post(ring, to, hw_flags)) {
                trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
                /* The hardware context switch is emitted, but we haven't
                 * actually changed the state - so it's probably safe to bail
                 * here. Still, let the user know something dangerous has
@@ -726,7 +720,7 @@ static int do_switch(struct intel_engine_cs *ring,
                if (!(to->remap_slice & (1<<i)))
                        continue;
 
-               ret = i915_gem_l3_remap(ring, i);
+               ret = i915_gem_l3_remap(req, i);
                /* If it failed, try again next round */
                if (ret)
                        DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -742,7 +736,7 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        if (from != NULL) {
                from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -766,7 +760,7 @@ done:
 
        if (uninitialized) {
                if (ring->init_context) {
-                       ret = ring->init_context(ring, to);
+                       ret = ring->init_context(req);
                        if (ret)
                                DRM_ERROR("ring init context: %d\n", ret);
                }
@@ -782,8 +776,7 @@ unpin_out:
 
 /**
  * i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -794,25 +787,25 @@ unpin_out:
  * switched by writing to the ELSP and requests keep a reference to their
  * context.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
-                       struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-               if (to != ring->last_context) {
-                       i915_gem_context_reference(to);
+       if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+               if (req->ctx != ring->last_context) {
+                       i915_gem_context_reference(req->ctx);
                        if (ring->last_context)
                                i915_gem_context_unreference(ring->last_context);
-                       ring->last_context = to;
+                       ring->last_context = req->ctx;
                }
                return 0;
        }
 
-       return do_switch(ring, to);
+       return do_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -898,6 +891,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                args->value = ctx->hang_stats.ban_period_seconds;
                break;
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
+               args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -935,6 +931,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                else
                        ctx->hang_stats.ban_period_seconds = args->value;
                break;
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
+               if (args->size) {
+                       ret = -EINVAL;
+               } else {
+                       ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+                       ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+               }
+               break;
        default:
                ret = -EINVAL;
                break;
index a7fa14516cda5b002a0b42ec9a05677b62a6cd7e..923a3c4bf0b79c71b8a05c417211ae9507f919aa 100644 (file)
@@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                            struct list_head *vmas,
+                           struct intel_context *ctx,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                obj = vma->obj;
                entry = vma->exec_entry;
 
+               if (ctx->flags & CONTEXT_NO_ZEROMAP)
+                       entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
                if (!has_fenced_gpu_access)
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
@@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
                                  struct intel_engine_cs *ring,
                                  struct eb_vmas *eb,
-                                 struct drm_i915_gem_exec_object2 *exec)
+                                 struct drm_i915_gem_exec_object2 *exec,
+                                 struct intel_context *ctx)
 {
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
@@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
        if (ret)
                goto err;
 
@@ -887,10 +892,10 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(ring);
+       const unsigned other_rings = ~intel_ring_flag(req->ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, ring);
+                       ret = i915_gem_object_sync(obj, req->ring, &req);
                        if (ret)
                                return ret;
                }
@@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
        }
 
        if (flush_chipset)
-               i915_gem_chipset_flush(ring->dev);
+               i915_gem_chipset_flush(req->ring->dev);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return intel_ring_invalidate_all_caches(ring);
+       return intel_ring_invalidate_all_caches(req);
 }
 
 static bool
@@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev,
                if (exec[i].flags & invalid_flags)
                        return -EINVAL;
 
+               if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+                       return -EINVAL;
+
                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
@@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 
 void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-                                  struct intel_engine_cs *ring)
+                                  struct drm_i915_gem_request *req)
 {
-       struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+       struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
@@ -1029,12 +1037,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
 
-               i915_vma_move_to_active(vma, ring);
+               i915_vma_move_to_active(vma, req);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        i915_gem_request_assign(&obj->last_write_req, req);
 
-                       intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+                       intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 }
 
 void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-                                   struct drm_file *file,
-                                   struct intel_engine_cs *ring,
-                                   struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
        /* Unconditionally force add_request to emit a full flush. */
-       ring->gpu_caches_dirty = true;
+       params->ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)__i915_add_request(ring, file, obj);
+       __i915_add_request(params->request, params->batch_obj, true);
 }
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-                           struct intel_engine_cs *ring)
+                           struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;
 
@@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
                return -EINVAL;
        }
 
-       ret = intel_ring_begin(ring, 4 * 3);
+       ret = intel_ring_begin(req, 4 * 3);
        if (ret)
                return ret;
 
@@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 }
 
 static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
              struct drm_clip_rect *box,
              int DR1, int DR4)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring,
        }
 
        if (INTEL_INFO(ring->dev)->gen >= 4) {
-               ret = intel_ring_begin(ring, 4);
+               ret = intel_ring_begin(req, 4);
                if (ret)
                        return ret;
 
@@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring,
                intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
                intel_ring_emit(ring, DR4);
        } else {
-               ret = intel_ring_begin(ring, 6);
+               ret = intel_ring_begin(req, 6);
                if (ret)
                        return ret;
 
@@ -1186,17 +1193,15 @@ err:
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-                              struct intel_engine_cs *ring,
-                              struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas,
-                              struct drm_i915_gem_object *batch_obj,
-                              u64 exec_start, u32 dispatch_flags)
+                              struct list_head *vmas)
 {
        struct drm_clip_rect *cliprects = NULL;
+       struct drm_device *dev = params->dev;
+       struct intel_engine_cs *ring = params->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u64 exec_len;
+       u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
        int i, ret = 0;
@@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                }
        }
 
-       ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+       ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
        if (ret)
                goto error;
 
-       ret = i915_switch_context(ring, ctx);
+       ret = i915_switch_context(params->request);
        if (ret)
                goto error;
 
-       WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+       WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
             "%s didn't clear reload\n", ring->name);
 
        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
        if (ring == &dev_priv->ring[RCS] &&
                        instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_ring_begin(ring, 4);
+               ret = intel_ring_begin(params->request, 4);
                if (ret)
                        goto error;
 
@@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
        }
 
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-               ret = i915_reset_gen7_sol_offsets(dev, ring);
+               ret = i915_reset_gen7_sol_offsets(dev, params->request);
                if (ret)
                        goto error;
        }
 
-       exec_len = args->batch_len;
+       exec_len   = args->batch_len;
+       exec_start = params->batch_obj_vm_offset +
+                    params->args_batch_start_offset;
+
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
-                       ret = i915_emit_box(ring, &cliprects[i],
+                       ret = i915_emit_box(params->request, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto error;
 
-                       ret = ring->dispatch_execbuffer(ring,
+                       ret = ring->dispatch_execbuffer(params->request,
                                                        exec_start, exec_len,
-                                                       dispatch_flags);
+                                                       params->dispatch_flags);
                        if (ret)
                                goto error;
                }
        } else {
-               ret = ring->dispatch_execbuffer(ring,
+               ret = ring->dispatch_execbuffer(params->request,
                                                exec_start, exec_len,
-                                               dispatch_flags);
+                                               params->dispatch_flags);
                if (ret)
                        return ret;
        }
 
-       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+       trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-       i915_gem_execbuffer_move_to_active(vmas, ring);
-       i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+       i915_gem_execbuffer_move_to_active(vmas, params->request);
+       i915_gem_execbuffer_retire_commands(params);
 
 error:
        kfree(cliprects);
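
For context, the INSTPM write elided between the intel_ring_begin(params->request, 4) call above and the SOL-reset hunk follows the same request-based emit pattern; a hedged reconstruction:

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, INSTPM);
	intel_ring_emit(ring, instp_mask << 16 | instp_mode);
	intel_ring_advance(ring);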
@@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct intel_engine_cs *ring;
        struct intel_context *ctx;
        struct i915_address_space *vm;
+       struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+       struct i915_execbuffer_params *params = &params_master;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-       u64 exec_start = args->batch_start_offset;
        u32 dispatch_flags;
        int ret;
        bool need_relocs;
@@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+               if (!HAS_RESOURCE_STREAMER(dev)) {
+                       DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+                       return -EINVAL;
+               }
+               if (ring->id != RCS) {
+                       DRM_DEBUG("RS is not available on %s\n",
+                                ring->name);
+                       return -EINVAL;
+               }
+
+               dispatch_flags |= I915_DISPATCH_RS;
+       }
+
        intel_runtime_pm_get(dev_priv);
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        else
                vm = &dev_priv->gtt.base;
 
+       memset(&params_master, 0x00, sizeof(params_master));
+
        eb = eb_create(args);
        if (eb == NULL) {
                i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
        if (ret)
                goto err;
 
@@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-                                                               eb, exec);
+                                                               eb, exec, ctx);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
@@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
+       params->args_batch_start_offset = args->batch_start_offset;
        if (i915_needs_cmd_parser(ring) && args->batch_len) {
                struct drm_i915_gem_object *parsed_batch_obj;
 
@@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         * command parser has accepted.
                         */
                        dispatch_flags |= I915_DISPATCH_SECURE;
-                       exec_start = 0;
+                       params->args_batch_start_offset = 0;
                        batch_obj = parsed_batch_obj;
                }
        }
@@ -1600,14 +1626,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret)
                        goto err;
 
-               exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+               params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
        } else
-               exec_start += i915_gem_obj_offset(batch_obj, vm);
+               params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-       ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-                                         &eb->vmas, batch_obj, exec_start,
-                                         dispatch_flags);
+       /* Allocate a request for this batch buffer nice and early. */
+       ret = i915_gem_request_alloc(ring, ctx, &params->request);
+       if (ret)
+               goto err_batch_unpin;
+
+       ret = i915_gem_request_add_to_client(params->request, file);
+       if (ret)
+               goto err_batch_unpin;
 
+       /*
+        * Save assorted stuff away to pass through to *_submission().
+        * NB: This data should be 'persistent' and not local as it will
+        * be kept around beyond the duration of the IOCTL once the GPU
+        * scheduler arrives.
+        */
+       params->dev                     = dev;
+       params->file                    = file;
+       params->ring                    = ring;
+       params->dispatch_flags          = dispatch_flags;
+       params->batch_obj               = batch_obj;
+       params->ctx                     = ctx;
+
+       ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
        /*
         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
         * batch vma for correctness. For less ugly and less fragility this
@@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         */
        if (dispatch_flags & I915_DISPATCH_SECURE)
                i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);
        eb_destroy(eb);
 
+       /*
+        * If the request was created but not successfully submitted then it
+        * must be freed again. If it was submitted then it is being tracked
+        * on the active request list and no clean up is required here.
+        */
+       if (ret && params->request)
+               i915_gem_request_cancel(params->request);
+
        mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
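
The i915_execbuffer_params definition itself is not part of this diff; the following is a sketch reconstructed from the fields used above, not the authoritative declaration in i915_drv.h:

	struct i915_execbuffer_params {
		struct drm_device		*dev;
		struct drm_file			*file;
		struct intel_engine_cs		*ring;
		struct intel_context		*ctx;
		struct drm_i915_gem_request	*request;
		struct drm_i915_gem_object	*batch_obj;
		u32				dispatch_flags;
		u32				args_batch_start_offset;
		u64				batch_obj_vm_offset;
	};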
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644 (file)
index 0000000..af1f8c4
--- /dev/null
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore, on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for CPU access. Also note that some code wants an unfenced view; for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
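+
+/*
+ * Illustrative lifecycle sketch (an assumption-laden example, not a
+ * normative contract), using only the functions documented above:
+ *
+ *	ret = i915_gem_object_get_fence(obj);
+ *	if (ret == 0 && i915_gem_object_pin_fence(obj)) {
+ *		...access obj through its fenced GTT mapping...
+ *		i915_gem_object_unpin_fence(obj);
+ *	}
+ */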
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int fence_reg;
+       int fence_pitch_shift;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               fence_reg = FENCE_REG_SANDYBRIDGE_0;
+               fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+       } else {
+               fence_reg = FENCE_REG_965_0;
+               fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+       }
+
+       fence_reg += reg * 8;
+
+       /* To w/a incoherency with non-atomic 64-bit register updates,
+        * we split the 64-bit update into two 32-bit writes. In order
+        * for a partial fence not to be evaluated between writes, we
+        * precede the update with write to turn off the fence register,
+        * and only enable the fence as the last step.
+        *
+        * For extra levels of paranoia, we make sure each step lands
+        * before applying the next step.
+        */
+       I915_WRITE(fence_reg, 0);
+       POSTING_READ(fence_reg);
+
+       if (obj) {
+               u32 size = i915_gem_obj_ggtt_size(obj);
+               uint64_t val;
+
+               /* Adjust fence size to match tiled area */
+               if (obj->tiling_mode != I915_TILING_NONE) {
+                       uint32_t row_size = obj->stride *
+                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+                       size = (size / row_size) * row_size;
+               }
+
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+                                0xfffff000) << 32;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+               val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+               if (obj->tiling_mode == I915_TILING_Y)
+                       val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+               val |= I965_FENCE_REG_VALID;
+
+               I915_WRITE(fence_reg + 4, val >> 32);
+               POSTING_READ(fence_reg + 4);
+
+               I915_WRITE(fence_reg + 0, val);
+               POSTING_READ(fence_reg);
+       } else {
+               I915_WRITE(fence_reg + 4, 0);
+               POSTING_READ(fence_reg + 4);
+       }
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+
+       if (obj) {
+               u32 size = i915_gem_obj_ggtt_size(obj);
+               int pitch_val;
+               int tile_width;
+
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+                    (size & -size) != size ||
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+               if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+                       tile_width = 128;
+               else
+                       tile_width = 512;
+
+               /* Note: pitch better be a power of two tile widths */
+               pitch_val = obj->stride / tile_width;
+               pitch_val = ffs(pitch_val) - 1;
+
+               val = i915_gem_obj_ggtt_offset(obj);
+               if (obj->tiling_mode == I915_TILING_Y)
+                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+               val |= I915_FENCE_SIZE_BITS(size);
+               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+               val |= I830_FENCE_REG_VALID;
+       } else
+               val = 0;
+
+       if (reg < 8)
+               reg = FENCE_REG_830_0 + reg * 4;
+       else
+               reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+       I915_WRITE(reg, val);
+       POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+                               struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t val;
+
+       if (obj) {
+               u32 size = i915_gem_obj_ggtt_size(obj);
+               uint32_t pitch_val;
+
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+                    (size & -size) != size ||
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), size);
+
+               pitch_val = obj->stride / 128;
+               pitch_val = ffs(pitch_val) - 1;
+
+               val = i915_gem_obj_ggtt_offset(obj);
+               if (obj->tiling_mode == I915_TILING_Y)
+                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+               val |= I830_FENCE_SIZE_BITS(size);
+               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+               val |= I830_FENCE_REG_VALID;
+       } else
+               val = 0;
+
+       I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+       POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+       return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Ensure that all CPU reads are completed before installing a fence
+        * and all writes before removing the fence.
+        */
+       if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+               mb();
+
+       WARN(obj && (!obj->stride || !obj->tiling_mode),
+            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+            obj->stride, obj->tiling_mode);
+
+       if (IS_GEN2(dev))
+               i830_write_fence_reg(dev, reg, obj);
+       else if (IS_GEN3(dev))
+               i915_write_fence_reg(dev, reg, obj);
+       else if (INTEL_INFO(dev)->gen >= 4)
+               i965_write_fence_reg(dev, reg, obj);
+
+       /* And similarly be paranoid that no direct access to this region
+        * is reordered to before the fence is installed.
+        */
+       if (i915_gem_object_needs_mb(obj))
+               mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+                              struct drm_i915_fence_reg *fence)
+{
+       return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+                                        struct drm_i915_fence_reg *fence,
+                                        bool enable)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       int reg = fence_number(dev_priv, fence);
+
+       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+       if (enable) {
+               obj->fence_reg = reg;
+               fence->obj = obj;
+               list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+       } else {
+               obj->fence_reg = I915_FENCE_REG_NONE;
+               fence->obj = NULL;
+               list_del_init(&fence->lru_list);
+       }
+       obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+       if (obj->tiling_mode)
+               i915_gem_release_mmap(obj);
+
+       /* As we do not have an associated fence register, we will force
+        * a tiling change if we ever need to acquire one.
+        */
+       obj->fence_dirty = false;
+       obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->last_fenced_req) {
+               int ret = i915_wait_request(obj->last_fenced_req);
+               if (ret)
+                       return ret;
+
+               i915_gem_request_assign(&obj->last_fenced_req, NULL);
+       }
+
+       return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_fence_reg *fence;
+       int ret;
+
+       ret = i915_gem_object_wait_fence(obj);
+       if (ret)
+               return ret;
+
+       if (obj->fence_reg == I915_FENCE_REG_NONE)
+               return 0;
+
+       fence = &dev_priv->fence_regs[obj->fence_reg];
+
+       if (WARN_ON(fence->pin_count))
+               return -EBUSY;
+
+       i915_gem_object_fence_lost(obj);
+       i915_gem_object_update_fence(obj, fence, false);
+
+       return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_fence_reg *reg, *avail;
+       int i;
+
+       /* First try to find a free reg */
+       avail = NULL;
+       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+               reg = &dev_priv->fence_regs[i];
+               if (!reg->obj)
+                       return reg;
+
+               if (!reg->pin_count)
+                       avail = reg;
+       }
+
+       if (avail == NULL)
+               goto deadlock;
+
+       /* None available, try to steal one or wait for a user to finish */
+       list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+               if (reg->pin_count)
+                       continue;
+
+               return reg;
+       }
+
+deadlock:
+       /* Wait for completion of pending flips which consume fences */
+       if (intel_has_pending_fb_unpin(dev))
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool enable = obj->tiling_mode != I915_TILING_NONE;
+       struct drm_i915_fence_reg *reg;
+       int ret;
+
+       /* Have we updated the tiling parameters upon the object and so
+        * will need to serialise the write to the associated fence register?
+        */
+       if (obj->fence_dirty) {
+               ret = i915_gem_object_wait_fence(obj);
+               if (ret)
+                       return ret;
+       }
+
+       /* Just update our place in the LRU if our fence is getting reused. */
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               reg = &dev_priv->fence_regs[obj->fence_reg];
+               if (!obj->fence_dirty) {
+                       list_move_tail(&reg->lru_list,
+                                      &dev_priv->mm.fence_list);
+                       return 0;
+               }
+       } else if (enable) {
+               if (WARN_ON(!obj->map_and_fenceable))
+                       return -EINVAL;
+
+               reg = i915_find_fence_reg(dev);
+               if (IS_ERR(reg))
+                       return PTR_ERR(reg);
+
+               if (reg->obj) {
+                       struct drm_i915_gem_object *old = reg->obj;
+
+                       ret = i915_gem_object_wait_fence(old);
+                       if (ret)
+                               return ret;
+
+                       i915_gem_object_fence_lost(old);
+               }
+       } else
+               return 0;
+
+       i915_gem_object_update_fence(obj, reg, enable);
+
+       return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+               WARN_ON(!ggtt_vma ||
+                       dev_priv->fence_regs[obj->fence_reg].pin_count >
+                       ggtt_vma->pin_count);
+               dev_priv->fence_regs[obj->fence_reg].pin_count++;
+               return true;
+       } else
+               return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+               dev_priv->fence_regs[obj->fence_reg].pin_count--;
+       }
+}
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
+               struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+               /*
+                * Commit delayed tiling changes if we have an object still
+                * attached to the fence, otherwise just clear the fence.
+                */
+               if (reg->obj) {
+                       i915_gem_object_update_fence(reg->obj, reg,
+                                                    reg->obj->tiling_mode);
+               } else {
+                       i915_gem_write_fence(dev, i, NULL);
+               }
+       }
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
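+
+/*
+ * Worked example (hedged, mirroring the rule described above): for
+ * I915_BIT_6_SWIZZLE_9_10 the CPU must XOR bits 9 and 10 into bit 6 of
+ * each address before accessing a tiled mapping:
+ *
+ *	swizzled = addr ^ (((addr >> 3) ^ (addr >> 4)) & 64);
+ *
+ * Here (addr >> 3) & 64 lands bit 9 in the bit-6 position and
+ * (addr >> 4) & 64 lands bit 10 there.
+ */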
+
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+       uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+       if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+               /*
+                * On BDW+, swizzling is not used. We leave the CPU memory
+                * controller in charge of optimizing memory accesses without
+                * the extra address manipulation GPU side.
+                *
+                * VLV and CHV don't have GPU swizzling.
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               if (dev_priv->preserve_bios_swizzle) {
+                       if (I915_READ(DISP_ARB_CTL) &
+                           DISP_TILE_SURFACE_SWIZZLING) {
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else {
+                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       }
+               } else {
+                       uint32_t dimm_c0, dimm_c1;
+                       dimm_c0 = I915_READ(MAD_DIMM_C0);
+                       dimm_c1 = I915_READ(MAD_DIMM_C1);
+                       dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+                       dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+                       /* Enable swizzling when the channels are populated
+                        * with identically sized dimms. We don't need to check
+                        * the 3rd channel because no cpu with gpu attached
+                        * ships in that configuration. Also, swizzling only
+                        * makes sense for 2 channels anyway. */
+                       if (dimm_c0 == dimm_c1) {
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else {
+                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       }
+               }
+       } else if (IS_GEN5(dev)) {
+               /* On Ironlake, whatever the DRAM config, the GPU
+                * always does the same swizzling setup.
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+               swizzle_y = I915_BIT_6_SWIZZLE_9;
+       } else if (IS_GEN2(dev)) {
+               /* As far as we know, the 865 doesn't have these bit 6
+                * swizzling issues.
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+       } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+               uint32_t dcc;
+
+               /* On 9xx chipsets, channel interleave by the CPU is
+                * determined by DCC.  For single-channel, neither the CPU
+                * nor the GPU do swizzling.  For dual channel interleaved,
+                * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+                * 9 for Y tiled.  The CPU's interleave is independent, and
+                * can be based on either bit 11 (haven't seen this yet) or
+                * bit 17 (common).
+                */
+               dcc = I915_READ(DCC);
+               switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+               case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       break;
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+                       if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+                               /* This is the base swizzling by the GPU for
+                                * tiled buffers.
+                                */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+                               /* Bit 11 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+                       } else {
+                               /* Bit 17 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+                       }
+                       break;
+               }
+
+               /* check for L-shaped memory aka modified enhanced addressing */
+               if (IS_GEN4(dev)) {
+                       uint32_t ddc2 = I915_READ(DCC2);
+
+                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+                               dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+               }
+
+               if (dcc == 0xffffffff) {
+                       DRM_ERROR("Couldn't read from MCHBAR.  "
+                                 "Disabling tiling.\n");
+                       swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+               }
+       } else {
+               /* The 965, G33, and newer, have a very flexible memory
+                * configuration.  It will enable dual-channel mode
+                * (interleaving) on as much memory as it can, and the GPU
+                * will additionally sometimes enable different bit 6
+                * swizzling for tiled objects from the CPU.
+                *
+                * Here's what I found on the G965:
+                *    slot fill         memory size  swizzling
+                * 0A   0B   1A   1B    1-ch   2-ch
+                * 512  0    0    0     512    0     O
+                * 512  0    512  0     16     1008  X
+                * 512  0    0    512   16     1008  X
+                * 0    512  0    512   16     1008  X
+                * 1024 1024 1024 0     2048   1024  O
+                *
+                * We could probably detect this based either on the DRBs
+                * matching, which was the case for the swizzling required in
+                * the table above, or on the 1-ch value being less than
+                * the minimum size of a rank.
+                */
+               if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+               } else {
+                       swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                       swizzle_y = I915_BIT_6_SWIZZLE_9;
+               }
+       }
+
+       dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+       dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
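
For readers decoding the swizzle modes above: a minimal sketch of how a bit-6 swizzle such as I915_BIT_6_SWIZZLE_9_10 perturbs a linear offset. The helper below is illustrative only and is not part of this patch.

/* Illustrative only, not part of the patch: bit 6 of the address is
 * XORed with bits 9 and 10, as in I915_BIT_6_SWIZZLE_9_10. */
static unsigned long swizzle_addr_9_10(unsigned long addr)
{
	unsigned long bit9 = (addr >> 9) & 1;
	unsigned long bit10 = (addr >> 10) & 1;

	return addr ^ ((bit9 ^ bit10) << 6);
}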
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+       char temp[64];
+       char *vaddr;
+       int i;
+
+       vaddr = kmap(page);
+
+       for (i = 0; i < PAGE_SIZE; i += 128) {
+               memcpy(temp, &vaddr[i], 64);
+               memcpy(&vaddr[i], &vaddr[i + 64], 64);
+               memcpy(&vaddr[i + 64], temp, 64);
+       }
+
+       kunmap(page);
+}
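
Why a 64-byte swap is the right fixup: with bit-17 swizzling, bit 6 of the swizzled address is XORed with bit 17 of the physical address (assumed semantics of I915_BIT_6_SWIZZLE_9_10_17), so a bit 17 flip toggles bit 6 everywhere.

/* Example, illustrative only: if the page moves from a physical address
 * with bit 17 clear to one with bit 17 set, bit 6 of every swizzled
 * address flips, so the 64-byte halves of each 128-byte pair trade
 * places -- exactly the swap performed above. */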
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since the state was saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+       struct sg_page_iter sg_iter;
+       int i;
+
+       if (obj->bit_17 == NULL)
+               return;
+
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
+               char new_bit_17 = page_to_phys(page) >> 17;
+               if ((new_bit_17 & 0x1) !=
+                   (test_bit(i, obj->bit_17) != 0)) {
+                       i915_gem_swizzle_page(page);
+                       set_page_dirty(page);
+               }
+               i++;
+       }
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+       struct sg_page_iter sg_iter;
+       int page_count = obj->base.size >> PAGE_SHIFT;
+       int i;
+
+       if (obj->bit_17 == NULL) {
+               obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+                                     sizeof(long), GFP_KERNEL);
+               if (obj->bit_17 == NULL) {
+                       DRM_ERROR("Failed to allocate memory for bit 17 "
+                                 "record\n");
+                       return;
+               }
+       }
+
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+                       __set_bit(i, obj->bit_17);
+               else
+                       __clear_bit(i, obj->bit_17);
+               i++;
+       }
+}
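
A hedged sketch of how callers are expected to pair the two helpers around page pinning; the condition helper is assumed from the surrounding codebase and is not defined in this hunk.

/* Hypothetical pairing sketch, not from this patch: */
if (i915_gem_object_needs_bit17_swizzle(obj))
	i915_gem_object_save_bit_17_swizzle(obj);
/* ... backing storage unpinned, possibly swapped out and back in ... */
if (i915_gem_object_needs_bit17_swizzle(obj))
	i915_gem_object_do_bit_17_swizzle(obj);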
index 31e8269e6e3dab33d809f693f7cc6ce8cf318975..96054a560f4f8da59f40a1fc532013a1b7c5b374 100644 (file)
@@ -192,9 +192,8 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
-                                 dma_addr_t addr,
-                                 enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+                                 const enum i915_cache_level level)
 {
        gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
        pde |= addr;
@@ -301,75 +300,120 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-#define i915_dma_unmap_single(px, dev) \
-       __i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
-                                   struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+                           struct i915_page_dma *p, gfp_t flags)
 {
        struct device *device = &dev->pdev->dev;
 
-       dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       p->page = alloc_page(flags);
+       if (!p->page)
+               return -ENOMEM;
+
+       p->daddr = dma_map_page(device,
+                               p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(device, p->daddr)) {
+               __free_page(p->page);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px:        Page table/dir/etc to get a DMA map for
- * @dev:       drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
-       i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+       return __setup_page_dma(dev, p, GFP_KERNEL);
+}
 
-static int i915_dma_map_page_single(struct page *page,
-                                   struct drm_device *dev,
-                                   dma_addr_t *daddr)
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-       struct device *device = &dev->pdev->dev;
+       if (WARN_ON(!p->page))
+               return;
 
-       *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(device, *daddr))
-               return -ENOMEM;
+       dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       __free_page(p->page);
+       memset(p, 0, sizeof(*p));
+}
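
setup_page_dma()/cleanup_page_dma() give every paging structure one shared allocate-and-map lifecycle; a minimal usage sketch (illustrative only):

/* Illustrative lifecycle, not part of the patch: */
struct i915_page_dma p = {};
int ret;

ret = setup_page_dma(dev, &p);	/* alloc_page() + dma_map_page() */
if (ret)
	return ret;
/* ... use p.page for CPU access, p.daddr for the GPU ... */
cleanup_page_dma(dev, &p);	/* dma_unmap_page() + __free_page() */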
 
-       return 0;
+static void *kmap_page_dma(struct i915_page_dma *p)
+{
+       return kmap_atomic(p->page);
 }
 
-static void unmap_and_free_pt(struct i915_page_table *pt,
-                              struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
 {
-       if (WARN_ON(!pt->page))
-               return;
+       /* There are only a few exceptions for gen >= 6: chv and bxt.
+        * We are not sure about the latter, so play it safe for now.
+        */
+       if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+               drm_clflush_virt_range(vaddr, PAGE_SIZE);
 
-       i915_dma_unmap_single(pt, dev);
-       __free_page(pt->page);
-       kfree(pt->used_ptes);
-       kfree(pt);
+       kunmap_atomic(vaddr);
 }
 
-static void gen8_initialize_pt(struct i915_address_space *vm,
-                              struct i915_page_table *pt)
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+                         const uint64_t val)
 {
-       gen8_pte_t *pt_vaddr, scratch_pte;
        int i;
+       uint64_t * const vaddr = kmap_page_dma(p);
 
-       pt_vaddr = kmap_atomic(pt->page);
-       scratch_pte = gen8_pte_encode(vm->scratch.addr,
-                                     I915_CACHE_LLC, true);
+       for (i = 0; i < 512; i++)
+               vaddr[i] = val;
+
+       kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+                            const uint32_t val32)
+{
+       uint64_t v = val32;
 
-       for (i = 0; i < GEN8_PTES; i++)
-               pt_vaddr[i] = scratch_pte;
+       v = v << 32 | val32;
 
-       if (!HAS_LLC(vm->dev))
-               drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-       kunmap_atomic(pt_vaddr);
+       fill_page_dma(dev, p, v);
 }
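
The 32-bit variant just replicates the value so the 64-bit fill can be reused; for instance (illustrative):

/* Illustrative: fill32_px(dev, pt, 0xdeadbeef) fills the page with the
 * 64-bit pattern 0xdeadbeefdeadbeef, i.e. two 32-bit PTEs per store. */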
 
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+       struct i915_page_scratch *sp;
+       int ret;
+
+       sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+       if (sp == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+       if (ret) {
+               kfree(sp);
+               return ERR_PTR(ret);
+       }
+
+       set_pages_uc(px_page(sp), 1);
+
+       return sp;
+}
+
+static void free_scratch_page(struct drm_device *dev,
+                             struct i915_page_scratch *sp)
+{
+       set_pages_wb(px_page(sp), 1);
+
+       cleanup_px(dev, sp);
+       kfree(sp);
+}
+
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
 {
        struct i915_page_table *pt;
        const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@ static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
        if (!pt->used_ptes)
                goto fail_bitmap;
 
-       pt->page = alloc_page(GFP_KERNEL);
-       if (!pt->page)
-               goto fail_page;
-
-       ret = i915_dma_map_single(pt, dev);
+       ret = setup_px(dev, pt);
        if (ret)
-               goto fail_dma;
+               goto fail_page_m;
 
        return pt;
 
-fail_dma:
-       __free_page(pt->page);
-fail_page:
+fail_page_m:
        kfree(pt->used_ptes);
 fail_bitmap:
        kfree(pt);
@@ -406,18 +444,38 @@ fail_bitmap:
        return ERR_PTR(ret);
 }
 
-static void unmap_and_free_pd(struct i915_page_directory *pd,
-                             struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
-       if (pd->page) {
-               i915_dma_unmap_single(pd, dev);
-               __free_page(pd->page);
-               kfree(pd->used_pdes);
-               kfree(pd);
-       }
+       cleanup_px(dev, pt);
+       kfree(pt->used_ptes);
+       kfree(pt);
 }
 
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+                              struct i915_page_table *pt)
+{
+       gen8_pte_t scratch_pte;
+
+       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+                                     I915_CACHE_LLC, true);
+
+       fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+                              struct i915_page_table *pt)
+{
+       gen6_pte_t scratch_pte;
+
+       WARN_ON(px_dma(vm->scratch_page) == 0);
+
+       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                    I915_CACHE_LLC, true, 0);
+
+       fill32_px(vm->dev, pt, scratch_pte);
+}
+
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 {
        struct i915_page_directory *pd;
        int ret = -ENOMEM;
@@ -429,38 +487,52 @@ static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
        pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
                                sizeof(*pd->used_pdes), GFP_KERNEL);
        if (!pd->used_pdes)
-               goto free_pd;
-
-       pd->page = alloc_page(GFP_KERNEL);
-       if (!pd->page)
-               goto free_bitmap;
+               goto fail_bitmap;
 
-       ret = i915_dma_map_single(pd, dev);
+       ret = setup_px(dev, pd);
        if (ret)
-               goto free_page;
+               goto fail_page_m;
 
        return pd;
 
-free_page:
-       __free_page(pd->page);
-free_bitmap:
+fail_page_m:
        kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
        kfree(pd);
 
        return ERR_PTR(ret);
 }
 
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+       if (px_page(pd)) {
+               cleanup_px(dev, pd);
+               kfree(pd->used_pdes);
+               kfree(pd);
+       }
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+                              struct i915_page_directory *pd)
+{
+       gen8_pde_t scratch_pde;
+
+       scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+       fill_px(vm->dev, pd, scratch_pde);
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
                          unsigned entry,
                          dma_addr_t addr)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        BUG_ON(entry >= 4);
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -476,16 +548,14 @@ static int gen8_write_pdp(struct intel_engine_cs *ring,
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring)
+                         struct drm_i915_gem_request *req)
 {
        int i, ret;
 
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
-               struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
-               dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
-               /* The page directory might be NULL, but we need to clear out
-                * whatever the previous context might have used. */
-               ret = gen8_write_pdp(ring, i, pd_daddr);
+               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+               ret = gen8_write_pdp(req, i, pd_daddr);
                if (ret)
                        return ret;
        }
@@ -507,13 +577,12 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
 
-       scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+       scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
                struct i915_page_directory *pd;
                struct i915_page_table *pt;
-               struct page *page_table;
 
                if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
                        break;
@@ -525,25 +594,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 
                pt = pd->page_table[pde];
 
-               if (WARN_ON(!pt->page))
+               if (WARN_ON(!px_page(pt)))
                        break;
 
-               page_table = pt->page;
-
                last_pte = pte + num_entries;
                if (last_pte > GEN8_PTES)
                        last_pte = GEN8_PTES;
 
-               pt_vaddr = kmap_atomic(page_table);
+               pt_vaddr = kmap_px(pt);
 
                for (i = pte; i < last_pte; i++) {
                        pt_vaddr[i] = scratch_pte;
                        num_entries--;
                }
 
-               if (!HAS_LLC(ppgtt->base.dev))
-                       drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-               kunmap_atomic(pt_vaddr);
+               kunmap_px(ppgtt, pt);
 
                pte = 0;
                if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                if (pt_vaddr == NULL) {
                        struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
                        struct i915_page_table *pt = pd->page_table[pde];
-                       struct page *page_table = pt->page;
-
-                       pt_vaddr = kmap_atomic(page_table);
+                       pt_vaddr = kmap_px(pt);
                }
 
                pt_vaddr[pte] =
                        gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
                                        cache_level, true);
                if (++pte == GEN8_PTES) {
-                       if (!HAS_LLC(ppgtt->base.dev))
-                               drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-                       kunmap_atomic(pt_vaddr);
+                       kunmap_px(ppgtt, pt_vaddr);
                        pt_vaddr = NULL;
                        if (++pde == I915_PDES) {
                                pdpe++;
@@ -595,58 +656,64 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                        pte = 0;
                }
        }
-       if (pt_vaddr) {
-               if (!HAS_LLC(ppgtt->base.dev))
-                       drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-               kunmap_atomic(pt_vaddr);
-       }
-}
-
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
-                            struct i915_page_table *pt,
-                            struct drm_device *dev)
-{
-       gen8_pde_t entry =
-               gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
-       *pde = entry;
-}
 
-static void gen8_initialize_pd(struct i915_address_space *vm,
-                              struct i915_page_directory *pd)
-{
-       struct i915_hw_ppgtt *ppgtt =
-                       container_of(vm, struct i915_hw_ppgtt, base);
-       gen8_pde_t *page_directory;
-       struct i915_page_table *pt;
-       int i;
-
-       page_directory = kmap_atomic(pd->page);
-       pt = ppgtt->scratch_pt;
-       for (i = 0; i < I915_PDES; i++)
-               /* Map the PDE to the page table */
-               __gen8_do_map_pt(page_directory + i, pt, vm->dev);
-
-       if (!HAS_LLC(vm->dev))
-               drm_clflush_virt_range(page_directory, PAGE_SIZE);
-       kunmap_atomic(page_directory);
+       if (pt_vaddr)
+               kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+                                 struct i915_page_directory *pd)
 {
        int i;
 
-       if (!pd->page)
+       if (!px_page(pd))
                return;
 
        for_each_set_bit(i, pd->used_pdes, I915_PDES) {
                if (WARN_ON(!pd->page_table[i]))
                        continue;
 
-               unmap_and_free_pt(pd->page_table[i], dev);
+               free_pt(dev, pd->page_table[i]);
                pd->page_table[i] = NULL;
        }
 }
 
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+       struct drm_device *dev = vm->dev;
+
+       vm->scratch_page = alloc_scratch_page(dev);
+       if (IS_ERR(vm->scratch_page))
+               return PTR_ERR(vm->scratch_page);
+
+       vm->scratch_pt = alloc_pt(dev);
+       if (IS_ERR(vm->scratch_pt)) {
+               free_scratch_page(dev, vm->scratch_page);
+               return PTR_ERR(vm->scratch_pt);
+       }
+
+       vm->scratch_pd = alloc_pd(dev);
+       if (IS_ERR(vm->scratch_pd)) {
+               free_pt(dev, vm->scratch_pt);
+               free_scratch_page(dev, vm->scratch_page);
+               return PTR_ERR(vm->scratch_pd);
+       }
+
+       gen8_initialize_pt(vm, vm->scratch_pt);
+       gen8_initialize_pd(vm, vm->scratch_pd);
+
+       return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+       struct drm_device *dev = vm->dev;
+
+       free_pd(dev, vm->scratch_pd);
+       free_pt(dev, vm->scratch_pt);
+       free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                if (WARN_ON(!ppgtt->pdp.page_directory[i]))
                        continue;
 
-               gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
-               unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+               gen8_free_page_tables(ppgtt->base.dev,
+                                     ppgtt->pdp.page_directory[i]);
+               free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
        }
 
-       unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
-       unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+       gen8_free_scratch(vm);
 }
 
 /**
@@ -698,24 +765,24 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
                /* Don't reallocate page tables */
                if (pt) {
                        /* Scratch is never allocated this way */
-                       WARN_ON(pt == ppgtt->scratch_pt);
+                       WARN_ON(pt == ppgtt->base.scratch_pt);
                        continue;
                }
 
-               pt = alloc_pt_single(dev);
+               pt = alloc_pt(dev);
                if (IS_ERR(pt))
                        goto unwind_out;
 
                gen8_initialize_pt(&ppgtt->base, pt);
                pd->page_table[pde] = pt;
-               set_bit(pde, new_pts);
+               __set_bit(pde, new_pts);
        }
 
        return 0;
 
 unwind_out:
        for_each_set_bit(pde, new_pts, I915_PDES)
-               unmap_and_free_pt(pd->page_table[pde], dev);
+               free_pt(dev, pd->page_table[pde]);
 
        return -ENOMEM;
 }
@@ -756,27 +823,24 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 
        WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 
-       /* FIXME: upper bound must not overflow 32 bits  */
-       WARN_ON((start + length) > (1ULL << 32));
-
        gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                if (pd)
                        continue;
 
-               pd = alloc_pd_single(dev);
+               pd = alloc_pd(dev);
                if (IS_ERR(pd))
                        goto unwind_out;
 
                gen8_initialize_pd(&ppgtt->base, pd);
                pdp->page_directory[pdpe] = pd;
-               set_bit(pdpe, new_pds);
+               __set_bit(pdpe, new_pds);
        }
 
        return 0;
 
 unwind_out:
        for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
-               unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+               free_pd(dev, pdp->page_directory[pdpe]);
 
        return -ENOMEM;
 }
@@ -830,6 +894,16 @@ err_out:
        return -ENOMEM;
 }
 
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that the TLBs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
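
A hedged sketch of a consumer of pd_dirty_rings at submission time; intel_ring_flag() and the exact call site are assumptions, not part of this hunk.

/* Hypothetical consumer sketch: reload PDs once per dirty ring. */
if (ppgtt && (ppgtt->pd_dirty_rings & intel_ring_flag(req->ring))) {
	ret = ppgtt->switch_mm(ppgtt, req);
	if (ret)
		return ret;
	ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
}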
+
 static int gen8_alloc_va_range(struct i915_address_space *vm,
                               uint64_t start,
                               uint64_t length)
@@ -848,7 +922,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
         * actually use the other side of the canonical address space.
         */
        if (WARN_ON(start + length < start))
-               return -ERANGE;
+               return -ENODEV;
+
+       if (WARN_ON(start + length > ppgtt->base.total))
+               return -ENODEV;
 
        ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
        if (ret)
@@ -876,7 +953,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
        /* Allocations have completed successfully, so set the bitmaps, and do
         * the mappings. */
        gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-               gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+               gen8_pde_t *const page_directory = kmap_px(pd);
                struct i915_page_table *pt;
                uint64_t pd_len = gen8_clamp_pd(start, length);
                uint64_t pd_start = start;
@@ -897,36 +974,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
                                   gen8_pte_count(pd_start, pd_len));
 
                        /* Our pde is now pointing to the pagetable, pt */
-                       set_bit(pde, pd->used_pdes);
+                       __set_bit(pde, pd->used_pdes);
 
                        /* Map the PDE to the page table */
-                       __gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+                       page_directory[pde] = gen8_pde_encode(px_dma(pt),
+                                                             I915_CACHE_LLC);
 
                        /* NB: We haven't yet mapped ptes to pages. At this
                         * point we're still relying on insert_entries() */
                }
 
-               if (!HAS_LLC(vm->dev))
-                       drm_clflush_virt_range(page_directory, PAGE_SIZE);
+               kunmap_px(ppgtt, page_directory);
 
-               kunmap_atomic(page_directory);
-
-               set_bit(pdpe, ppgtt->pdp.used_pdpes);
+               __set_bit(pdpe, ppgtt->pdp.used_pdpes);
        }
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+       mark_tlbs_dirty(ppgtt);
        return 0;
 
 err_out:
        while (pdpe--) {
                for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-                       unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+                       free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
        }
 
        for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
-               unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+               free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+       mark_tlbs_dirty(ppgtt);
        return ret;
 }
 
@@ -939,16 +1016,11 @@ err_out:
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->scratch_pt))
-               return PTR_ERR(ppgtt->scratch_pt);
-
-       ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->scratch_pd))
-               return PTR_ERR(ppgtt->scratch_pd);
+       int ret;
 
-       gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
-       gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+       ret = gen8_init_scratch(&ppgtt->base);
+       if (ret)
+               return ret;
 
        ppgtt->base.start = 0;
        ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        uint32_t  pte, pde, temp;
        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                    I915_CACHE_LLC, true, 0);
 
        gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
                u32 expected;
                gen6_pte_t *pt_vaddr;
-               dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+               const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
                pd_entry = readl(ppgtt->pd_addr + pde);
                expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -996,7 +1069,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
                                   expected);
                seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-               pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+               pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
                for (pte = 0; pte < GEN6_PTES; pte+=4) {
                        unsigned long va =
                                (pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
                        }
                        seq_puts(m, "\n");
                }
-               kunmap_atomic(pt_vaddr);
+               kunmap_px(ppgtt, pt_vaddr);
        }
 }
 
@@ -1031,7 +1105,7 @@ static void gen6_write_pde(struct i915_page_directory *pd,
                container_of(pd, struct i915_hw_ppgtt, pd);
        u32 pd_entry;
 
-       pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+       pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
        pd_entry |= GEN6_PDE_VALID;
 
        writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-       BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+       BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
-       return (ppgtt->pd.pd_offset / 64) << 16;
+       return (ppgtt->pd.base.ggtt_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_engine_cs *ring)
+                        struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -1087,8 +1162,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring)
+                         struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring)
+                         struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -1120,7 +1197,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
        /* XXX: RCS is the only one to auto invalidate the TLBs? */
        if (ring->id != RCS) {
-               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
@@ -1129,8 +1206,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring)
+                         struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1214,19 +1292,20 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % GEN6_PTES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                    I915_CACHE_LLC, true, 0);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > GEN6_PTES)
                        last_pte = GEN6_PTES;
 
-               pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+               pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;
 
-               kunmap_atomic(pt_vaddr);
+               kunmap_px(ppgtt, pt_vaddr);
 
                num_entries -= last_pte - first_pte;
                first_pte = 0;
@@ -1250,54 +1329,25 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
        pt_vaddr = NULL;
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
                if (pt_vaddr == NULL)
-                       pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+                       pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
                pt_vaddr[act_pte] =
                        vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
                                       cache_level, true, flags);
 
                if (++act_pte == GEN6_PTES) {
-                       kunmap_atomic(pt_vaddr);
+                       kunmap_px(ppgtt, pt_vaddr);
                        pt_vaddr = NULL;
                        act_pt++;
                        act_pte = 0;
                }
        }
        if (pt_vaddr)
-               kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
-       /* If current vm != vm, */
-       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
-               struct i915_page_table *pt)
-{
-       gen6_pte_t *pt_vaddr, scratch_pte;
-       int i;
-
-       WARN_ON(vm->scratch.addr == 0);
-
-       scratch_pte = vm->pte_encode(vm->scratch.addr,
-                       I915_CACHE_LLC, true, 0);
-
-       pt_vaddr = kmap_atomic(pt->page);
-
-       for (i = 0; i < GEN6_PTES; i++)
-               pt_vaddr[i] = scratch_pte;
-
-       kunmap_atomic(pt_vaddr);
+               kunmap_px(ppgtt, pt_vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
-                              uint64_t start, uint64_t length)
+                              uint64_t start_in, uint64_t length_in)
 {
        DECLARE_BITMAP(new_page_tables, I915_PDES);
        struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        struct i915_hw_ppgtt *ppgtt =
                                container_of(vm, struct i915_hw_ppgtt, base);
        struct i915_page_table *pt;
-       const uint32_t start_save = start, length_save = length;
+       uint32_t start, length, start_save, length_save;
        uint32_t pde, temp;
        int ret;
 
-       WARN_ON(upper_32_bits(start));
+       if (WARN_ON(start_in + length_in > ppgtt->base.total))
+               return -ENODEV;
+
+       start = start_save = start_in;
+       length = length_save = length_in;
 
        bitmap_zero(new_page_tables, I915_PDES);
 
@@ -1319,7 +1373,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
         * tables.
         */
        gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
-               if (pt != ppgtt->scratch_pt) {
+               if (pt != vm->scratch_pt) {
                        WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
                        continue;
                }
@@ -1327,7 +1381,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                /* We've already allocated a page table */
                WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
 
-               pt = alloc_pt_single(dev);
+               pt = alloc_pt(dev);
                if (IS_ERR(pt)) {
                        ret = PTR_ERR(pt);
                        goto unwind_out;
@@ -1336,7 +1390,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                gen6_initialize_pt(vm, pt);
 
                ppgtt->pd.page_table[pde] = pt;
-               set_bit(pde, new_page_tables);
+               __set_bit(pde, new_page_tables);
                trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
        }
 
@@ -1350,7 +1404,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                bitmap_set(tmp_bitmap, gen6_pte_index(start),
                           gen6_pte_count(start, length));
 
-               if (test_and_clear_bit(pde, new_page_tables))
+               if (__test_and_clear_bit(pde, new_page_tables))
                        gen6_write_pde(&ppgtt->pd, pde, pt);
 
                trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@ unwind_out:
        for_each_set_bit(pde, new_page_tables, I915_PDES) {
                struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
-               ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
-               unmap_and_free_pt(pt, vm->dev);
+               ppgtt->pd.page_table[pde] = vm->scratch_pt;
+               free_pt(vm->dev, pt);
        }
 
        mark_tlbs_dirty(ppgtt);
        return ret;
 }
 
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+       struct drm_device *dev = vm->dev;
+
+       vm->scratch_page = alloc_scratch_page(dev);
+       if (IS_ERR(vm->scratch_page))
+               return PTR_ERR(vm->scratch_page);
+
+       vm->scratch_pt = alloc_pt(dev);
+       if (IS_ERR(vm->scratch_pt)) {
+               free_scratch_page(dev, vm->scratch_page);
+               return PTR_ERR(vm->scratch_pt);
+       }
+
+       gen6_initialize_pt(vm, vm->scratch_pt);
+
+       return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+       struct drm_device *dev = vm->dev;
+
+       free_pt(dev, vm->scratch_pt);
+       free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
        struct i915_page_table *pt;
        uint32_t pde;
 
-
        drm_mm_remove_node(&ppgtt->node);
 
        gen6_for_all_pdes(pt, ppgtt, pde) {
-               if (pt != ppgtt->scratch_pt)
-                       unmap_and_free_pt(pt, ppgtt->base.dev);
+               if (pt != vm->scratch_pt)
+                       free_pt(ppgtt->base.dev, pt);
        }
 
-       unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
-       unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+       gen6_free_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
+       struct i915_address_space *vm = &ppgtt->base;
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool retried = false;
@@ -1413,11 +1493,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
         * size. We allocate at the top of the GTT to avoid fragmentation.
         */
        BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-       ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->scratch_pt))
-               return PTR_ERR(ppgtt->scratch_pt);
 
-       gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+       ret = gen6_init_scratch(vm);
+       if (ret)
+               return ret;
 
 alloc:
        ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@ alloc:
        return 0;
 
 err_out:
-       unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+       gen6_free_scratch(vm);
        return ret;
 }
 
@@ -1464,7 +1543,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
        uint32_t pde, temp;
 
        gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
-               ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+               ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
        ppgtt->debug_dump = gen6_dump_ppgtt;
 
-       ppgtt->pd.pd_offset =
+       ppgtt->pd.base.ggtt_offset =
                ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
        ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-               ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+               ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
        gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
@@ -1515,23 +1594,21 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                         ppgtt->node.start / PAGE_SIZE);
 
        DRM_DEBUG("Adding PPGTT at offset %x\n",
-                 ppgtt->pd.pd_offset << 10);
+                 ppgtt->pd.base.ggtt_offset << 10);
 
        return 0;
 }
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        ppgtt->base.dev = dev;
-       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
        if (INTEL_INFO(dev)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
        else
                return gen8_ppgtt_init(ppgtt);
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i, ret = 0;
-
        /* In the case of execlists, PPGTT is enabled by the context descriptor
         * and the PDPs are contained within the context itself.  We don't
         * need to do anything here. */
@@ -1573,16 +1645,23 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
        else
                MISSING_CASE(INTEL_INFO(dev)->gen);
 
-       if (ppgtt) {
-               for_each_ring(ring, dev_priv, i) {
-                       ret = ppgtt->switch_mm(ppgtt, ring);
-                       if (ret != 0)
-                               return ret;
-               }
-       }
+       return 0;
+}
 
-       return ret;
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+       struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+       if (i915.enable_execlists)
+               return 0;
+
+       if (!ppgtt)
+               return 0;
+
+       return ppgtt->switch_mm(ppgtt, req);
 }
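
A possible call site for the new per-request hook, sketched with the request-allocation names assumed from elsewhere in this series; this is not part of this hunk.

/* Hypothetical call-site sketch during GEM hw init: */
for_each_ring(ring, dev_priv, i) {
	struct drm_i915_gem_request *req;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = i915_ppgtt_init_ring(req);
	if (ret)
		DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);

	i915_add_request_no_flush(req);
}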
+
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -1843,7 +1922,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = gen8_pte_encode(vm->scratch.addr,
+       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                      I915_CACHE_LLC,
                                      use_scratch);
        for (i = 0; i < num_entries; i++)
@@ -1869,7 +1948,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                    I915_CACHE_LLC, use_scratch, 0);
 
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
@@ -2105,7 +2185,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 void i915_gem_init_global_gtt(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
+       u64 gtt_size, mappable_size;
 
        gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
@@ -2135,42 +2215,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
        vm->cleanup(vm);
 }
 
-static int setup_scratch_page(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct page *page;
-       dma_addr_t dma_addr;
-
-       page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-       if (page == NULL)
-               return -ENOMEM;
-       set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
-       dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->pdev, dma_addr))
-               return -EINVAL;
-#else
-       dma_addr = page_to_phys(page);
-#endif
-       dev_priv->gtt.base.scratch.page = page;
-       dev_priv->gtt.base.scratch.addr = dma_addr;
-
-       return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct page *page = dev_priv->gtt.base.scratch.page;
-
-       set_pages_wb(page, 1);
-       pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
-                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       __free_page(page);
-}
-
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2253,8 +2297,8 @@ static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_page_scratch *scratch_page;
        phys_addr_t gtt_phys_addr;
-       int ret;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
        gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2276,14 +2320,17 @@ static int ggtt_probe_common(struct drm_device *dev,
                return -ENOMEM;
        }
 
-       ret = setup_scratch_page(dev);
-       if (ret) {
+       scratch_page = alloc_scratch_page(dev);
+       if (IS_ERR(scratch_page)) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(dev_priv->gtt.gsm);
+               return PTR_ERR(scratch_page);
        }
 
-       return ret;
+       dev_priv->gtt.base.scratch_page = scratch_page;
+
+       return 0;
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2360,13 +2407,13 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
 }
 
 static int gen8_gmch_probe(struct drm_device *dev,
-                          size_t *gtt_total,
+                          u64 *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
-                          unsigned long *mappable_end)
+                          u64 *mappable_end)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned int gtt_size;
+       u64 gtt_size;
        u16 snb_gmch_ctl;
        int ret;
 
@@ -2408,10 +2455,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
 }
 
 static int gen6_gmch_probe(struct drm_device *dev,
-                          size_t *gtt_total,
+                          u64 *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
-                          unsigned long *mappable_end)
+                          u64 *mappable_end)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int gtt_size;
@@ -2425,7 +2472,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
         * a coarse sanity check.
         */
        if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-               DRM_ERROR("Unknown GMADR size (%lx)\n",
+               DRM_ERROR("Unknown GMADR size (%llx)\n",
                          dev_priv->gtt.mappable_end);
                return -ENXIO;
        }
@@ -2455,14 +2502,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
        iounmap(gtt->gsm);
-       teardown_scratch_page(vm->dev);
+       free_scratch_page(vm->dev, vm->scratch_page);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
-                          size_t *gtt_total,
+                          u64 *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
-                          unsigned long *mappable_end)
+                          u64 *mappable_end)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
@@ -2519,17 +2566,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
                dev_priv->gtt.base.cleanup = gen6_gmch_remove;
        }
 
+       gtt->base.dev = dev;
+
        ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
                             &gtt->mappable_base, &gtt->mappable_end);
        if (ret)
                return ret;
 
-       gtt->base.dev = dev;
-
        /* GMADR is the PCI mmio aperture into the global GTT. */
-       DRM_INFO("Memory usable by graphics device = %zdM\n",
+       DRM_INFO("Memory usable by graphics device = %lluM\n",
                 gtt->base.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped)
@@ -2706,30 +2753,17 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
                          struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
        struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
-       unsigned long size, pages, rot_pages;
+       unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
        struct sg_page_iter sg_iter;
        unsigned long i;
        dma_addr_t *page_addr_list;
        struct sg_table *st;
-       unsigned int tile_pitch, tile_height;
-       unsigned int width_pages, height_pages;
        int ret = -ENOMEM;
 
-       pages = obj->base.size / PAGE_SIZE;
-
-       /* Calculate tiling geometry. */
-       tile_height = intel_tile_height(dev, rot_info->pixel_format,
-                                       rot_info->fb_modifier);
-       tile_pitch = PAGE_SIZE / tile_height;
-       width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
-       height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
-       rot_pages = width_pages * height_pages;
-       size = rot_pages * PAGE_SIZE;
-
        /* Allocate a temporary list of source pages for random access. */
-       page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+       page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+                                      sizeof(dma_addr_t));
        if (!page_addr_list)
                return ERR_PTR(ret);
 
@@ -2738,7 +2772,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        if (!st)
                goto err_st_alloc;
 
-       ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+       ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
        if (ret)
                goto err_sg_alloc;
 
@@ -2750,13 +2784,15 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        }
 
        /* Rotate the pages. */
-       rotate_pages(page_addr_list, width_pages, height_pages, st);
+       rotate_pages(page_addr_list,
+                    rot_info->width_pages, rot_info->height_pages,
+                    st);
 
        DRM_DEBUG_KMS(
-                     "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
-                     size, rot_info->pitch, rot_info->height,
-                     rot_info->pixel_format, width_pages, height_pages,
-                     rot_pages);
+                     "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+                     obj->base.size, rot_info->pitch, rot_info->height,
+                     rot_info->pixel_format, rot_info->width_pages,
+                     rot_info->height_pages, size_pages);
 
        drm_free_large(page_addr_list);
 
@@ -2768,10 +2804,10 @@ err_st_alloc:
        drm_free_large(page_addr_list);
 
        DRM_DEBUG_KMS(
-                     "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
-                     size, ret, rot_info->pitch, rot_info->height,
-                     rot_info->pixel_format, width_pages, height_pages,
-                     rot_pages);
+                     "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+                     obj->base.size, ret, rot_info->pitch, rot_info->height,
+                     rot_info->pixel_format, rot_info->width_pages,
+                     rot_info->height_pages, size_pages);
        return ERR_PTR(ret);
 }
 
@@ -2889,9 +2925,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                                    vma->node.size,
                                    VM_TO_TRACE_NAME(vma->vm));
 
+               /* XXX: i915_vma_pin() will fix this pin_count +/- hack */
+               vma->pin_count++;
                ret = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start,
                                                 vma->node.size);
+               vma->pin_count--;
                if (ret)
                        return ret;
        }
@@ -2916,9 +2955,10 @@ size_t
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
                    const struct i915_ggtt_view *view)
 {
-       if (view->type == I915_GGTT_VIEW_NORMAL ||
-           view->type == I915_GGTT_VIEW_ROTATED) {
+       if (view->type == I915_GGTT_VIEW_NORMAL) {
                return obj->base.size;
+       } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+               return view->rotation_info.size;
        } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
                return view->params.partial.size << PAGE_SHIFT;
        } else {
index 0d46dd20bf717507fddbf22830e053322d399f32..e1cfa292f9adf8cd63dff0c1c162115aef209eb2 100644 (file)
@@ -126,6 +126,8 @@ struct intel_rotation_info {
        unsigned int pitch;
        uint32_t pixel_format;
        uint64_t fb_modifier;
+       unsigned int width_pages, height_pages;
+       uint64_t size;
 };
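
The new width_pages/height_pages/size fields are expected to be filled by the framebuffer view setup that replaces the geometry computation removed from intel_rotate_fb_obj_pages() above; a sketch under that assumption, mirroring the removed code:

/* Hypothetical producer sketch, not part of this hunk: */
tile_height = intel_tile_height(dev, rot_info->pixel_format,
				rot_info->fb_modifier);
tile_pitch = PAGE_SIZE / tile_height;
rot_info->width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
rot_info->height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
rot_info->size = rot_info->width_pages * rot_info->height_pages * PAGE_SIZE;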
 
 struct i915_ggtt_view {
@@ -205,19 +207,34 @@ struct i915_vma {
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
-struct i915_page_table {
+struct i915_page_dma {
        struct page *page;
-       dma_addr_t daddr;
+       union {
+               dma_addr_t daddr;
+
+               /* For gen6/gen7 only. This is the offset in the GGTT
+                * where the page directory entries for PPGTT begin
+                */
+               uint32_t ggtt_offset;
+       };
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
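Usage sketch for the px_*() accessors: any structure that embeds an i915_page_dma named base qualifies, e.g. (illustrative):

/* Illustrative: for a struct i915_page_table *pt,
 *   px_page(pt) -> pt->base.page
 *   px_dma(pt)  -> pt->base.daddr
 * so a PDE can be built as gen8_pde_encode(px_dma(pt), I915_CACHE_LLC). */
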
+struct i915_page_scratch {
+       struct i915_page_dma base;
+};
+
+struct i915_page_table {
+       struct i915_page_dma base;
 
        unsigned long *used_ptes;
 };
 
 struct i915_page_directory {
-       struct page *page; /* NULL for GEN6-GEN7 */
-       union {
-               uint32_t pd_offset;
-               dma_addr_t daddr;
-       };
+       struct i915_page_dma base;
 
        unsigned long *used_pdes;
        struct i915_page_table *page_table[I915_PDES]; /* PDEs */
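The px_base()/px_page()/px_dma() helpers above rely on every paging structure embedding its struct i915_page_dma as a member named base, so one set of accessors serves scratch pages, page tables and page directories alike. A quick illustration (the pt variable is hypothetical):

	struct i915_page_table *pt = vm->scratch_pt;	/* any paging struct */
	struct page *backing = px_page(pt);		/* expands to pt->base.page */
	dma_addr_t bus_addr = px_dma(pt);		/* expands to pt->base.daddr */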
@@ -233,13 +250,12 @@ struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
        struct list_head global_link;
-       unsigned long start;            /* Start offset always 0 for dri2 */
-       size_t total;           /* size addr space maps (ex. 2GB for ggtt) */
+       u64 start;              /* Start offset always 0 for dri2 */
+       u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
 
-       struct {
-               dma_addr_t addr;
-               struct page *page;
-       } scratch;
+       struct i915_page_scratch *scratch_page;
+       struct i915_page_table *scratch_pt;
+       struct i915_page_directory *scratch_pd;
 
        /**
         * List of objects currently involved in rendering.
@@ -300,9 +316,9 @@ struct i915_address_space {
  */
 struct i915_gtt {
        struct i915_address_space base;
-       size_t stolen_size;             /* Total size of stolen memory */
 
-       unsigned long mappable_end;     /* End offset that we can CPU map */
+       size_t stolen_size;             /* Total size of stolen memory */
+       u64 mappable_end;               /* End offset that we can CPU map */
        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
 
@@ -314,9 +330,9 @@ struct i915_gtt {
        int mtrr;
 
        /* global gtt ops */
-       int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+       int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
                          size_t *stolen, phys_addr_t *mappable_base,
-                         unsigned long *mappable_end);
+                         u64 *mappable_end);
 };
 
 struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
                struct i915_page_directory pd;
        };
 
-       struct i915_page_table *scratch_pt;
-       struct i915_page_directory *scratch_pd;
-
        struct drm_i915_file_private *file_priv;
 
        gen6_pte_t __iomem *pd_addr;
 
        int (*enable)(struct i915_hw_ppgtt *ppgtt);
        int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_engine_cs *ring);
+                        struct drm_i915_gem_request *req);
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
        return i915_pte_count(address, length, GEN8_PDE_SHIFT);
 }
 
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+       return test_bit(n, ppgtt->pdp.used_pdpes) ?
+               px_dma(ppgtt->pdp.page_directory[n]) :
+               px_dma(ppgtt->base.scratch_pd);
+}
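A sketch of the intended use of i915_page_dir_dma_addr(): when loading the four GEN8 PDP entries, unused slots fall back to the scratch page directory so the hardware never sees a dangling pointer. This is a fragment with req/ppgtt assumed in scope, and the emit helper name is an assumption for illustration:

	/* illustrative only; gen8_write_pdp() is an assumed emit helper */
	for (i = 0; i < 4; i++) {
		dma_addr_t addr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, addr);
		if (ret)
			return ret;
	}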
+
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
                                        struct drm_i915_file_private *fpriv);
index 521548a08578231334f95560c7ad79d3339dd5b9..5026a6267a88034b3cae18acc0674952a711521a 100644 (file)
@@ -73,6 +73,24 @@ free_gem:
        return ret;
 }
 
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient, as the null state generator builds the final batch in
+ * two passes, constructing commands and state separately. At that point the
+ * size of both is known, and the generator compacts them by relocating the
+ * state right after the commands, taking care of alignment, so there is
+ * sufficient space below the commands for appending new ones.
+ */
+#define OUT_BATCH(batch, i, val)                               \
+       do {                                                    \
+               if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {  \
+                       ret = -ENOSPC;                          \
+                       goto err_out;                           \
+               }                                               \
+               (batch)[(i)++] = (val);                         \
+       } while (0)
+
 static int render_state_setup(struct render_state *so)
 {
        const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@ static int render_state_setup(struct render_state *so)
                        s = lower_32_bits(r);
                        if (so->gen >= 8) {
                                if (i + 1 >= rodata->batch_items ||
-                                   rodata->batch[i + 1] != 0)
-                                       return -EINVAL;
+                                   rodata->batch[i + 1] != 0) {
+                                       ret = -EINVAL;
+                                       goto err_out;
+                               }
 
                                d[i++] = s;
                                s = upper_32_bits(r);
@@ -108,6 +128,21 @@ static int render_state_setup(struct render_state *so)
 
                d[i++] = s;
        }
+
+       while (i % CACHELINE_DWORDS)
+               OUT_BATCH(d, i, MI_NOOP);
+
+       so->aux_batch_offset = i * sizeof(u32);
+
+       OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+       so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+       /*
+        * Since we are sending length, we need to strictly conform to
+        * all requirements. For Gen2 this must be a multiple of 8.
+        */
+       so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
        kunmap(page);
 
        ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@ static int render_state_setup(struct render_state *so)
        }
 
        return 0;
+
+err_out:
+       kunmap(page);
+       return ret;
 }
 
+#undef OUT_BATCH
+
 void i915_gem_render_state_fini(struct render_state *so)
 {
        i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
        return 0;
 }
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
        struct render_state so;
        int ret;
 
-       ret = i915_gem_render_state_prepare(ring, &so);
+       ret = i915_gem_render_state_prepare(req->ring, &so);
        if (ret)
                return ret;
 
        if (so.rodata == NULL)
                return 0;
 
-       ret = ring->dispatch_execbuffer(ring,
-                                       so.ggtt_offset,
-                                       so.rodata->batch_items * 4,
-                                       I915_DISPATCH_SECURE);
+       ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+                                            so.rodata->batch_items * 4,
+                                            I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+       if (so.aux_batch_size > 8) {
+               ret = req->ring->dispatch_execbuffer(req,
+                                                    (so.ggtt_offset +
+                                                     so.aux_batch_offset),
+                                                    so.aux_batch_size,
+                                                    I915_DISPATCH_SECURE);
+               if (ret)
+                       goto out;
+       }
+
+       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
-       ret = __i915_add_request(ring, NULL, so.obj);
-       /* __i915_add_request moves object to inactive if it fails */
 out:
        i915_gem_render_state_fini(&so);
        return ret;
index c44961ed3fad4f6308af6594177e9b5f9a70f0cf..e641bb093a903bba18e02cd0cd156e6070a40a62 100644 (file)
@@ -37,9 +37,11 @@ struct render_state {
        struct drm_i915_gem_object *obj;
        u64 ggtt_offset;
        int gen;
+       u32 aux_batch_size;
+       u32 aux_batch_offset;
 };
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
 int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
                                  struct render_state *so);
index 8b5b784c62fea276fc9bc947a85734f75f81a9eb..a36cb95ec798ec8a8a3e09da884cf6528d84c126 100644 (file)
  * for is a boon.
  */
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+                               struct drm_mm_node *node, u64 size,
+                               unsigned alignment)
+{
+       int ret;
+
+       if (!drm_mm_initialized(&dev_priv->mm.stolen))
+               return -ENODEV;
+
+       mutex_lock(&dev_priv->mm.stolen_lock);
+       ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+                                DRM_MM_SEARCH_DEFAULT);
+       mutex_unlock(&dev_priv->mm.stolen_lock);
+
+       return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+                                struct drm_mm_node *node)
+{
+       mutex_lock(&dev_priv->mm.stolen_lock);
+       drm_mm_remove_node(node);
+       mutex_unlock(&dev_priv->mm.stolen_lock);
+}
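With allocations now serialized by stolen_lock, callers are expected to go through the two wrappers above instead of touching dev_priv->mm.stolen directly. A minimal usage sketch, assuming a caller with dev_priv and size in scope (error handling trimmed):

	struct drm_mm_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node && i915_gem_stolen_insert_node(dev_priv, node, size, 4096)) {
		kfree(node);	/* no stolen space, or stolen not initialized */
		node = NULL;
	}
	/* ... use the stolen range described by node ... */
	i915_gem_stolen_remove_node(dev_priv, node);
	kfree(node);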
+
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,150 +176,115 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        return base;
 }
 
-static int find_compression_threshold(struct drm_device *dev,
-                                     struct drm_mm_node *node,
-                                     int size,
-                                     int fb_cpp)
+void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int compression_threshold = 1;
-       int ret;
-
-       /* HACK: This code depends on what we will do in *_enable_fbc. If that
-        * code changes, this code needs to change as well.
-        *
-        * The enable_fbc code will attempt to use one of our 2 compression
-        * thresholds, therefore, in that case, we only have 1 resort.
-        */
 
-       /* Try to over-allocate to reduce reallocations and fragmentation. */
-       ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-                                size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-       if (ret == 0)
-               return compression_threshold;
-
-again:
-       /* HW's ability to limit the CFB is 1:4 */
-       if (compression_threshold > 4 ||
-           (fb_cpp == 2 && compression_threshold == 2))
-               return 0;
+       if (!drm_mm_initialized(&dev_priv->mm.stolen))
+               return;
 
-       ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-                                size >>= 1, 4096,
-                                DRM_MM_SEARCH_DEFAULT);
-       if (ret && INTEL_INFO(dev)->gen <= 4) {
-               return 0;
-       } else if (ret) {
-               compression_threshold <<= 1;
-               goto again;
-       } else {
-               return compression_threshold;
-       }
+       drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                    unsigned long *base, unsigned long *size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_mm_node *uninitialized_var(compressed_llb);
-       int ret;
-
-       ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
-                                        size, fb_cpp);
-       if (!ret)
-               goto err_llb;
-       else if (ret > 1) {
-               DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
-       }
-
-       dev_priv->fbc.threshold = ret;
-
-       if (INTEL_INFO(dev_priv)->gen >= 5)
-               I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-       else if (IS_GM45(dev)) {
-               I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-       } else {
-               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
-               if (!compressed_llb)
-                       goto err_fb;
-
-               ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
-                                        4096, 4096, DRM_MM_SEARCH_DEFAULT);
-               if (ret)
-                       goto err_fb;
-
-               dev_priv->fbc.compressed_llb = compressed_llb;
-
-               I915_WRITE(FBC_CFB_BASE,
-                          dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
-               I915_WRITE(FBC_LL_BASE,
-                          dev_priv->mm.stolen_base + compressed_llb->start);
+       uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+       *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+       switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+       case GEN6_STOLEN_RESERVED_1M:
+               *size = 1024 * 1024;
+               break;
+       case GEN6_STOLEN_RESERVED_512K:
+               *size = 512 * 1024;
+               break;
+       case GEN6_STOLEN_RESERVED_256K:
+               *size = 256 * 1024;
+               break;
+       case GEN6_STOLEN_RESERVED_128K:
+               *size = 128 * 1024;
+               break;
+       default:
+               *size = 1024 * 1024;
+               MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
-
-       dev_priv->fbc.uncompressed_size = size;
-
-       DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
-                     size);
-
-       return 0;
-
-err_fb:
-       kfree(compressed_llb);
-       drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
-       pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
-       return -ENOSPC;
 }
 
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                    unsigned long *base, unsigned long *size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!drm_mm_initialized(&dev_priv->mm.stolen))
-               return -ENODEV;
-
-       if (size <= dev_priv->fbc.uncompressed_size)
-               return 0;
-
-       /* Release any current block */
-       i915_gem_stolen_cleanup_compression(dev);
-
-       return i915_setup_compression(dev, size, fb_cpp);
+       uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+       *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+       switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+       case GEN7_STOLEN_RESERVED_1M:
+               *size = 1024 * 1024;
+               break;
+       case GEN7_STOLEN_RESERVED_256K:
+               *size = 256 * 1024;
+               break;
+       default:
+               *size = 1024 * 1024;
+               MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+       }
 }
 
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                    unsigned long *base, unsigned long *size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->fbc.uncompressed_size == 0)
-               return;
-
-       drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
-       if (dev_priv->fbc.compressed_llb) {
-               drm_mm_remove_node(dev_priv->fbc.compressed_llb);
-               kfree(dev_priv->fbc.compressed_llb);
+       uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+       *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+       switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+       case GEN8_STOLEN_RESERVED_1M:
+               *size = 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_2M:
+               *size = 2 * 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_4M:
+               *size = 4 * 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_8M:
+               *size = 8 * 1024 * 1024;
+               break;
+       default:
+               *size = 8 * 1024 * 1024;
+               MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
-
-       dev_priv->fbc.uncompressed_size = 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                   unsigned long *base, unsigned long *size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+       unsigned long stolen_top;
 
-       if (!drm_mm_initialized(&dev_priv->mm.stolen))
-               return;
+       stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
-       i915_gem_stolen_cleanup_compression(dev);
-       drm_mm_takedown(&dev_priv->mm.stolen);
+       *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+       /* On these platforms, the register doesn't have a size field, so the
+        * size is the distance between the base and the top of the stolen
+        * memory. We also have the genuine case where base is zero and there's
+        * nothing reserved. */
+       if (*base == 0)
+               *size = 0;
+       else
+               *size = stolen_top - *base;
 }
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp;
-       int bios_reserved = 0;
+       unsigned long reserved_total, reserved_base, reserved_size;
+       unsigned long stolen_top;
+
+       mutex_init(&dev_priv->mm.stolen_lock);
 
 #ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@ int i915_gem_init_stolen(struct drm_device *dev)
        if (dev_priv->mm.stolen_base == 0)
                return 0;
 
-       DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
-                     dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
-
-       if (INTEL_INFO(dev)->gen >= 8) {
-               tmp = I915_READ(GEN7_BIOS_RESERVED);
-               tmp >>= GEN8_BIOS_RESERVED_SHIFT;
-               tmp &= GEN8_BIOS_RESERVED_MASK;
-               bios_reserved = (1024*1024) << tmp;
-       } else if (IS_GEN7(dev)) {
-               tmp = I915_READ(GEN7_BIOS_RESERVED);
-               bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
-                       256*1024 : 1024*1024;
+       stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+       switch (INTEL_INFO(dev_priv)->gen) {
+       case 2:
+       case 3:
+       case 4:
+       case 5:
+               /* Assume the gen6 maximum for the older platforms. */
+               reserved_size = 1024 * 1024;
+               reserved_base = stolen_top - reserved_size;
+               break;
+       case 6:
+               gen6_get_stolen_reserved(dev_priv, &reserved_base,
+                                        &reserved_size);
+               break;
+       case 7:
+               gen7_get_stolen_reserved(dev_priv, &reserved_base,
+                                        &reserved_size);
+               break;
+       default:
+               if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+                       bdw_get_stolen_reserved(dev_priv, &reserved_base,
+                                               &reserved_size);
+               else
+                       gen8_get_stolen_reserved(dev_priv, &reserved_base,
+                                                &reserved_size);
+               break;
+       }
+
+       /* It is possible for the reserved base to be zero, but the register
+        * field for size doesn't have a zero option. */
+       if (reserved_base == 0) {
+               reserved_size = 0;
+               reserved_base = stolen_top;
        }
 
-       if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+       if (reserved_base < dev_priv->mm.stolen_base ||
+           reserved_base + reserved_size > stolen_top) {
+               DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+                             reserved_base, reserved_base + reserved_size,
+                             dev_priv->mm.stolen_base, stolen_top);
                return 0;
+       }
+
+       /* It is possible for the reserved area to end before the end of stolen
+        * memory, so just consider the start. */
+       reserved_total = stolen_top - reserved_base;
+
+       DRM_DEBUG_KMS("Memory reserved for graphics device: %luK, usable: %luK\n",
+                     dev_priv->gtt.stolen_size >> 10,
+                     (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-                   bios_reserved);
+                   reserved_total);
 
        return 0;
 }
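To make the arithmetic concrete with hypothetical numbers: on a machine with a 64 MiB stolen region whose reserved area starts 1 MiB below stolen_top, reserved_total works out to 1 MiB, the debug line reads "Memory reserved for graphics device: 65536K, usable: 64512K", and drm_mm_init() hands the remaining 63 MiB to the stolen allocator.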
@@ -386,8 +411,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
        if (obj->stolen) {
-               drm_mm_remove_node(obj->stolen);
+               i915_gem_stolen_remove_node(dev_priv, obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
@@ -448,8 +475,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        if (!stolen)
                return NULL;
 
-       ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
-                                4096, DRM_MM_SEARCH_DEFAULT);
+       ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
@@ -459,7 +485,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        if (obj)
                return obj;
 
-       drm_mm_remove_node(stolen);
+       i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
 }
@@ -494,7 +520,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
        stolen->start = stolen_offset;
        stolen->size = size;
+       mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+       mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
@@ -504,7 +532,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
-               drm_mm_remove_node(stolen);
+               i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }
@@ -545,7 +573,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 err_vma:
        i915_gem_vma_destroy(vma);
 err_out:
-       drm_mm_remove_node(stolen);
+       i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
index d19c9db5e18c9d57057ad78ffdbdfa1a65b861b4..8a6717cc265c6c0a062d8cae9d4dc966356fa028 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled.  However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y.  So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip --  Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics.  This
- * is called "Channel XOR Randomization" in the MCH documentation.  The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
+/**
+ * DOC: buffer object tiling
  *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface to
+ * declare fence register requirements.
  *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are two
+ * exceptions:
  *
- * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
+ * - For X and Y tiling the hardware provides detilers for CPU access, so called
+ *   fences. Since there's only a limited amount of them the kernel must manage
+ *   these, and therefore userspace must tell the kernel the object tiling if it
+ *   wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ *   depends upon the physical page frame number. When swapping such objects,
+ *   the page frame number might change and the kernel must be able to fix this
+ *   up, and hence know the tiling. Note that on a subset of platforms with
+ *   asymmetric memory channel population the swizzling pattern changes in an
+ *   unknown way, and for those the kernel simply forbids swapping completely.
  *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * Since neither of these applies to new tiling layouts on modern platforms,
+ * like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
+ * tiled. Anything else can be handled in userspace entirely without the
+ * kernel's involvement.
  */
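To ground the fence requirement described above, a hedged userspace sketch of declaring X tiling through the ioctl (fd and handle are assumed valid, and the stride must satisfy the pitch constraints checked below):

	/* userspace sketch; needs <sys/ioctl.h> and <drm/i915_drm.h> */
	struct drm_i915_gem_set_tiling set = {
		.handle = handle,		/* GEM handle, assumed valid */
		.tiling_mode = I915_TILING_X,
		.stride = 4096,			/* bytes per row, example value */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) == 0)
		swizzle = set.swizzle_mode;	/* bit-6 swizzle userspace must apply */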
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-       uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
-       if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
-               /*
-                * On BDW+, swizzling is not used. We leave the CPU memory
-                * controller in charge of optimizing memory accesses without
-                * the extra address manipulation GPU side.
-                *
-                * VLV and CHV don't have GPU swizzling.
-                */
-               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               if (dev_priv->preserve_bios_swizzle) {
-                       if (I915_READ(DISP_ARB_CTL) &
-                           DISP_TILE_SURFACE_SWIZZLING) {
-                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-                               swizzle_y = I915_BIT_6_SWIZZLE_9;
-                       } else {
-                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-                       }
-               } else {
-                       uint32_t dimm_c0, dimm_c1;
-                       dimm_c0 = I915_READ(MAD_DIMM_C0);
-                       dimm_c1 = I915_READ(MAD_DIMM_C1);
-                       dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-                       dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-                       /* Enable swizzling when the channels are populated
-                        * with identically sized dimms. We don't need to check
-                        * the 3rd channel because no cpu with gpu attached
-                        * ships in that configuration. Also, swizzling only
-                        * makes sense for 2 channels anyway. */
-                       if (dimm_c0 == dimm_c1) {
-                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-                               swizzle_y = I915_BIT_6_SWIZZLE_9;
-                       } else {
-                               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-                       }
-               }
-       } else if (IS_GEN5(dev)) {
-               /* On Ironlake whatever DRAM config, GPU always do
-                * same swizzling setup.
-                */
-               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-               swizzle_y = I915_BIT_6_SWIZZLE_9;
-       } else if (IS_GEN2(dev)) {
-               /* As far as we know, the 865 doesn't have these bit 6
-                * swizzling issues.
-                */
-               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-       } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
-               uint32_t dcc;
-
-               /* On 9xx chipsets, channel interleave by the CPU is
-                * determined by DCC.  For single-channel, neither the CPU
-                * nor the GPU do swizzling.  For dual channel interleaved,
-                * the GPU's interleave is bit 9 and 10 for X tiled, and bit
-                * 9 for Y tiled.  The CPU's interleave is independent, and
-                * can be based on either bit 11 (haven't seen this yet) or
-                * bit 17 (common).
-                */
-               dcc = I915_READ(DCC);
-               switch (dcc & DCC_ADDRESSING_MODE_MASK) {
-               case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
-               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
-                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-                       break;
-               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-                       if (dcc & DCC_CHANNEL_XOR_DISABLE) {
-                               /* This is the base swizzling by the GPU for
-                                * tiled buffers.
-                                */
-                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-                               swizzle_y = I915_BIT_6_SWIZZLE_9;
-                       } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-                               /* Bit 11 swizzling by the CPU in addition. */
-                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
-                               swizzle_y = I915_BIT_6_SWIZZLE_9_11;
-                       } else {
-                               /* Bit 17 swizzling by the CPU in addition. */
-                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
-                               swizzle_y = I915_BIT_6_SWIZZLE_9_17;
-                       }
-                       break;
-               }
-
-               /* check for L-shaped memory aka modified enhanced addressing */
-               if (IS_GEN4(dev)) {
-                       uint32_t ddc2 = I915_READ(DCC2);
-
-                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-                               dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-               }
-
-               if (dcc == 0xffffffff) {
-                       DRM_ERROR("Couldn't read from MCHBAR.  "
-                                 "Disabling tiling.\n");
-                       swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-                       swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-               }
-       } else {
-               /* The 965, G33, and newer, have a very flexible memory
-                * configuration.  It will enable dual-channel mode
-                * (interleaving) on as much memory as it can, and the GPU
-                * will additionally sometimes enable different bit 6
-                * swizzling for tiled objects from the CPU.
-                *
-                * Here's what I found on the G965:
-                *    slot fill         memory size  swizzling
-                * 0A   0B   1A   1B    1-ch   2-ch
-                * 512  0    0    0     512    0     O
-                * 512  0    512  0     16     1008  X
-                * 512  0    0    512   16     1008  X
-                * 0    512  0    512   16     1008  X
-                * 1024 1024 1024 0     2048   1024  O
-                *
-                * We could probably detect this based on either the DRB
-                * matching, which was the case for the swizzling required in
-                * the table above, or from the 1-ch value being less than
-                * the minimum size of a rank.
-                */
-               if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-               } else {
-                       swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-                       swizzle_y = I915_BIT_6_SWIZZLE_9;
-               }
-       }
-
-       dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-       dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
 
 /* Check pitch constraints for all chips & tiling formats */
 static bool
@@ -313,8 +144,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 }
 
 /**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Sets the tiling mode of an object, returning the required swizzling of
  * bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@ err:
 }
 
 /**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -478,75 +329,3 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 
        return 0;
 }
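The read side, correspondingly, as a hedged sketch under the same assumptions as the set_tiling example above (printf additionally needs <stdio.h>):

	struct drm_i915_gem_get_tiling get = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) == 0)
		printf("tiling=%u swizzle=%u\n", get.tiling_mode, get.swizzle_mode);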
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
-       char temp[64];
-       char *vaddr;
-       int i;
-
-       vaddr = kmap(page);
-
-       for (i = 0; i < PAGE_SIZE; i += 128) {
-               memcpy(temp, &vaddr[i], 64);
-               memcpy(&vaddr[i], &vaddr[i + 64], 64);
-               memcpy(&vaddr[i + 64], temp, 64);
-       }
-
-       kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       int i;
-
-       if (obj->bit_17 == NULL)
-               return;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-               char new_bit_17 = page_to_phys(page) >> 17;
-               if ((new_bit_17 & 0x1) !=
-                   (test_bit(i, obj->bit_17) != 0)) {
-                       i915_gem_swizzle_page(page);
-                       set_page_dirty(page);
-               }
-               i++;
-       }
-}
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       int page_count = obj->base.size >> PAGE_SHIFT;
-       int i;
-
-       if (obj->bit_17 == NULL) {
-               obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
-                                     sizeof(long), GFP_KERNEL);
-               if (obj->bit_17 == NULL) {
-                       DRM_ERROR("Failed to allocate memory for bit 17 "
-                                 "record\n");
-                       return;
-               }
-       }
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
-                       __set_bit(i, obj->bit_17);
-               else
-                       __clear_bit(i, obj->bit_17);
-               i++;
-       }
-}
index 6f4256918f7694804f992cd2466db250d7490ad0..41d0739e6fdfa9474a21b63b754c9000a7b33421 100644 (file)
@@ -369,6 +369,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+       err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
        if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@ static void i915_error_capture_msg(struct drm_device *dev,
 static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
 {
+       error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+       error->iommu = intel_iommu_gfx_mapped;
+#endif
        error->reset_count = i915_reset_count(&dev_priv->gpu_error);
        error->suspend_count = dev_priv->suspend_count;
 }
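Note the encoding: error->iommu stays at -1 when CONFIG_INTEL_IOMMU is not compiled in, so the "IOMMU enabled?" line added to the error state above prints -1 for unknown, 0 for disabled and 1 for enabled.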
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644 (file)
index 0000000..ccdc6c8
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS                     0xc000
+#define   GS_BOOTROM_SHIFT             1
+#define   GS_BOOTROM_MASK                (0x7F << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_RSA_FAILED                  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_UKERNEL_SHIFT             8
+#define   GS_UKERNEL_MASK                (0xFF << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_LAPIC_DONE                  (0x30 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_DPC_ERROR           (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_READY               (0xF0 << GS_UKERNEL_SHIFT)
+#define   GS_MIA_SHIFT                 16
+#define   GS_MIA_MASK                    (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE                 0xc050
+#define   GUC_WOPCM_SIZE_VALUE           (0x80 << 12)  /* 512KB */
+#define GUC_WOPCM_OFFSET               0x80000         /* 512KB */
+
+#define SOFT_SCRATCH(n)                        (0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0              0xc200
+#define DMA_ADDR_0_LOW                 0xc300
+#define DMA_ADDR_0_HIGH                        0xc304
+#define DMA_ADDR_1_LOW                 0xc308
+#define DMA_ADDR_1_HIGH                        0xc30c
+#define   DMA_ADDRESS_SPACE_WOPCM        (7 << 16)
+#define   DMA_ADDRESS_SPACE_GTT                  (8 << 16)
+#define DMA_COPY_SIZE                  0xc310
+#define DMA_CTRL                       0xc314
+#define   UOS_MOVE                       (1<<4)
+#define   START_DMA                      (1<<0)
+#define DMA_GUC_WOPCM_OFFSET           0xc340
+
+#define GEN8_GT_PM_CONFIG              0x138140
+#define GEN9_GT_PM_CONFIG              0x13816c
+#define   GEN8_GT_DOORBELL_ENABLE        (1<<0)
+
+#define GEN8_GTCR                      0x4274
+#define   GEN8_GTCR_INVALIDATE           (1<<0)
+
+#define GUC_ARAT_C6DIS                 0xA178
+
+#define GUC_SHIM_CONTROL               0xc064
+#define   GUC_DISABLE_SRAM_INIT_TO_ZEROES      (1<<0)
+#define   GUC_ENABLE_READ_CACHE_LOGIC          (1<<1)
+#define   GUC_ENABLE_MIA_CACHING               (1<<2)
+#define   GUC_GEN10_MSGCH_ENABLE               (1<<4)
+#define   GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA  (1<<9)
+#define   GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10)
+#define   GUC_ENABLE_MIA_CLOCK_GATING          (1<<15)
+#define   GUC_GEN10_SHIM_WC_ENABLE             (1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES        | \
+                                GUC_ENABLE_READ_CACHE_LOGIC            | \
+                                GUC_ENABLE_MIA_CACHING                 | \
+                                GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA    | \
+                                GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT             0xc4c8
+#define   HOST2GUC_TRIGGER               (1<<0)
+
+#define DRBMISC1                       0x1984
+#define   DOORBELL_ENABLE                (1<<0)
+
+#define GEN8_DRBREGL(x)                        (0x1000 + (x) * 8)
+#define   GEN8_DRB_VALID                 (1<<0)
+#define GEN8_DRBREGU(x)                        (GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR                      0x44054
+
+#define GUC_BCS_RCS_IER                        0xC550
+#define GUC_VCS2_VCS1_IER              0xC554
+#define GUC_WD_VECS_IER                        0xC558
+#define GUC_PM_P24C_IER                        0xC55C
+
+#endif
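The *_SHIFT/*_MASK pairs above slice the GUC_STATUS register into bootrom, uKernel and MIA fields. A minimal sketch of how firmware-load polling might decode it (the helper name is illustrative, not part of this patch):

	/* illustrative helper built only from the definitions above */
	static bool guc_ucode_ready(u32 status)
	{
		return (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY;
	}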
index 23aa04cded6b013d6e78c046926cc63d03dae628..97f3a5640289389a8328ac66da9e127c7a3ed6c6 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-typedef struct _drm_i915_batchbuffer32 {
-       int start;              /* agp offset */
-       int used;               /* nr bytes in use */
-       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
-       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
-       int num_cliprects;      /* mulitpass with multiple cliprects? */
-       u32 cliprects;          /* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
-                                  unsigned long arg)
-{
-       drm_i915_batchbuffer32_t batchbuffer32;
-       drm_i915_batchbuffer_t __user *batchbuffer;
-
-       if (copy_from_user
-           (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
-               return -EFAULT;
-
-       batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
-       if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
-           || __put_user(batchbuffer32.start, &batchbuffer->start)
-           || __put_user(batchbuffer32.used, &batchbuffer->used)
-           || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
-           || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
-           || __put_user(batchbuffer32.num_cliprects,
-                         &batchbuffer->num_cliprects)
-           || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
-                         &batchbuffer->cliprects))
-               return -EFAULT;
-
-       return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
-                        (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
-       u32 buf;                /* pointer to userspace command buffer */
-       int sz;                 /* nr bytes in buf */
-       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
-       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
-       int num_cliprects;      /* mulitpass with multiple cliprects? */
-       u32 cliprects;          /* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
-                                unsigned long arg)
-{
-       drm_i915_cmdbuffer32_t cmdbuffer32;
-       drm_i915_cmdbuffer_t __user *cmdbuffer;
-
-       if (copy_from_user
-           (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
-               return -EFAULT;
-
-       cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
-       if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
-           || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
-                         &cmdbuffer->buf)
-           || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
-           || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
-           || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
-           || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
-           || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
-                         &cmdbuffer->cliprects))
-               return -EFAULT;
-
-       return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
-                        (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
-       u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
-                               unsigned long arg)
-{
-       drm_i915_irq_emit32_t req32;
-       drm_i915_irq_emit_t __user *request;
-
-       if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-               return -EFAULT;
-
-       request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-           || __put_user((int __user *)(unsigned long)req32.irq_seq,
-                         &request->irq_seq))
-               return -EFAULT;
-
-       return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
-                        (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
-       int param;
+struct drm_i915_getparam32 {
+       s32 param;
+       /*
+        * We screwed up the generic ioctl struct here and used a variable-sized
+        * pointer. Use u32 in the compat struct to match the 32bit pointer
+        * userspace expects.
+        */
        u32 value;
-} drm_i915_getparam32_t;
+};
 
 static int compat_i915_getparam(struct file *file, unsigned int cmd,
                                unsigned long arg)
 {
-       drm_i915_getparam32_t req32;
+       struct drm_i915_getparam32 req32;
        drm_i915_getparam_t __user *request;
 
        if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
                         (unsigned long)request);
 }
 
-typedef struct drm_i915_mem_alloc32 {
-       int region;
-       int alignment;
-       int size;
-       u32 region_offset;      /* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
-                            unsigned long arg)
-{
-       drm_i915_mem_alloc32_t req32;
-       drm_i915_mem_alloc_t __user *request;
-
-       if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-               return -EFAULT;
-
-       request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-           || __put_user(req32.region, &request->region)
-           || __put_user(req32.alignment, &request->alignment)
-           || __put_user(req32.size, &request->size)
-           || __put_user((void __user *)(unsigned long)req32.region_offset,
-                         &request->region_offset))
-               return -EFAULT;
-
-       return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
-                        (unsigned long)request);
-}
-
 static drm_ioctl_compat_t *i915_compat_ioctls[] = {
-       [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
-       [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
        [DRM_I915_GETPARAM] = compat_i915_getparam,
-       [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
-       [DRM_I915_ALLOC] = compat_i915_alloc
 };
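For orientation, a hedged sketch of how a table like this is typically consulted by the compat entry point (the dispatch code itself is outside this hunk, so the exact surrounding logic is assumed):

	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;

	if (nr - DRM_COMMAND_BASE < ARRAY_SIZE(i915_compat_ioctls))
		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* fall back to the generic path when no thunk is registered */
	ret = fn ? (*fn)(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);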
 
 /**
index 984e2fe6688c4c2cabd8829fe2e5cad97e88770e..1118c39281f98cc272b23bcbded120c7ecdd4502 100644 (file)
@@ -564,8 +564,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-       const struct drm_display_mode *mode =
-               &intel_crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
@@ -620,7 +619,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;
 
@@ -647,14 +646,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;
 
-       if (!intel_crtc->active) {
+       if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
@@ -796,7 +795,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                return -EINVAL;
        }
 
-       if (!crtc->state->enable) {
+       if (!crtc->hwmode.crtc_clock) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }
@@ -805,151 +804,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc,
-                                                    &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
-                               struct drm_connector *connector)
-{
-       enum drm_connector_status old_status;
-
-       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-       old_status = connector->status;
-
-       connector->status = connector->funcs->detect(connector, false);
-       if (old_status == connector->status)
-               return false;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-                     connector->base.id,
-                     connector->name,
-                     drm_get_connector_status_name(old_status),
-                     drm_get_connector_status_name(connector->status));
-
-       return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, dig_port_work);
-       u32 long_port_mask, short_port_mask;
-       struct intel_digital_port *intel_dig_port;
-       int i;
-       u32 old_bits = 0;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       long_port_mask = dev_priv->long_hpd_port_mask;
-       dev_priv->long_hpd_port_mask = 0;
-       short_port_mask = dev_priv->short_hpd_port_mask;
-       dev_priv->short_hpd_port_mask = 0;
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               bool valid = false;
-               bool long_hpd = false;
-               intel_dig_port = dev_priv->hpd_irq_port[i];
-               if (!intel_dig_port || !intel_dig_port->hpd_pulse)
-                       continue;
-
-               if (long_port_mask & (1 << i))  {
-                       valid = true;
-                       long_hpd = true;
-               } else if (short_port_mask & (1 << i))
-                       valid = true;
-
-               if (valid) {
-                       enum irqreturn ret;
-
-                       ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-                       if (ret == IRQ_NONE) {
-                               /* fall back to old school hpd */
-                               old_bits |= (1 << intel_dig_port->base.hpd_pin);
-                       }
-               }
-       }
-
-       if (old_bits) {
-               spin_lock_irq(&dev_priv->irq_lock);
-               dev_priv->hpd_event_bits |= old_bits;
-               spin_unlock_irq(&dev_priv->irq_lock);
-               schedule_work(&dev_priv->hotplug_work);
-       }
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, hotplug_work);
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct intel_connector *intel_connector;
-       struct intel_encoder *intel_encoder;
-       struct drm_connector *connector;
-       bool hpd_disabled = false;
-       bool changed = false;
-       u32 hpd_event_bits;
-
-       mutex_lock(&mode_config->mutex);
-       DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       hpd_event_bits = dev_priv->hpd_event_bits;
-       dev_priv->hpd_event_bits = 0;
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               intel_connector = to_intel_connector(connector);
-               if (!intel_connector->encoder)
-                       continue;
-               intel_encoder = intel_connector->encoder;
-               if (intel_encoder->hpd_pin > HPD_NONE &&
-                   dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
-                   connector->polled == DRM_CONNECTOR_POLL_HPD) {
-                       DRM_INFO("HPD interrupt storm detected on connector %s: "
-                                "switching from hotplug detection to polling\n",
-                               connector->name);
-                       dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT
-                               | DRM_CONNECTOR_POLL_DISCONNECT;
-                       hpd_disabled = true;
-               }
-               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-                       DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-                                     connector->name, intel_encoder->hpd_pin);
-               }
-       }
-        /* if there were no outputs to poll, poll was disabled,
-         * therefore make sure it's enabled when disabling HPD on
-         * some connectors */
-       if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
-               mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
-                                msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
-       }
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               intel_connector = to_intel_connector(connector);
-               if (!intel_connector->encoder)
-                       continue;
-               intel_encoder = intel_connector->encoder;
-               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-                       if (intel_encoder->hot_plug)
-                               intel_encoder->hot_plug(intel_encoder);
-                       if (intel_hpd_irq_event(dev, connector))
-                               changed = true;
-               }
-       }
-       mutex_unlock(&mode_config->mutex);
-
-       if (changed)
-               drm_kms_helper_hotplug_event(dev);
+                                                    &crtc->hwmode);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1227,78 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
        case PORT_A:
-       case PORT_E:
-       default:
-               return -1;
+               return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
        case PORT_B:
-               return 0;
+               return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
-               return 8;
+               return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
-               return 16;
+               return val & PORTD_HOTPLUG_LONG_DETECT;
+       default:
+               return false;
        }
 }
 
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
-       case PORT_A:
-       case PORT_E:
-       default:
-               return -1;
        case PORT_B:
-               return 17;
+               return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
-               return 19;
+               return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
-               return 21;
+               return val & PORTD_HOTPLUG_LONG_DETECT;
+       default:
+               return false;
        }
 }
 
-static enum port get_port_from_pin(enum hpd_pin pin)
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
 {
-       switch (pin) {
-       case HPD_PORT_B:
-               return PORT_B;
-       case HPD_PORT_C:
-               return PORT_C;
-       case HPD_PORT_D:
-               return PORT_D;
+       switch (port) {
+       case PORT_B:
+               return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+       case PORT_C:
+               return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+       case PORT_D:
+               return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
-               return PORT_A; /* no hpd */
+               return false;
        }
 }
 
-static void intel_hpd_irq_handler(struct drm_device *dev,
-                                 u32 hotplug_trigger,
-                                 u32 dig_hotplug_reg,
-                                 const u32 hpd[HPD_NUM_PINS])
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+                            u32 hotplug_trigger, u32 dig_hotplug_reg,
+                            const u32 hpd[HPD_NUM_PINS],
+                            bool long_pulse_detect(enum port port, u32 val))
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
        enum port port;
-       bool storm_detected = false;
-       bool queue_dig = false, queue_hp = false;
-       u32 dig_shift;
-       u32 dig_port_mask = 0;
-
-       if (!hotplug_trigger)
-               return;
+       int i;
 
-       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
-                        hotplug_trigger, dig_hotplug_reg);
+       *pin_mask = 0;
+       *long_mask = 0;
 
-       spin_lock(&dev_priv->irq_lock);
-       for (i = 1; i < HPD_NUM_PINS; i++) {
-               if (!(hpd[i] & hotplug_trigger))
+       for_each_hpd_pin(i) {
+               if ((hpd[i] & hotplug_trigger) == 0)
                        continue;
 
-               port = get_port_from_pin(i);
-               if (port && dev_priv->hpd_irq_port[port]) {
-                       bool long_hpd;
-
-                       if (!HAS_GMCH_DISPLAY(dev_priv)) {
-                               dig_shift = pch_port_to_hotplug_shift(port);
-                               long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-                       } else {
-                               dig_shift = i915_port_to_hotplug_shift(port);
-                               long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-                       }
-
-                       DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
-                                        port_name(port),
-                                        long_hpd ? "long" : "short");
-                       /* for long HPD pulses we want to have the digital queue happen,
-                          but we still want HPD storm detection to function. */
-                       if (long_hpd) {
-                               dev_priv->long_hpd_port_mask |= (1 << port);
-                               dig_port_mask |= hpd[i];
-                       } else {
-                               /* for short HPD just trigger the digital queue */
-                               dev_priv->short_hpd_port_mask |= (1 << port);
-                               hotplug_trigger &= ~hpd[i];
-                       }
-                       queue_dig = true;
-               }
-       }
-
-       for (i = 1; i < HPD_NUM_PINS; i++) {
-               if (hpd[i] & hotplug_trigger &&
-                   dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
-                       /*
-                        * On GMCH platforms the interrupt mask bits only
-                        * prevent irq generation, not the setting of the
-                        * hotplug bits itself. So only WARN about unexpected
-                        * interrupts on saner platforms.
-                        */
-                       WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
-                                 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-                                 hotplug_trigger, i, hpd[i]);
+               *pin_mask |= BIT(i);
 
+               if (!intel_hpd_pin_to_port(i, &port))
                        continue;
-               }
-
-               if (!(hpd[i] & hotplug_trigger) ||
-                   dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
-                       continue;
-
-               if (!(dig_port_mask & hpd[i])) {
-                       dev_priv->hpd_event_bits |= (1 << i);
-                       queue_hp = true;
-               }
 
-               if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
-                                  dev_priv->hpd_stats[i].hpd_last_jiffies
-                                  + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
-                       dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
-                       dev_priv->hpd_stats[i].hpd_cnt = 0;
-                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
-               } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
-                       dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
-                       dev_priv->hpd_event_bits &= ~(1 << i);
-                       DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
-                       storm_detected = true;
-               } else {
-                       dev_priv->hpd_stats[i].hpd_cnt++;
-                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
-                                     dev_priv->hpd_stats[i].hpd_cnt);
-               }
+               if (long_pulse_detect(port, dig_hotplug_reg))
+                       *long_mask |= BIT(i);
        }
 
-       if (storm_detected)
-               dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock(&dev_priv->irq_lock);
+       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+                        hotplug_trigger, dig_hotplug_reg, *pin_mask);
 
-       /*
-        * Our hotplug handler can grab modeset locks (by calling down into the
-        * fb helpers). Hence it must not be run on our own dev-priv->wq work
-        * queue for otherwise the flush_work in the pageflip code will
-        * deadlock.
-        */
-       if (queue_dig)
-               queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
-       if (queue_hp)
-               schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
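
The refactor above splits hotplug handling into a pure decode step (intel_get_hpd_pins) and a platform-independent dispatch step (intel_hpd_irq_handler). A minimal sketch of how an interrupt handler is expected to drive the pair; the latched status value here is hypothetical:

	/* Hedged sketch: decode the latched status, then dispatch. */
	u32 pin_mask, long_mask;
	u32 trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

	intel_get_hpd_pins(&pin_mask, &long_mask, trigger,
			   trigger, /* GMCH: long/short bits share the status reg */
			   hpd_status_g4x, i9xx_port_hotplug_long_detect);
	intel_hpd_irq_handler(dev, pin_mask, long_mask);

Keeping the storm bookkeeping out of the decode step is what lets each platform pass in its own long-pulse predicate instead of the old per-platform shift tables.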
@@ -1755,28 +1523,35 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+       u32 pin_mask, long_mask;
 
-       if (hotplug_status) {
-               I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-               /*
-                * Make sure hotplug status is cleared before we clear IIR, or else we
-                * may miss hotplug events.
-                */
-               POSTING_READ(PORT_HOTPLUG_STAT);
+       if (!hotplug_status)
+               return;
 
-               if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
-                       u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+       I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+       /*
+        * Make sure hotplug status is cleared before we clear IIR, or else we
+        * may miss hotplug events.
+        */
+       POSTING_READ(PORT_HOTPLUG_STAT);
 
-                       intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
-               } else {
-                       u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-                       intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
-               }
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                  hotplug_trigger, hpd_status_g4x,
+                                  i9xx_port_hotplug_long_detect);
+               intel_hpd_irq_handler(dev, pin_mask, long_mask);
 
-               if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-                   hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+               if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        dp_aux_irq_handler(dev);
+       } else {
+               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                  hotplug_trigger, hpd_status_i915,
+                                  i9xx_port_hotplug_long_detect);
+               intel_hpd_irq_handler(dev, pin_mask, long_mask);
        }
 }
 
@@ -1875,12 +1650,18 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
-       u32 dig_hotplug_reg;
 
-       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-       I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+       if (hotplug_trigger) {
+               u32 dig_hotplug_reg, pin_mask, long_mask;
 
-       intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+               I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                  dig_hotplug_reg, hpd_ibx,
+                                  pch_port_hotplug_long_detect);
+               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+       }
 
        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1972,12 +1753,18 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-       u32 dig_hotplug_reg;
 
-       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-       I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+       if (hotplug_trigger) {
+               u32 dig_hotplug_reg, pin_mask, long_mask;
+
+               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+               I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-       intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                  dig_hotplug_reg, hpd_cpt,
+                                  pch_port_hotplug_long_detect);
+               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+       }
 
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1963,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t hp_control;
-       uint32_t hp_trigger;
+       u32 hp_control, hp_trigger;
+       u32 pin_mask, long_mask;
 
        /* Get the status */
        hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +1976,12 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
                return;
        }
 
-       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-               hp_control & BXT_HOTPLUG_CTL_MASK);
-
-       /* Check for HPD storm and schedule bottom half */
-       intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
-       /*
-        * FIXME: Save the hot plug status for bottom half before
-        * clearing the sticky status bits, else the status will be
-        * lost.
-        */
-
        /* Clear sticky bits in hpd status */
        I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+       intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+                          hpd_bxt, bxt_port_hotplug_long_detect);
+       intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
@@ -3203,12 +2982,12 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
        if (HAS_PCH_IBX(dev)) {
                hotplug_irqs = SDE_HOTPLUG_MASK;
                for_each_intel_encoder(dev, intel_encoder)
-                       if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
                                enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
                for_each_intel_encoder(dev, intel_encoder)
-                       if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
                                enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
        }
 
@@ -3237,7 +3016,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
 
        /* Now, enable HPD */
        for_each_intel_encoder(dev, intel_encoder) {
-               if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+               if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
                                == HPD_ENABLED)
                        hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
        }
@@ -4130,7 +3909,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        for_each_intel_encoder(dev, intel_encoder)
-               if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+               if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
                        hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
        /* Programming the CRT detection parameters tends
           to generate a spurious hotplug event about three
@@ -4270,46 +4049,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv),
-                            hotplug_reenable_work.work);
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       int i;
-
-       intel_runtime_pm_get(dev_priv);
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
-               struct drm_connector *connector;
-
-               if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
-                       continue;
-
-               dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
-               list_for_each_entry(connector, &mode_config->connector_list, head) {
-                       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-                       if (intel_connector->encoder->hpd_pin == i) {
-                               if (connector->polled != intel_connector->polled)
-                                       DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-                                                        connector->name);
-                               connector->polled = intel_connector->polled;
-                               if (!connector->polled)
-                                       connector->polled = DRM_CONNECTOR_POLL_HPD;
-                       }
-               }
-       }
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       intel_runtime_pm_put(dev_priv);
-}
-
 /**
  * intel_irq_init - initializes irq support
  * @dev_priv: i915 device instance
@@ -4321,8 +4060,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
 
-       INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-       INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+       intel_hpd_init_work(dev_priv);
+
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
@@ -4335,8 +4074,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 
        INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
-       INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-                         intel_hpd_irq_reenable_work);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
@@ -4421,46 +4158,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        }
 }
 
-/**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
-       int i;
-
-       for (i = 1; i < HPD_NUM_PINS; i++) {
-               dev_priv->hpd_stats[i].hpd_cnt = 0;
-               dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-       }
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_connector *intel_connector = to_intel_connector(connector);
-               connector->polled = intel_connector->polled;
-               if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-                       connector->polled = DRM_CONNECTOR_POLL_HPD;
-               if (intel_connector->mst_port)
-                       connector->polled = DRM_CONNECTOR_POLL_HPD;
-       }
-
-       /* Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked checks happy. */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
-
 /**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
index 8ac5a1b29ac0747735d1f05b9480cfc98bba42b9..5ae4b0aba56412e3c7d8560c5fe1d1484ae00675 100644 (file)
@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
        .modeset = -1,
        .panel_ignore_lid = 1,
        .semaphores = -1,
-       .lvds_downclock = 0,
        .lvds_channel_mode = 0,
        .panel_use_ssc = -1,
        .vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@ struct i915_params i915 __read_mostly = {
        .use_mmio_flip = 0,
        .mmio_debug = 0,
        .verbose_state_checks = 1,
-       .nuclear_pageflip = 0,
        .edp_vswing = 0,
+       .enable_guc_submission = false,
+       .guc_log_level = -1,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
 MODULE_PARM_DESC(modeset,
-       "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+       "Use kernel modesetting [KMS] (0=disable, "
        "1=on, -1=force vga console preference [default])");
 
 module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@ MODULE_PARM_DESC(enable_fbc,
        "Enable frame buffer compression for power savings "
        "(default: -1 (use per-chip default))");
 
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-       "Use panel (LVDS/eDP) downclocking for power savings "
-       "(default: false)");
-
 module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
 MODULE_PARM_DESC(lvds_channel_mode,
         "Specify LVDS channel mode "
@@ -104,7 +99,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
        "Override/Ignore selection of SDVO panel mode in the VBT "
        "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
 module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
 MODULE_PARM_DESC(verbose_state_checks,
        "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
-                "Force atomic modeset functionality; only planes work for now (default: false).");
-
 /* WA to get away with the default setting in VBT for early platforms. Will be removed */
 module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
 MODULE_PARM_DESC(edp_vswing,
                 "Ignore/Override vswing pre-emph table selection from VBT "
                 "(0=use value from vbt [default], 1=low power swing(200mV),"
                 "2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+       "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
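
Both new GuC parameters are registered with 0400 permissions, so they are load-time only and read-only afterwards through sysfs. A hypothetical invocation:

	modprobe i915 enable_guc_submission=1 guc_log_level=3

or the equivalent i915.enable_guc_submission=1 i915.guc_log_level=3 on the kernel command line.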
index 2030f602cbf8b74366bcb78f9f2ddc4a5f0dd9c6..8e46c348366bc867c7b81ed9ccdd72d7bc57c2fc 100644 (file)
 
 /* PCI config space */
 
-#define HPLLCC 0xc0 /* 855 only */
-#define   GC_CLOCK_CONTROL_MASK                (0xf << 0)
+#define HPLLCC 0xc0 /* 85x only */
+#define   GC_CLOCK_CONTROL_MASK                (0x7 << 0)
 #define   GC_CLOCK_133_200             (0 << 0)
 #define   GC_CLOCK_100_200             (1 << 0)
 #define   GC_CLOCK_100_133             (2 << 0)
-#define   GC_CLOCK_166_250             (3 << 0)
+#define   GC_CLOCK_133_266             (3 << 0)
+#define   GC_CLOCK_133_200_2           (4 << 0)
+#define   GC_CLOCK_133_266_2           (5 << 0)
+#define   GC_CLOCK_166_266             (6 << 0)
+#define   GC_CLOCK_166_250             (7 << 0)
+
 #define GCFGC2 0xda
 #define GCFGC  0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE      (1 << 7)
 #define GAM_ECOCHK                     0x4090
 #define   BDW_DISABLE_HDC_INVALIDATION (1<<25)
 #define   ECOCHK_SNB_BIT               (1<<10)
+#define   ECOCHK_DIS_TLB               (1<<8)
 #define   HSW_ECOCHK_ARB_PRIO_SOL      (1<<6)
 #define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
 #define GAB_CTL                                0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
-#define GEN7_BIOS_RESERVED             0x1082C0
-#define GEN7_BIOS_RESERVED_1M          (0 << 5)
-#define GEN7_BIOS_RESERVED_256K                (1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT       7
-#define GEN7_BIOS_RESERVED_MASK        0x1
-#define GEN8_BIOS_RESERVED_MASK        0x3
-
+#define GEN6_STOLEN_RESERVED           0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
+#define GEN6_STOLEN_RESERVED_1M                (0 << 4)
+#define GEN6_STOLEN_RESERVED_512K      (1 << 4)
+#define GEN6_STOLEN_RESERVED_256K      (2 << 4)
+#define GEN6_STOLEN_RESERVED_128K      (3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK (1 << 5)
+#define GEN7_STOLEN_RESERVED_1M                (0 << 5)
+#define GEN7_STOLEN_RESERVED_256K      (1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK (3 << 7)
+#define GEN8_STOLEN_RESERVED_1M                (0 << 7)
+#define GEN8_STOLEN_RESERVED_2M                (1 << 7)
+#define GEN8_STOLEN_RESERVED_4M                (2 << 7)
+#define GEN8_STOLEN_RESERVED_8M                (3 << 7)
 
 /* VGA stuff */
 
 #define   MI_RESTORE_EXT_STATE_EN      (1<<2)
 #define   MI_FORCE_RESTORE             (1<<1)
 #define   MI_RESTORE_INHIBIT           (1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
 #define MI_SEMAPHORE_SIGNAL    MI_INSTR(0x1b, 0) /* GEN8+ */
 #define   MI_SEMAPHORE_TARGET(engine)  ((engine)<<15)
 #define MI_SEMAPHORE_WAIT      MI_INSTR(0x1c, 2) /* GEN8+ */
 #define   MI_INVALIDATE_BSD            (1<<7)
 #define   MI_FLUSH_DW_USE_GTT          (1<<2)
 #define   MI_FLUSH_DW_USE_PPGTT                (0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE          (1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT             (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8     MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
 
 #define MI_PREDICATE_SRC0      (0x2400)
 #define MI_PREDICATE_SRC1      (0x2408)
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
 #define GFX_OP_PIPE_CONTROL(len)       ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_FLUSH_L3                                (1<<27)
 #define   PIPE_CONTROL_GLOBAL_GTT_IVB                  (1<<24) /* gen7+ */
 #define   PIPE_CONTROL_MMIO_WRITE                      (1<<23)
 #define   PIPE_CONTROL_STORE_DATA_INDEX                        (1<<21)
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE          (1<<9)
 #define   PIPE_CONTROL_NOTIFY                          (1<<8)
 #define   PIPE_CONTROL_FLUSH_ENABLE                    (1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE                 (1<<5)
 #define   PIPE_CONTROL_VF_CACHE_INVALIDATE             (1<<4)
 #define   PIPE_CONTROL_CONST_CACHE_INVALIDATE          (1<<3)
 #define   PIPE_CONTROL_STATE_CACHE_INVALIDATE          (1<<2)
 #define MI_CLFLUSH              MI_INSTR(0x27, 0)
 #define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
 #define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM    MI_INSTR(0x29, 0)
 #define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
 #define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
 #define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@ enum skl_disp_power_wells {
 #define _PORT_PLL_EBB_0_A              0x162034
 #define _PORT_PLL_EBB_0_B              0x6C034
 #define _PORT_PLL_EBB_0_C              0x6C340
-#define   PORT_PLL_P1_MASK             (0x07 << 13)
-#define   PORT_PLL_P1(x)               ((x)  << 13)
-#define   PORT_PLL_P2_MASK             (0x1f << 8)
-#define   PORT_PLL_P2(x)               ((x)  << 8)
+#define   PORT_PLL_P1_SHIFT            13
+#define   PORT_PLL_P1_MASK             (0x07 << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P1(x)               ((x)  << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P2_SHIFT            8
+#define   PORT_PLL_P2_MASK             (0x1f << PORT_PLL_P2_SHIFT)
+#define   PORT_PLL_P2(x)               ((x)  << PORT_PLL_P2_SHIFT)
 #define BXT_PORT_PLL_EBB_0(port)       _PORT3(port, _PORT_PLL_EBB_0_A, \
                                                _PORT_PLL_EBB_0_B,      \
                                                _PORT_PLL_EBB_0_C)
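
With explicit *_SHIFT names the divider fields can be packed and unpacked symmetrically. A sketch of a read-modify-write on the P1/P2 post dividers; the port and divider values are placeholders:

	/* Sketch: program P1/P2 through the new shift/mask pairs. */
	u32 temp = I915_READ(BXT_PORT_PLL_EBB_0(port));

	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= PORT_PLL_P1(p1) | PORT_PLL_P2(p2); /* p1/p2 precomputed */
	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);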
@@ -1186,8 +1209,9 @@ enum skl_disp_power_wells {
 /* PORT_PLL_0_A */
 #define   PORT_PLL_M2_MASK             0xFF
 /* PORT_PLL_1_A */
-#define   PORT_PLL_N_MASK              (0x0F << 8)
-#define   PORT_PLL_N(x)                        ((x) << 8)
+#define   PORT_PLL_N_SHIFT             8
+#define   PORT_PLL_N_MASK              (0x0F << PORT_PLL_N_SHIFT)
+#define   PORT_PLL_N(x)                        ((x) << PORT_PLL_N_SHIFT)
 /* PORT_PLL_2_A */
 #define   PORT_PLL_M2_FRAC_MASK                0x3FFFFF
 /* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@ enum skl_disp_power_wells {
 /* PORT_PLL_8_A */
 #define   PORT_PLL_TARGET_CNT_MASK     0x3FF
 /* PORT_PLL_9_A */
-#define  PORT_PLL_LOCK_THRESHOLD_MASK  0xe
+#define  PORT_PLL_LOCK_THRESHOLD_SHIFT 1
+#define  PORT_PLL_LOCK_THRESHOLD_MASK  (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
 #define  PORT_PLL_DCO_AMP_OVR_EN_H     (1<<27)
+#define  PORT_PLL_DCO_AMP_DEFAULT      15
 #define  PORT_PLL_DCO_AMP_MASK         0x3c00
 #define  PORT_PLL_DCO_AMP(x)           (x<<10)
 #define _PORT_PLL_BASE(port)           _PORT3(port, _PORT_PLL_0_A,     \
@@ -1377,6 +1403,18 @@ enum skl_disp_power_wells {
                                                        _PORT_TX_DW14_LN0_C) + \
                                         _BXT_LANE_OFFSET(lane))
 
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1                     0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK                 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0           0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port)                (8+3*(port))
+#define BALANCE_LEG_MASK(port)         (7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT      23
+
 /*
  * Fence registers
  */
@@ -1456,6 +1494,9 @@ enum skl_disp_power_wells {
 #define RING_MAX_IDLE(base)    ((base)+0x54)
 #define RING_HWS_PGA(base)     ((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)        ((base)+0x2080)
+#define RING_RESET_CTL(base)   ((base)+0xd0)
+#define   RESET_CTL_REQUEST_RESET  (1 << 0)
+#define   RESET_CTL_READY_TO_RESET (1 << 1)
 
 #define HSW_GTT_CACHE_EN       0x4024
 #define   GTT_CACHE_EN_ALL     0xF0007FFF
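
RING_RESET_CTL fronts a per-engine reset with a request/acknowledge handshake. A hedged sketch of the expected sequence; the masked-write helper is the driver's existing _MASKED_BIT_ENABLE and the 700 ms timeout is an assumption:

	/* Sketch: ask for a reset, then poll for the ready ack. */
	I915_WRITE(RING_RESET_CTL(ring->mmio_base),
		   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	if (wait_for(I915_READ(RING_RESET_CTL(ring->mmio_base)) &
		     RESET_CTL_READY_TO_RESET, 700))
		DRM_ERROR("timed out waiting for reset ack\n");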
@@ -1946,6 +1987,9 @@ enum skl_disp_power_wells {
 #define FBC_FENCE_OFF          0x03218 /* BSpec typo has 321Bh */
 #define FBC_TAG                        0x03300
 
+#define FBC_STATUS2            0x43214
+#define  FBC_COMPRESSION_MASK  0x7ff
+
 #define FBC_LL_SIZE            (1536)
 
 /* Framebuffer compression for GM45+ */
@@ -2116,7 +2160,7 @@ enum skl_disp_power_wells {
 #define   DPLL_DVO_2X_MODE             (1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV   (1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE         (1 << 29)
-#define   DPLL_REFA_CLK_ENABLE_VLV     (1 << 29)
+#define   DPLL_REF_CLK_ENABLE_VLV      (1 << 29)
 #define   DPLL_VGA_MODE_DIS            (1 << 28)
 #define   DPLLB_MODE_DAC_SERIAL                (1 << 26) /* i915 */
 #define   DPLLB_MODE_LVDS              (2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@ enum skl_disp_power_wells {
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
 #define   DPLL_LOCK_VLV                        (1<<15)
 #define   DPLL_INTEGRATED_CRI_CLK_VLV  (1<<14)
-#define   DPLL_INTEGRATED_CLOCK_VLV    (1<<13)
-#define   DPLL_SSC_REF_CLOCK_CHV       (1<<13)
+#define   DPLL_INTEGRATED_REF_CLK_VLV  (1<<13)
+#define   DPLL_SSC_REF_CLK_CHV         (1<<13)
 #define   DPLL_PORTC_READY_MASK                (0xf << 4)
 #define   DPLL_PORTB_READY_MASK                (0xf)
 
@@ -2488,6 +2532,9 @@ enum skl_disp_power_wells {
 #define CLKCFG_MEM_800                                 (3 << 4)
 #define CLKCFG_MEM_MASK                                        (7 << 4)
 
+#define HPLLVCO                 (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE          (MCHBAR_MIRROR_BASE + 0xc0f)
+
 #define TSC1                   0x11001
 #define   TSE                  (1<<0)
 #define TR1                    0x11006
@@ -2718,8 +2765,10 @@ enum skl_disp_power_wells {
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
 #define GEN6_GT_PERF_STATUS    (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS      (MCHBAR_MIRROR_BASE_SNB + 0x7070)
 #define GEN6_RP_STATE_LIMITS   (MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP      (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP        0x138170
 
 #define INTERVAL_1_28_US(us)   (((us) * 100) >> 7)
 #define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
@@ -2767,7 +2816,8 @@ enum skl_disp_power_wells {
  * valid. Now, docs explain in dwords what is in the context object. The full
  * size is 70720 bytes, however, the power context and execlist context will
  * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE             (17 * PAGE_SIZE)
 /* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@ enum skl_disp_power_wells {
 #define   DSPARB_BSTART_SHIFT  0
 #define   DSPARB_BEND_SHIFT    9 /* on 855 */
 #define   DSPARB_AEND_SHIFT    0
-
+#define   DSPARB_SPRITEA_SHIFT_VLV     0
+#define   DSPARB_SPRITEA_MASK_VLV      (0xff << 0)
+#define   DSPARB_SPRITEB_SHIFT_VLV     8
+#define   DSPARB_SPRITEB_MASK_VLV      (0xff << 8)
+#define   DSPARB_SPRITEC_SHIFT_VLV     16
+#define   DSPARB_SPRITEC_MASK_VLV      (0xff << 16)
+#define   DSPARB_SPRITED_SHIFT_VLV     24
+#define   DSPARB_SPRITED_MASK_VLV      (0xff << 24)
 #define DSPARB2                        (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define   DSPARB_SPRITEA_HI_SHIFT_VLV  0
+#define   DSPARB_SPRITEA_HI_MASK_VLV   (0x1 << 0)
+#define   DSPARB_SPRITEB_HI_SHIFT_VLV  4
+#define   DSPARB_SPRITEB_HI_MASK_VLV   (0x1 << 4)
+#define   DSPARB_SPRITEC_HI_SHIFT_VLV  8
+#define   DSPARB_SPRITEC_HI_MASK_VLV   (0x1 << 8)
+#define   DSPARB_SPRITED_HI_SHIFT_VLV  12
+#define   DSPARB_SPRITED_HI_MASK_VLV   (0x1 << 12)
+#define   DSPARB_SPRITEE_HI_SHIFT_VLV  16
+#define   DSPARB_SPRITEE_HI_MASK_VLV   (0x1 << 16)
+#define   DSPARB_SPRITEF_HI_SHIFT_VLV  20
+#define   DSPARB_SPRITEF_HI_MASK_VLV   (0x1 << 20)
 #define DSPARB3                        (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define   DSPARB_SPRITEE_SHIFT_VLV     0
+#define   DSPARB_SPRITEE_MASK_VLV      (0xff << 0)
+#define   DSPARB_SPRITEF_SHIFT_VLV     8
+#define   DSPARB_SPRITEF_MASK_VLV      (0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1                 (dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@ enum skl_disp_power_wells {
 #define HSW_NDE_RSTWRN_OPT     0x46408
 #define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
 
+#define SKL_DFSM                       0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK      (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675       (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540       (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450       (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5     (3 << 23)
+
 #define FF_SLICE_CS_CHICKEN2                   0x20e4
 #define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
 
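SKL_DFSM exposes the fused cdclk ceiling; a sketch of translating the field into a frequency in kHz, with the mapping mirroring the defines above:

	/* Sketch: read the DFSM fuse and pick the cdclk limit. */
	u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
	int max_cdclk_khz;

	if (limit == SKL_DFSM_CDCLK_LIMIT_675)
		max_cdclk_khz = 675000;
	else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
		max_cdclk_khz = 540000;
	else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
		max_cdclk_khz = 450000;
	else
		max_cdclk_khz = 337500;
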
@@ -5791,6 +5871,7 @@ enum skl_disp_power_wells {
 
 #define GEN8_L3SQCREG4                         0xb118
 #define  GEN8_LQSC_RO_PERF_DIS                 (1<<27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES                (1<<21)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0                           0x7300
@@ -5913,6 +5994,11 @@ enum skl_disp_power_wells {
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030                /* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE       (1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK  (0x3 << 24)
+#define  BXT_PORTA_HOTPLUG_NO_DETECT   (0 << 24)
+#define  BXT_PORTA_HOTPLUG_SHORT_DETECT        (1 << 24)
+#define  BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24)
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
 #define PORTD_PULSE_DURATION_2ms        (0)
 #define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
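
Note that, unlike ports B-D, the BXT port A status is a two-bit field (no-detect/short/long). A hypothetical helper that classifies a long pulse by comparing the full field against the mask:

	/* Sketch: classify a long pulse on BXT port A (helper name assumed). */
	static bool bxt_porta_long_detect(u32 hp_control)
	{
		return (hp_control & BXT_PORTA_HOTPLUG_STATUS_MASK) ==
			BXT_PORTA_HOTPLUG_LONG_DETECT;
	}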
@@ -6047,6 +6133,9 @@ enum skl_disp_power_wells {
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
+#define  GCP_COLOR_INDICATION          (1 << 2)
+#define  GCP_DEFAULT_PHASE_ENABLE      (1 << 1)
+#define  GCP_AV_MUTE                   (1 << 0)
 
 #define _VIDEO_DIP_CTL_B         0xe1200
 #define _VIDEO_DIP_DATA_B        0xe1208
@@ -6186,6 +6275,7 @@ enum skl_disp_power_wells {
 #define _TRANSA_CHICKEN1        0xf0060
 #define _TRANSB_CHICKEN1        0xf1060
 #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE    (1<<10)
 #define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1<<4)
 #define _TRANSA_CHICKEN2        0xf0064
 #define _TRANSB_CHICKEN2        0xf1064
@@ -6370,6 +6460,8 @@ enum skl_disp_power_wells {
 #define PCH_PP_CONTROL         0xc7204
 #define  PANEL_UNLOCK_REGS     (0xabcd << 16)
 #define  PANEL_UNLOCK_MASK     (0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK    (0x1f0)
+#define  BXT_POWER_CYCLE_DELAY_SHIFT   4
 #define  EDP_FORCE_VDD         (1 << 3)
 #define  EDP_BLC_ENABLE                (1 << 2)
 #define  PANEL_POWER_RESET     (1 << 1)
@@ -6398,6 +6490,17 @@ enum skl_disp_power_wells {
 #define  PANEL_POWER_CYCLE_DELAY_MASK  (0x1f)
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2        0xc7300
+#define _BXT_PP_CONTROL2       0xc7304
+#define _BXT_PP_ON_DELAYS2     0xc7308
+#define _BXT_PP_OFF_DELAYS2    0xc730c
+
+#define BXT_PP_STATUS(n)       ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)      ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)    ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)   ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
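The BXT selector macros key the two PPS instances off an index, with index 0 aliasing the legacy block. A short usage sketch:

	/* Sketch: index-based PPS register selection on BXT. */
	u32 pp_ctrl  = I915_READ(BXT_PP_CONTROL(0)); /* legacy PPS block */
	u32 pp_ctrl2 = I915_READ(BXT_PP_CONTROL(1)); /* second PPS block */
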
 #define PCH_DP_B               0xe4100
 #define PCH_DPB_AUX_CH_CTL     0xe4110
 #define PCH_DPB_AUX_CH_DATA1   0xe4114
@@ -6698,6 +6801,7 @@ enum skl_disp_power_wells {
 #define          GEN6_PCODE_READ_RC6VIDS               0x5
 #define     GEN6_ENCODE_RC6_VID(mv)            (((mv) - 245) / 5)
 #define     GEN6_DECODE_RC6_VID(vids)          (((vids) * 5) + 245)
+#define   BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ    0x18
 #define   GEN9_PCODE_READ_MEM_LATENCY          0x6
 #define     GEN9_MEM_LATENCY_LEVEL_MASK                0xFF
 #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT   8
@@ -6756,6 +6860,9 @@ enum skl_disp_power_wells {
 #define GEN7_MISCCPCTL                 (0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE   (1<<0)
 
+#define GEN8_GARBCNTL                   0xB004
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1                        0xB008 /* L3CD Error Status 1 */
 #define HSW_L3CDERRST11                        0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7270,7 @@ enum skl_disp_power_wells {
 #define  LCPLL_CLK_FREQ_337_5_BDW      (2<<26)
 #define  LCPLL_CLK_FREQ_675_BDW                (3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE   (1<<24)
 #define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW                (1<<22)
 #define  LCPLL_CD_SOURCE_FCLK          (1<<21)
@@ -7265,12 +7373,6 @@ enum skl_disp_power_wells {
 #define DC_STATE_EN                    0x45504
 #define  DC_STATE_EN_UPTO_DC5          (1<<0)
 #define  DC_STATE_EN_DC9               (1<<3)
-
-/*
-* SKL DC
-*/
-#define  DC_STATE_EN                   0x45504
-#define  DC_STATE_EN_UPTO_DC5          (1<<0)
 #define  DC_STATE_EN_UPTO_DC6          (2<<0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
@@ -7822,4 +7924,13 @@ enum skl_disp_power_wells {
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0                0xb020  /* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0                0xc800  /* Graphics MOCS base register*/
+#define GEN9_MFX0_MOCS_0       0xc900  /* Media 0 MOCS base register*/
+#define GEN9_MFX1_MOCS_0       0xca00  /* Media 1 MOCS base register*/
+#define GEN9_VEBOX_MOCS_0      0xcb00  /* Video MOCS base register*/
+#define GEN9_BLT_MOCS_0                0xcc00  /* Blitter MOCS base register*/
+
 #endif /* _I915_REG_H_ */
index cf67f82f7b7fc18ecb392c9ae26b0da3d36e97a3..1ccac618468ef6360f823ef5b5650efcdad5ca01 100644 (file)
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
        }
 
        /* only restore FBC info on platforms that support FBC */
-       intel_fbc_disable(dev);
+       intel_fbc_disable(dev_priv);
 
        /* restore FBC interval */
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
index 247626885f49d22cfb94f6f89b1a459c4b0500a8..55bd04c6b9390d1d5cd21a26dea49861cbcff44e 100644 (file)
@@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
                        goto out;
                }
 
-               units = 0;
-               div = 1000000ULL;
-
-               if (IS_CHERRYVIEW(dev)) {
+               if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
                        /* Special case for 320Mhz */
-                       if (czcount_30ns == 1) {
-                               div = 10000000ULL;
-                               units = 3125ULL;
-                       } else {
-                               /* chv counts are one less */
-                               czcount_30ns += 1;
-                       }
+                       div = 10000000ULL;
+                       units = 3125ULL;
+               } else {
+                       czcount_30ns += 1;
+                       div = 1000000ULL;
+                       units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
                }
 
-               if (units == 0)
-                       units = DIV_ROUND_UP_ULL(30ULL * bias,
-                                                (u64)czcount_30ns);
-
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
 
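A quick sanity check of the 320 MHz special case: units/div = 3125 / 10^7 ms = 312.5 ns per count, which is consistent with the residency counter ticking once every 100 clock cycles at 320 MHz (100 / 320 MHz = 312.5 ns).
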
index 849a2590e010ca1d10de85fea06cbb41211ad2d8..2f34c47bd4bfb7a566453475cd5be6f16b4b7872 100644 (file)
@@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct intel_engine_cs *from,
-                    struct intel_engine_cs *to,
+           TP_PROTO(struct drm_i915_gem_request *to_req,
+                    struct intel_engine_cs *from,
                     struct drm_i915_gem_request *req),
-           TP_ARGS(from, to, req),
+           TP_ARGS(to_req, from, req),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
            TP_fast_assign(
                           __entry->dev = from->dev->primary->index;
                           __entry->sync_from = from->id;
-                          __entry->sync_to = to->id;
+                          __entry->sync_to = to_req->ring->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
                           ),
 
@@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-           TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
-           TP_ARGS(ring, invalidate, flush),
+           TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+           TP_ARGS(req, invalidate, flush),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
+                          __entry->dev = req->ring->dev->primary->index;
+                          __entry->ring = req->ring->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
                           ),
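
With the request-centric prototypes the call sites now key off the request rather than the ring; a sketch, where the flush argument names are assumptions from context:

	/* Sketch: tracepoints invoked with the request. */
	trace_i915_gem_ring_sync_to(to_req, from, req);
	trace_i915_gem_ring_flush(req, invalidate_domains, flush_domains);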
index 8e35e0d013df556d8ac04fc27f9ba2bd7354fae3..e2531cf59266e77208c766b36c5291d4a939a825 100644 (file)
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
-                      struct drm_atomic_state *state)
-{
-       int nplanes = dev->mode_config.num_total_plane;
-       int ncrtcs = dev->mode_config.num_crtc;
-       int nconnectors = dev->mode_config.num_connector;
-       enum pipe nuclear_pipe = INVALID_PIPE;
-       struct intel_crtc *nuclear_crtc = NULL;
-       struct intel_crtc_state *crtc_state = NULL;
-       int ret;
-       int i;
-       bool not_nuclear = false;
-
-       /*
-        * FIXME:  At the moment, we only support "nuclear pageflip" on a
-        * single CRTC.  Cross-crtc updates will be added later.
-        */
-       for (i = 0; i < nplanes; i++) {
-               struct intel_plane *plane = to_intel_plane(state->planes[i]);
-               if (!plane)
-                       continue;
-
-               if (nuclear_pipe == INVALID_PIPE) {
-                       nuclear_pipe = plane->pipe;
-               } else if (nuclear_pipe != plane->pipe) {
-                       DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * FIXME:  We only handle planes for now; make sure there are no CRTC's
-        * or connectors involved.
-        */
-       state->allow_modeset = false;
-       for (i = 0; i < ncrtcs; i++) {
-               struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
-               if (crtc)
-                       memset(&crtc->atomic, 0, sizeof(crtc->atomic));
-               if (crtc && crtc->pipe != nuclear_pipe)
-                       not_nuclear = true;
-               if (crtc && crtc->pipe == nuclear_pipe) {
-                       nuclear_crtc = crtc;
-                       crtc_state = to_intel_crtc_state(state->crtc_states[i]);
-               }
-       }
-       for (i = 0; i < nconnectors; i++)
-               if (state->connectors[i] != NULL)
-                       not_nuclear = true;
-
-       if (not_nuclear) {
-               DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
-               return -EINVAL;
-       }
-
-       ret = drm_atomic_helper_check_planes(dev, state);
-       if (ret)
-               return ret;
-
-       /* FIXME: move to crtc atomic check function once it is ready */
-       ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
-                       struct drm_atomic_state *state,
-                       bool async)
-{
-       struct drm_crtc_state *crtc_state;
-       struct drm_crtc *crtc;
-       int ret, i;
-
-       if (async) {
-               DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-               return -EINVAL;
-       }
-
-       ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (ret)
-               return ret;
-
-       /* Point of no return */
-       drm_atomic_helper_swap_state(dev, state);
-
-       /* swap crtc_scaler_state */
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
-
-               if (INTEL_INFO(dev)->gen >= 9)
-                       skl_detach_scalers(to_intel_crtc(crtc));
-
-               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-       }
-
-       drm_atomic_helper_wait_for_vblanks(dev, state);
-       drm_atomic_helper_cleanup_planes(dev, state);
-       drm_atomic_state_free(state);
-
-       return 0;
-}
-
 /**
  * intel_connector_atomic_get_property - fetch connector property value
  * @connector: connector to fetch property for
@@ -269,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
        struct intel_plane_state *plane_state = NULL;
-       struct intel_crtc_scaler_state *scaler_state;
-       struct drm_atomic_state *drm_state;
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct drm_atomic_state *drm_state = crtc_state->base.state;
        int num_scalers_need;
        int i, j;
 
-       if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
-               return 0;
-
-       scaler_state = &crtc_state->scaler_state;
-       drm_state = crtc_state->base.state;
-
        num_scalers_need = hweight32(scaler_state->scaler_users);
        DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
                crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -307,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
        /* walkthrough scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int *scaler_id;
+               const char *name;
+               int idx;
 
                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;
 
                if (i == SKL_CRTC_INDEX) {
+                       name = "CRTC";
+                       idx = intel_crtc->base.base.id;
+
                        /* panel fitter case: assign as a crtc scaler */
                        scaler_id = &scaler_state->scaler_id;
                } else {
-                       if (!drm_state)
-                               continue;
+                       name = "PLANE";
 
                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
@@ -336,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
                                                plane->base.id);
                                        return PTR_ERR(state);
                                }
+
+                               /*
+                                * the plane is added after plane checks are run,
+                                * but since this plane is unchanged just do the
+                                * minimum required validation.
+                                */
+                               if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+                                       intel_crtc->atomic.wait_for_flips = true;
+                               crtc_state->base.planes_changed = true;
                        }
 
                        intel_plane = to_intel_plane(plane);
+                       idx = plane->base.id;
 
                        /* plane on different crtc cannot be a scaler user of this crtc */
                        if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -354,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
                        for (j = 0; j < intel_crtc->num_scalers; j++) {
                                if (!scaler_state->scalers[j].in_use) {
                                        scaler_state->scalers[j].in_use = 1;
-                                       *scaler_id = scaler_state->scalers[j].id;
+                                       *scaler_id = j;
                                        DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-                                               intel_crtc->pipe,
-                                               i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
-                                                       plane_state->scaler_id,
-                                               i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-                                               i == SKL_CRTC_INDEX ?  intel_crtc->base.base.id :
-                                               plane->base.id);
+                                               intel_crtc->pipe, *scaler_id, name, idx);
                                        break;
                                }
                        }
                }
 
                if (WARN_ON(*scaler_id < 0)) {
-                       DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
-                               i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-                               i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+                       DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
                        continue;
                }
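[The assignment loop above is a first-fit allocator over a bitmask: hweight32(scaler_users) gives the total demand up front so over-subscription can be rejected early, then each set bit takes the first free scaler. A minimal standalone sketch of the same pattern, names illustrative and not from this patch:

    /* Sketch: first-fit assignment of n resources to the set bits of a
     * 32-bit request mask; ids[i] receives the chosen resource index. */
    static int assign_first_fit(u32 users, bool *in_use, int n, int *ids)
    {
            int i, j;

            if (hweight32(users) > n)
                    return -EINVAL; /* more requesters than resources */

            for (i = 0; i < 32; i++) {
                    if (!(users & (1 << i)))
                            continue;
                    for (j = 0; j < n; j++) {
                            if (!in_use[j]) {
                                    in_use[j] = true;
                                    ids[i] = j; /* mirrors *scaler_id = j above */
                                    break;
                            }
                    }
            }
            return 0;
    }
]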
 
@@ -392,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 
        return 0;
 }
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll_config *shared_dpll)
+{
+       enum intel_dpll_id i;
+
+       /* Copy shared dpll state */
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+               shared_dpll[i] = pll->config;
+       }
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+       WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+       if (!state->dpll_set) {
+               state->dpll_set = true;
+
+               intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+                                                 state->shared_dpll);
+       }
+
+       return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+       struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+       if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+               kfree(state);
+               return NULL;
+       }
+
+       return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+       drm_atomic_state_default_clear(&state->base);
+       state->dpll_set = false;
+}
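[intel_atomic_state_alloc/intel_atomic_state_clear are the standard recipe for carrying driver-private data in the global atomic state: embed drm_atomic_state as the first member, initialize it through the core, and reset the private fields in the clear hook so a retried commit starts from scratch. A minimal sketch of the same pattern for a hypothetical driver, all names illustrative:

    struct foo_atomic_state {
            struct drm_atomic_state base;
            bool dpll_cached;       /* private data; must not survive a retry */
    };

    static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
    {
            struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

            if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
                    kfree(state);
                    return NULL;
            }
            return &state->base;
    }

    static void foo_atomic_state_clear(struct drm_atomic_state *s)
    {
            struct foo_atomic_state *state =
                    container_of(s, struct foo_atomic_state, base);

            drm_atomic_state_default_clear(&state->base);
            state->dpll_cached = false;
    }
]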
index 86ba4b2c3a651e982a266a242c9a410f40e04a05..f1ab8e4b9c11c6b75534c52ad5e3bc1b05000dce 100644 (file)
@@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane)
 
        state->base.plane = plane;
        state->base.rotation = BIT(DRM_ROTATE_0);
+       state->ckey.flags = I915_SET_COLORKEY_NONE;
 
        return state;
 }
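[Defaulting ckey.flags to I915_SET_COLORKEY_NONE in the state constructor means every freshly created plane state starts with color keying disabled; only an explicit ioctl turns it on, and state duplication then carries the choice along. A consumer would gate on the flag rather than on the key values, as in this illustrative sketch, not from the patch:

    /* Sketch: only program colorkey hardware when the flag says so. */
    static bool plane_state_has_ckey(const struct intel_plane_state *state)
    {
            return state->ckey.flags != I915_SET_COLORKEY_NONE;
    }
]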
@@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        struct intel_crtc_state *crtc_state;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct intel_plane_state *intel_state = to_intel_plane_state(state);
+       struct drm_crtc_state *drm_crtc_state;
+       int ret;
 
-       crtc = crtc ? crtc : plane->crtc;
+       crtc = crtc ? crtc : plane->state->crtc;
        intel_crtc = to_intel_crtc(crtc);
 
        /*
@@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        if (!crtc)
                return 0;
 
-       /* FIXME: temporary hack necessary while we still use the plane update
-        * helper. */
-       if (state->state) {
-               crtc_state =
-                       intel_atomic_get_crtc_state(state->state, intel_crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-       } else {
-               crtc_state = intel_crtc->config;
-       }
+       drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+       if (WARN_ON(!drm_crtc_state))
+               return -EINVAL;
+
+       crtc_state = to_intel_crtc_state(drm_crtc_state);
 
        /*
         * The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        intel_state->clip.y2 =
                crtc_state->base.active ? crtc_state->pipe_src_h : 0;
 
-       /*
-        * Disabling a plane is always okay; we just need to update
-        * fb tracking in a special way since cleanup_fb() won't
-        * get called by the plane helpers.
-        */
-       if (state->fb == NULL && plane->state->fb != NULL) {
-               /*
-                * 'prepare' is never called when plane is being disabled, so
-                * we need to handle frontbuffer tracking as a special case
-                */
-               intel_crtc->atomic.disabled_planes |=
-                       (1 << drm_plane_index(plane));
-       }
-
        if (state->fb && intel_rotation_90_or_270(state->rotation)) {
                if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
                        state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
                }
        }
 
-       return intel_plane->check_plane(plane, intel_state);
+       intel_state->visible = false;
+       ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+       if (ret)
+               return ret;
+
+       return intel_plane_atomic_calc_changes(&crtc_state->base, state);
 }
 
 static void intel_plane_atomic_update(struct drm_plane *plane,
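[The switch to drm_atomic_get_existing_crtc_state is the key semantic change here: unlike drm_atomic_get_crtc_state it never allocates or locks, it only returns a CRTC state that is already part of the update, which is guaranteed by the time plane checks run. An illustrative sketch of the lookup contract:

    /* Sketch: the "existing" lookup cannot fail with -EDEADLK and needs
     * no unwinding; a NULL return indicates a driver bug, since the core
     * adds the CRTC state before running the plane checks. */
    static struct intel_crtc_state *
    get_checked_crtc_state(struct drm_plane_state *plane_state,
                           struct drm_crtc *crtc)
    {
            struct drm_crtc_state *crtc_state =
                    drm_atomic_get_existing_crtc_state(plane_state->state, crtc);

            if (WARN_ON(!crtc_state))
                    return NULL;

            return to_intel_crtc_state(crtc_state);
    }
]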
index 3da9b8409f205723da7a3613fd1badf77e4b64cc..dc32cf4585f8a58db7ba256dd3599dcdeb7fba68 100644 (file)
@@ -41,7 +41,8 @@
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
  *
  * The codec and controller sequences could be done either parallel or serial,
  * but generally the ELDV/PD change in the codec sequence indicates to the audio
index 3dcd59e694db9e6f32c8e49ea04cbf21bbdc0ad8..990acc20771a6027ad2144edf864011e668604e6 100644 (file)
@@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
        drm_mode_set_name(panel_fixed_mode);
 }
 
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
-                          const struct lvds_dvo_timing *b)
-{
-       if (a->hactive_hi != b->hactive_hi ||
-           a->hactive_lo != b->hactive_lo)
-               return false;
-
-       if (a->hsync_off_hi != b->hsync_off_hi ||
-           a->hsync_off_lo != b->hsync_off_lo)
-               return false;
-
-       if (a->hsync_pulse_width != b->hsync_pulse_width)
-               return false;
-
-       if (a->hblank_hi != b->hblank_hi ||
-           a->hblank_lo != b->hblank_lo)
-               return false;
-
-       if (a->vactive_hi != b->vactive_hi ||
-           a->vactive_lo != b->vactive_lo)
-               return false;
-
-       if (a->vsync_off != b->vsync_off)
-               return false;
-
-       if (a->vsync_pulse_width != b->vsync_pulse_width)
-               return false;
-
-       if (a->vblank_hi != b->vblank_hi ||
-           a->vblank_lo != b->vblank_lo)
-               return false;
-
-       return true;
-}
-
 static const struct lvds_dvo_timing *
 get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
                    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        const struct lvds_dvo_timing *panel_dvo_timing;
        const struct lvds_fp_timing *fp_timing;
        struct drm_display_mode *panel_fixed_mode;
-       int i, downclock, drrs_mode;
+       int drrs_mode;
 
        lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
        if (!lvds_options)
@@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
        drm_mode_debug_printmodeline(panel_fixed_mode);
 
-       /*
-        * Iterate over the LVDS panel timing info to find the lowest clock
-        * for the native resolution.
-        */
-       downclock = panel_dvo_timing->clock;
-       for (i = 0; i < 16; i++) {
-               const struct lvds_dvo_timing *dvo_timing;
-
-               dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
-                                                lvds_lfp_data_ptrs,
-                                                i);
-               if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
-                   dvo_timing->clock < downclock)
-                       downclock = dvo_timing->clock;
-       }
-
-       if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
-               dev_priv->lvds_downclock_avail = 1;
-               dev_priv->lvds_downclock = downclock * 10;
-               DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
-                             "Normal Clock %dKHz, downclock %dKHz\n",
-                             panel_fixed_mode->clock, 10*downclock);
-       }
-
        fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
                                       lvds_lfp_data_ptrs,
                                       lvds_options->panel_type);
@@ -1028,13 +968,28 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        }
 
        if (is_dp) {
-               if (aux_channel == 0x40 && port != PORT_A)
+               if (port == PORT_E) {
+                       info->alternate_aux_channel = aux_channel;
+                       /* If DDI E shares an AUX channel with another port,
+                        * then DP can't be supported on that other port:
+                        * both would use the same AUX channel, and the
+                        * system couldn't communicate with them separately. */
+                       if (aux_channel == DP_AUX_A)
+                               dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+                       else if (aux_channel == DP_AUX_B)
+                               dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+                       else if (aux_channel == DP_AUX_C)
+                               dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+                       else if (aux_channel == DP_AUX_D)
+                               dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+               } else if (aux_channel == DP_AUX_A && port != PORT_A)
                        DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
-               if (aux_channel == 0x10 && port != PORT_B)
+               else if (aux_channel == DP_AUX_B && port != PORT_B)
                        DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
-               if (aux_channel == 0x20 && port != PORT_C)
+               else if (aux_channel == DP_AUX_C && port != PORT_C)
                        DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
-               if (aux_channel == 0x30 && port != PORT_D)
+               else if (aux_channel == DP_AUX_D && port != PORT_D)
                        DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
        }
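[The PORT_E branch above encodes the rule "a shared AUX channel disqualifies DP on the channel's native port". A table-driven equivalent of the same mapping, as an illustrative refactor that is not part of the patch; the -1 sentinel is hypothetical:

    /* Sketch: map a VBT AUX channel code to its canonical port. */
    static int aux_ch_to_port(u8 aux_channel)
    {
            switch (aux_channel) {
            case DP_AUX_A: return PORT_A;
            case DP_AUX_B: return PORT_B;
            case DP_AUX_C: return PORT_C;
            case DP_AUX_D: return PORT_D;
            default:       return -1;       /* unknown/unshared channel */
            }
    }

    /* ...then the PORT_E case collapses to:
     *      int p = aux_ch_to_port(aux_channel);
     *      if (p >= 0)
     *              dev_priv->vbt.ddi_port_info[p].supports_dp = 0;
     */
]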
 
index af0b476527526c18a8d2fde082e3a7840b8036fb..f7ad6a585129a311ec003392e5de46c887f303a8 100644 (file)
@@ -778,6 +778,13 @@ int intel_parse_bios(struct drm_device *dev);
 #define MIPI_DSI_UNDEFINED_PANEL_ID    0
 #define MIPI_DSI_GENERIC_PANEL_ID      1
 
+/*
+ * PMIC vs SoC Backlight support specified in pwm_blc
+ * field in mipi_config block below.
+*/
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+
 struct mipi_config {
        u16 panel_id;
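[A minimal consumer of the new defines, assuming the pwm_blc field the comment refers to further down in mipi_config; illustrative sketch only:

    /* Sketch: choose the backlight control path from the VBT MIPI block. */
    static bool mipi_backlight_on_soc(const struct mipi_config *config)
    {
            return config->pwm_blc == PPS_BLC_SOC;
    }
]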
 
index 521af2c069cb6aed90e379501d3fbc39c24df6e9..af5e43bef4a41003437a7f1f2979992d065193ac 100644 (file)
@@ -236,53 +236,6 @@ static void intel_enable_crt(struct intel_encoder *encoder)
        intel_crt_set_dpms(encoder, crt->connector->base.dpms);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
-       struct drm_device *dev = connector->dev;
-       struct intel_encoder *encoder = intel_attached_encoder(connector);
-       struct drm_crtc *crtc;
-       int old_dpms;
-
-       /* PCH platforms and VLV only support on/off. */
-       if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
-               mode = DRM_MODE_DPMS_OFF;
-
-       if (mode == connector->dpms)
-               return;
-
-       old_dpms = connector->dpms;
-       connector->dpms = mode;
-
-       /* Only need to change hw state when actually enabled */
-       crtc = encoder->base.crtc;
-       if (!crtc) {
-               encoder->connectors_active = false;
-               return;
-       }
-
-       /* We need the pipe to run for anything but OFF. */
-       if (mode == DRM_MODE_DPMS_OFF)
-               encoder->connectors_active = false;
-       else
-               encoder->connectors_active = true;
-
-       /* We call connector dpms manually below in case pipe dpms doesn't
-        * change due to cloning. */
-       if (mode < old_dpms) {
-               /* From off to on, enable the pipe first. */
-               intel_crtc_update_dpms(crtc);
-
-               intel_crt_set_dpms(encoder, mode);
-       } else {
-               intel_crt_set_dpms(encoder, mode);
-
-               intel_crtc_update_dpms(crtc);
-       }
-
-       intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_crt_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
@@ -798,7 +751,7 @@ static void intel_crt_reset(struct drm_connector *connector)
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
        .reset = intel_crt_reset,
-       .dpms = intel_crt_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_crt_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = intel_crt_destroy,
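[With the bespoke clone-aware DPMS gone, legacy DPMS requests are funneled through the atomic helper, which reduces them to toggling crtc_state->active and committing. Conceptually, as a simplified sketch; the real drm_atomic_helper_connector_dpms also clamps intermediate DPMS levels and handles lock backoff:

    static int dpms_via_atomic(struct drm_connector *connector, int mode)
    {
            struct drm_crtc *crtc = connector->state->crtc;
            struct drm_atomic_state *state;
            struct drm_crtc_state *crtc_state;
            int ret;

            if (!crtc)
                    return 0;       /* nothing to do while disconnected */

            state = drm_atomic_state_alloc(connector->dev);
            if (!state)
                    return -ENOMEM;
            state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

            crtc_state = drm_atomic_get_crtc_state(state, crtc);
            if (IS_ERR(crtc_state)) {
                    ret = PTR_ERR(crtc_state);
                    goto out;
            }

            crtc_state->active = (mode == DRM_MODE_DPMS_ON);
            ret = drm_atomic_commit(state);
    out:
            if (ret)
                    drm_atomic_state_free(state);
            return ret;
    }
]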
index bcb41e61877d030d1d377cbb866760c35993abd6..ba1ae031e6fd47ff7873fe739a4efd5e5c5fddac 100644 (file)
@@ -244,7 +244,7 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
 void intel_csr_load_program(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       __be32 *payload = dev_priv->csr.dmc_payload;
+       u32 *payload = dev_priv->csr.dmc_payload;
        uint32_t i, fw_size;
 
        if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@ void intel_csr_load_program(struct drm_device *dev)
        fw_size = dev_priv->csr.dmc_fw_size;
        for (i = 0; i < fw_size; i++)
                I915_WRITE(CSR_PROGRAM_BASE + i * 4,
-                       (u32 __force)payload[i]);
+                       payload[i]);
 
        for (i = 0; i < dev_priv->csr.mmio_count; i++) {
                I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
        char substepping = intel_get_substepping(dev);
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        uint32_t i;
-       __be32 *dmc_payload;
+       uint32_t *dmc_payload;
        bool fw_loaded = false;
 
        if (!fw) {
@@ -375,20 +375,13 @@ static void finish_csr_load(const struct firmware *fw, void *context)
        }
 
        dmc_payload = csr->dmc_payload;
-       for (i = 0; i < dmc_header->fw_size; i++) {
-               uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
-               /*
-                * The firmware payload is an array of 32 bit words stored in
-                * little-endian format in the firmware image and programmed
-                * as 32 bit big-endian format to memory.
-                */
-               dmc_payload[i] = cpu_to_be32(*tmp);
-       }
+       memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
        /* load csr program during system boot, as needed for DC states */
        intel_csr_load_program(dev);
        fw_loaded = true;
 
+       DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
 out:
        if (fw_loaded)
                intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
                return;
        }
 
+       DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
        /*
         * Obtain a runtime pm reference, until CSR is loaded,
         * to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@ void intel_csr_ucode_fini(struct drm_device *dev)
 
 void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-       WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+       WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+            "CSR is not loaded.\n");
        WARN(!I915_READ(CSR_PROGRAM_BASE),
                                "CSR program storage start is NULL\n");
        WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
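[The payload type change from __be32 to u32 removes a spurious byte-swap: the DMC image stores little-endian 32-bit words and they are programmed verbatim, so a straight memcpy is correct on the little-endian hosts i915 runs on. The strictly endian-clean spelling of the same copy would be, illustratively and functionally identically on LE:

    /* Sketch: explicit per-word conversion instead of the raw memcpy. */
    const __le32 *fw_words = (const __le32 *)&fw->data[readcount];

    for (i = 0; i < dmc_header->fw_size; i++)
            dmc_payload[i] = le32_to_cpu(fw_words[i]);
]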
index cacb07b7a8f108a84a209223c4d7604d1c462a13..110d5469c86c72005534a508e65cbf4897cb6b02 100644 (file)
@@ -31,6 +31,7 @@
 struct ddi_buf_trans {
        u32 trans1;     /* balance leg enable, de-emph level */
        u32 trans2;     /* vref sel, vswing */
+       u8 i_boost;     /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
 };
 
 /* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@ struct ddi_buf_trans {
  * automatically adapt to HDMI connections as well
  */
 static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
-       { 0x00FFFFFF, 0x0006000E },
-       { 0x00D75FFF, 0x0005000A },
-       { 0x00C30FFF, 0x00040006 },
-       { 0x80AAAFFF, 0x000B0000 },
-       { 0x00FFFFFF, 0x0005000A },
-       { 0x00D75FFF, 0x000C0004 },
-       { 0x80C30FFF, 0x000B0000 },
-       { 0x00FFFFFF, 0x00040006 },
-       { 0x80D75FFF, 0x000B0000 },
+       { 0x00FFFFFF, 0x0006000E, 0x0 },
+       { 0x00D75FFF, 0x0005000A, 0x0 },
+       { 0x00C30FFF, 0x00040006, 0x0 },
+       { 0x80AAAFFF, 0x000B0000, 0x0 },
+       { 0x00FFFFFF, 0x0005000A, 0x0 },
+       { 0x00D75FFF, 0x000C0004, 0x0 },
+       { 0x80C30FFF, 0x000B0000, 0x0 },
+       { 0x00FFFFFF, 0x00040006, 0x0 },
+       { 0x80D75FFF, 0x000B0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
-       { 0x00FFFFFF, 0x0007000E },
-       { 0x00D75FFF, 0x000F000A },
-       { 0x00C30FFF, 0x00060006 },
-       { 0x00AAAFFF, 0x001E0000 },
-       { 0x00FFFFFF, 0x000F000A },
-       { 0x00D75FFF, 0x00160004 },
-       { 0x00C30FFF, 0x001E0000 },
-       { 0x00FFFFFF, 0x00060006 },
-       { 0x00D75FFF, 0x001E0000 },
+       { 0x00FFFFFF, 0x0007000E, 0x0 },
+       { 0x00D75FFF, 0x000F000A, 0x0 },
+       { 0x00C30FFF, 0x00060006, 0x0 },
+       { 0x00AAAFFF, 0x001E0000, 0x0 },
+       { 0x00FFFFFF, 0x000F000A, 0x0 },
+       { 0x00D75FFF, 0x00160004, 0x0 },
+       { 0x00C30FFF, 0x001E0000, 0x0 },
+       { 0x00FFFFFF, 0x00060006, 0x0 },
+       { 0x00D75FFF, 0x001E0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
                                        /* Idx  NT mV d T mV d  db      */
-       { 0x00FFFFFF, 0x0006000E },     /* 0:   400     400     0       */
-       { 0x00E79FFF, 0x000E000C },     /* 1:   400     500     2       */
-       { 0x00D75FFF, 0x0005000A },     /* 2:   400     600     3.5     */
-       { 0x00FFFFFF, 0x0005000A },     /* 3:   600     600     0       */
-       { 0x00E79FFF, 0x001D0007 },     /* 4:   600     750     2       */
-       { 0x00D75FFF, 0x000C0004 },     /* 5:   600     900     3.5     */
-       { 0x00FFFFFF, 0x00040006 },     /* 6:   800     800     0       */
-       { 0x80E79FFF, 0x00030002 },     /* 7:   800     1000    2       */
-       { 0x00FFFFFF, 0x00140005 },     /* 8:   850     850     0       */
-       { 0x00FFFFFF, 0x000C0004 },     /* 9:   900     900     0       */
-       { 0x00FFFFFF, 0x001C0003 },     /* 10:  950     950     0       */
-       { 0x80FFFFFF, 0x00030002 },     /* 11:  1000    1000    0       */
+       { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0:   400     400     0       */
+       { 0x00E79FFF, 0x000E000C, 0x0 },/* 1:   400     500     2       */
+       { 0x00D75FFF, 0x0005000A, 0x0 },/* 2:   400     600     3.5     */
+       { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3:   600     600     0       */
+       { 0x00E79FFF, 0x001D0007, 0x0 },/* 4:   600     750     2       */
+       { 0x00D75FFF, 0x000C0004, 0x0 },/* 5:   600     900     3.5     */
+       { 0x00FFFFFF, 0x00040006, 0x0 },/* 6:   800     800     0       */
+       { 0x80E79FFF, 0x00030002, 0x0 },/* 7:   800     1000    2       */
+       { 0x00FFFFFF, 0x00140005, 0x0 },/* 8:   850     850     0       */
+       { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9:   900     900     0       */
+       { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10:  950     950     0       */
+       { 0x80FFFFFF, 0x00030002, 0x0 },/* 11:  1000    1000    0       */
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
-       { 0x00FFFFFF, 0x00000012 },
-       { 0x00EBAFFF, 0x00020011 },
-       { 0x00C71FFF, 0x0006000F },
-       { 0x00AAAFFF, 0x000E000A },
-       { 0x00FFFFFF, 0x00020011 },
-       { 0x00DB6FFF, 0x0005000F },
-       { 0x00BEEFFF, 0x000A000C },
-       { 0x00FFFFFF, 0x0005000F },
-       { 0x00DB6FFF, 0x000A000C },
+       { 0x00FFFFFF, 0x00000012, 0x0 },
+       { 0x00EBAFFF, 0x00020011, 0x0 },
+       { 0x00C71FFF, 0x0006000F, 0x0 },
+       { 0x00AAAFFF, 0x000E000A, 0x0 },
+       { 0x00FFFFFF, 0x00020011, 0x0 },
+       { 0x00DB6FFF, 0x0005000F, 0x0 },
+       { 0x00BEEFFF, 0x000A000C, 0x0 },
+       { 0x00FFFFFF, 0x0005000F, 0x0 },
+       { 0x00DB6FFF, 0x000A000C, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
-       { 0x00FFFFFF, 0x0007000E },
-       { 0x00D75FFF, 0x000E000A },
-       { 0x00BEFFFF, 0x00140006 },
-       { 0x80B2CFFF, 0x001B0002 },
-       { 0x00FFFFFF, 0x000E000A },
-       { 0x00DB6FFF, 0x00160005 },
-       { 0x80C71FFF, 0x001A0002 },
-       { 0x00F7DFFF, 0x00180004 },
-       { 0x80D75FFF, 0x001B0002 },
+       { 0x00FFFFFF, 0x0007000E, 0x0 },
+       { 0x00D75FFF, 0x000E000A, 0x0 },
+       { 0x00BEFFFF, 0x00140006, 0x0 },
+       { 0x80B2CFFF, 0x001B0002, 0x0 },
+       { 0x00FFFFFF, 0x000E000A, 0x0 },
+       { 0x00DB6FFF, 0x00160005, 0x0 },
+       { 0x80C71FFF, 0x001A0002, 0x0 },
+       { 0x00F7DFFF, 0x00180004, 0x0 },
+       { 0x80D75FFF, 0x001B0002, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
-       { 0x00FFFFFF, 0x0001000E },
-       { 0x00D75FFF, 0x0004000A },
-       { 0x00C30FFF, 0x00070006 },
-       { 0x00AAAFFF, 0x000C0000 },
-       { 0x00FFFFFF, 0x0004000A },
-       { 0x00D75FFF, 0x00090004 },
-       { 0x00C30FFF, 0x000C0000 },
-       { 0x00FFFFFF, 0x00070006 },
-       { 0x00D75FFF, 0x000C0000 },
+       { 0x00FFFFFF, 0x0001000E, 0x0 },
+       { 0x00D75FFF, 0x0004000A, 0x0 },
+       { 0x00C30FFF, 0x00070006, 0x0 },
+       { 0x00AAAFFF, 0x000C0000, 0x0 },
+       { 0x00FFFFFF, 0x0004000A, 0x0 },
+       { 0x00D75FFF, 0x00090004, 0x0 },
+       { 0x00C30FFF, 0x000C0000, 0x0 },
+       { 0x00FFFFFF, 0x00070006, 0x0 },
+       { 0x00D75FFF, 0x000C0000, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
                                        /* Idx  NT mV d T mV df db      */
-       { 0x00FFFFFF, 0x0007000E },     /* 0:   400     400     0       */
-       { 0x00D75FFF, 0x000E000A },     /* 1:   400     600     3.5     */
-       { 0x00BEFFFF, 0x00140006 },     /* 2:   400     800     6       */
-       { 0x00FFFFFF, 0x0009000D },     /* 3:   450     450     0       */
-       { 0x00FFFFFF, 0x000E000A },     /* 4:   600     600     0       */
-       { 0x00D7FFFF, 0x00140006 },     /* 5:   600     800     2.5     */
-       { 0x80CB2FFF, 0x001B0002 },     /* 6:   600     1000    4.5     */
-       { 0x00FFFFFF, 0x00140006 },     /* 7:   800     800     0       */
-       { 0x80E79FFF, 0x001B0002 },     /* 8:   800     1000    2       */
-       { 0x80FFFFFF, 0x001B0002 },     /* 9:   1000    1000    0       */
+       { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0:   400     400     0       */
+       { 0x00D75FFF, 0x000E000A, 0x0 },/* 1:   400     600     3.5     */
+       { 0x00BEFFFF, 0x00140006, 0x0 },/* 2:   400     800     6       */
+       { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3:   450     450     0       */
+       { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4:   600     600     0       */
+       { 0x00D7FFFF, 0x00140006, 0x0 },/* 5:   600     800     2.5     */
+       { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6:   600     1000    4.5     */
+       { 0x00FFFFFF, 0x00140006, 0x0 },/* 7:   800     800     0       */
+       { 0x80E79FFF, 0x001B0002, 0x0 },/* 8:   800     1000    2       */
+       { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:   1000    1000    0       */
 };
 
+/* Skylake H, S, and Skylake Y with 0.95V VccIO */
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
-       { 0x00000018, 0x000000a2 },
-       { 0x00004014, 0x0000009B },
-       { 0x00006012, 0x00000088 },
-       { 0x00008010, 0x00000087 },
-       { 0x00000018, 0x0000009B },
-       { 0x00004014, 0x00000088 },
-       { 0x00006012, 0x00000087 },
-       { 0x00000018, 0x00000088 },
-       { 0x00004014, 0x00000087 },
+       { 0x00002016, 0x000000A0, 0x0 },
+       { 0x00005012, 0x0000009B, 0x0 },
+       { 0x00007011, 0x00000088, 0x0 },
+       { 0x00009010, 0x000000C7, 0x0 },
+       { 0x00002016, 0x0000009B, 0x0 },
+       { 0x00005012, 0x00000088, 0x0 },
+       { 0x00007011, 0x000000C7, 0x0 },
+       { 0x00002016, 0x000000DF, 0x0 },
+       { 0x00005012, 0x000000C7, 0x0 },
 };
 
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+       { 0x00002016, 0x000000A2, 0x0 },
+       { 0x00005012, 0x00000088, 0x0 },
+       { 0x00007011, 0x00000087, 0x0 },
+       { 0x80009010, 0x000000C7, 0x1 },        /* Uses I_boost */
+       { 0x00002016, 0x0000009D, 0x0 },
+       { 0x00005012, 0x000000C7, 0x0 },
+       { 0x00007011, 0x000000C7, 0x0 },
+       { 0x00002016, 0x00000088, 0x0 },
+       { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y with 0.85V VccIO */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
+       { 0x00000018, 0x000000A2, 0x0 },
+       { 0x00005012, 0x00000088, 0x0 },
+       { 0x00007011, 0x00000087, 0x0 },
+       { 0x80009010, 0x000000C7, 0x1 },        /* Uses I_boost */
+       { 0x00000018, 0x0000009D, 0x0 },
+       { 0x00005012, 0x000000C7, 0x0 },
+       { 0x00007011, 0x000000C7, 0x0 },
+       { 0x00000018, 0x00000088, 0x0 },
+       { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S, and Skylake Y with 0.95V VccIO
+ * eDP 1.4 low vswing translation parameters
+ */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
-       { 0x00000018, 0x000000a8 },
-       { 0x00002016, 0x000000ab },
-       { 0x00006012, 0x000000a2 },
-       { 0x00008010, 0x00000088 },
-       { 0x00000018, 0x000000ab },
-       { 0x00004014, 0x000000a2 },
-       { 0x00006012, 0x000000a6 },
-       { 0x00000018, 0x000000a2 },
-       { 0x00005013, 0x0000009c },
-       { 0x00000018, 0x00000088 },
+       { 0x00000018, 0x000000A8, 0x0 },
+       { 0x00004013, 0x000000A9, 0x0 },
+       { 0x00007011, 0x000000A2, 0x0 },
+       { 0x00009010, 0x0000009C, 0x0 },
+       { 0x00000018, 0x000000A9, 0x0 },
+       { 0x00006013, 0x000000A2, 0x0 },
+       { 0x00007011, 0x000000A6, 0x0 },
+       { 0x00000018, 0x000000AB, 0x0 },
+       { 0x00007013, 0x0000009F, 0x0 },
+       { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+       { 0x00000018, 0x000000A8, 0x0 },
+       { 0x00004013, 0x000000A9, 0x0 },
+       { 0x00007011, 0x000000A2, 0x0 },
+       { 0x00009010, 0x0000009C, 0x0 },
+       { 0x00000018, 0x000000A9, 0x0 },
+       { 0x00006013, 0x000000A2, 0x0 },
+       { 0x00007011, 0x000000A6, 0x0 },
+       { 0x00002016, 0x000000AB, 0x0 },
+       { 0x00005013, 0x0000009F, 0x0 },
+       { 0x00000018, 0x000000DF, 0x0 },
 };
 
+/*
+ * Skylake Y with 0.85V VccIO
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
+       { 0x00000018, 0x000000A8, 0x0 },
+       { 0x00004013, 0x000000AB, 0x0 },
+       { 0x00007011, 0x000000A4, 0x0 },
+       { 0x00009010, 0x000000DF, 0x0 },
+       { 0x00000018, 0x000000AA, 0x0 },
+       { 0x00006013, 0x000000A4, 0x0 },
+       { 0x00007011, 0x0000009D, 0x0 },
+       { 0x00000018, 0x000000A0, 0x0 },
+       { 0x00006012, 0x000000DF, 0x0 },
+       { 0x00000018, 0x0000008A, 0x0 },
+};
 
+/* Skylake H, S and U, and Skylake Y with 0.95V VccIO */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
-       { 0x00000018, 0x000000ac },
-       { 0x00005012, 0x0000009d },
-       { 0x00007011, 0x00000088 },
-       { 0x00000018, 0x000000a1 },
-       { 0x00000018, 0x00000098 },
-       { 0x00004013, 0x00000088 },
-       { 0x00006012, 0x00000087 },
-       { 0x00000018, 0x000000df },
-       { 0x00003015, 0x00000087 },
-       { 0x00003015, 0x000000c7 },
-       { 0x00000018, 0x000000c7 },
+       { 0x00000018, 0x000000AC, 0x0 },
+       { 0x00005012, 0x0000009D, 0x0 },
+       { 0x00007011, 0x00000088, 0x0 },
+       { 0x00000018, 0x000000A1, 0x0 },
+       { 0x00000018, 0x00000098, 0x0 },
+       { 0x00004013, 0x00000088, 0x0 },
+       { 0x00006012, 0x00000087, 0x0 },
+       { 0x00000018, 0x000000DF, 0x0 },
+       { 0x00003015, 0x00000087, 0x0 },        /* Default */
+       { 0x00003015, 0x000000C7, 0x0 },
+       { 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y with 0.85V VccIO */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
+       { 0x00000018, 0x000000A1, 0x0 },
+       { 0x00005012, 0x000000DF, 0x0 },
+       { 0x00007011, 0x00000084, 0x0 },
+       { 0x00000018, 0x000000A4, 0x0 },
+       { 0x00000018, 0x0000009D, 0x0 },
+       { 0x00004013, 0x00000080, 0x0 },
+       { 0x00006013, 0x000000C7, 0x0 },
+       { 0x00000018, 0x0000008A, 0x0 },
+       { 0x00003015, 0x000000C7, 0x0 },        /* Default */
+       { 0x80003015, 0x000000C7, 0x7 },        /* Uses I_boost */
+       { 0x00000018, 0x000000C7, 0x0 },
 };
 
 struct bxt_ddi_buf_trans {
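[Every table now carries the SKL I_boost value per entry; per the struct comment, only 0x0, 0x1, 0x3 and 0x7 are legal. A tiny self-check over a table, illustrative and not in the patch:

    /* Sketch: verify i_boost entries against the documented value set. */
    static bool ddi_buf_trans_i_boost_valid(const struct ddi_buf_trans *t,
                                            int n_entries)
    {
            int i;

            for (i = 0; i < n_entries; i++) {
                    u8 b = t[i].i_boost;

                    if (b != 0x0 && b != 0x1 && b != 0x3 && b != 0x7)
                            return false;
            }
            return true;
    }
]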
@@ -181,16 +261,16 @@ struct bxt_ddi_buf_trans {
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
                                        /* Idx  NT mV diff      db  */
-       { 52,  0,    0, 128, true  },   /* 0:   400             0   */
-       { 78,  0,    0, 85,  false },   /* 1:   400             3.5 */
-       { 104, 0,    0, 64,  false },   /* 2:   400             6   */
-       { 154, 0,    0, 43,  false },   /* 3:   400             9.5 */
-       { 77,  0,    0, 128, false },   /* 4:   600             0   */
-       { 116, 0,    0, 85,  false },   /* 5:   600             3.5 */
-       { 154, 0,    0, 64,  false },   /* 6:   600             6   */
-       { 102, 0,    0, 128, false },   /* 7:   800             0   */
-       { 154, 0,    0, 85,  false },   /* 8:   800             3.5 */
-       { 154, 0x9A, 1, 128, false },  /* 9:    1200            0   */
+       { 52,  0x9A, 0, 128, true  },   /* 0:   400             0   */
+       { 78,  0x9A, 0, 85,  false },   /* 1:   400             3.5 */
+       { 104, 0x9A, 0, 64,  false },   /* 2:   400             6   */
+       { 154, 0x9A, 0, 43,  false },   /* 3:   400             9.5 */
+       { 77,  0x9A, 0, 128, false },   /* 4:   600             0   */
+       { 116, 0x9A, 0, 85,  false },   /* 5:   600             3.5 */
+       { 154, 0x9A, 0, 64,  false },   /* 6:   600             6   */
+       { 102, 0x9A, 0, 128, false },   /* 7:   800             0   */
+       { 154, 0x9A, 0, 85,  false },   /* 8:   800             3.5 */
+       { 154, 0x9A, 1, 128, false },   /* 9:   1200            0   */
 };
 
 /* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
                                        /* Idx  NT mV diff      db  */
-       { 52,  0,    0, 128, false },   /* 0:   400             0   */
-       { 52,  0,    0, 85,  false },   /* 1:   400             3.5 */
-       { 52,  0,    0, 64,  false },   /* 2:   400             6   */
-       { 42,  0,    0, 43,  false },   /* 3:   400             9.5 */
-       { 77,  0,    0, 128, false },   /* 4:   600             0   */
-       { 77,  0,    0, 85,  false },   /* 5:   600             3.5 */
-       { 77,  0,    0, 64,  false },   /* 6:   600             6   */
-       { 102, 0,    0, 128, false },   /* 7:   800             0   */
-       { 102, 0,    0, 85,  false },   /* 8:   800             3.5 */
+       { 52,  0x9A, 0, 128, false },   /* 0:   400             0   */
+       { 52,  0x9A, 0, 85,  false },   /* 1:   400             3.5 */
+       { 52,  0x9A, 0, 64,  false },   /* 2:   400             6   */
+       { 42,  0x9A, 0, 43,  false },   /* 3:   400             9.5 */
+       { 77,  0x9A, 0, 128, false },   /* 4:   600             0   */
+       { 77,  0x9A, 0, 85,  false },   /* 5:   600             3.5 */
+       { 77,  0x9A, 0, 64,  false },   /* 6:   600             6   */
+       { 102, 0x9A, 0, 128, false },   /* 7:   800             0   */
+       { 102, 0x9A, 0, 85,  false },   /* 8:   800             3.5 */
        { 154, 0x9A, 1, 128, true },    /* 9:   1200            0   */
 };
 
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+                                   enum port port, int type);
+
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
                                 struct intel_digital_port **dig_port,
                                 enum port *port)
@@ -249,6 +332,102 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
        return intel_dig_port->hdmi.hdmi_reg;
 }
 
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+                                                       int *n_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct ddi_buf_trans *ddi_translations;
+       static int is_095v = -1;
+
+       if (is_095v == -1) {
+               u32 spr1 = I915_READ(UAIMI_SPR1);
+
+               is_095v = spr1 & SKL_VCCIO_MASK;
+       }
+
+       if (IS_SKL_ULX(dev) && !is_095v) {
+               ddi_translations = skl_y_085v_ddi_translations_dp;
+               *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+       } else if (IS_SKL_ULT(dev)) {
+               ddi_translations = skl_u_ddi_translations_dp;
+               *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+       } else {
+               ddi_translations = skl_ddi_translations_dp;
+               *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+       }
+
+       return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+                                                        int *n_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct ddi_buf_trans *ddi_translations;
+       static int is_095v = -1;
+
+       if (is_095v == -1) {
+               u32 spr1 = I915_READ(UAIMI_SPR1);
+
+               is_095v = spr1 & SKL_VCCIO_MASK;
+       }
+
+       if (IS_SKL_ULX(dev) && !is_095v) {
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations = skl_y_085v_ddi_translations_edp;
+                       *n_entries =
+                               ARRAY_SIZE(skl_y_085v_ddi_translations_edp);
+               } else {
+                       ddi_translations = skl_y_085v_ddi_translations_dp;
+                       *n_entries =
+                               ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+               }
+       } else if (IS_SKL_ULT(dev)) {
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations = skl_u_ddi_translations_edp;
+                       *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+               } else {
+                       ddi_translations = skl_u_ddi_translations_dp;
+                       *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+               }
+       } else {
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations = skl_ddi_translations_edp;
+                       *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+               } else {
+                       ddi_translations = skl_ddi_translations_dp;
+                       *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+               }
+       }
+
+       return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+                      int *n_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct ddi_buf_trans *ddi_translations;
+       static int is_095v = -1;
+
+       if (is_095v == -1) {
+               u32 spr1 = I915_READ(UAIMI_SPR1);
+
+               is_095v = spr1 & SKL_VCCIO_MASK;
+       }
+
+       if (IS_SKL_ULX(dev) && !is_095v) {
+               ddi_translations = skl_y_085v_ddi_translations_hdmi;
+               *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi);
+       } else {
+               ddi_translations = skl_ddi_translations_hdmi;
+               *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+       }
+
+       return ddi_translations;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -280,19 +459,13 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                return;
        } else if (IS_SKYLAKE(dev)) {
                ddi_translations_fdi = NULL;
-               ddi_translations_dp = skl_ddi_translations_dp;
-               n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-               if (dev_priv->edp_low_vswing) {
-                       ddi_translations_edp = skl_ddi_translations_edp;
-                       n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-               } else {
-                       ddi_translations_edp = skl_ddi_translations_dp;
-                       n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-               }
-
-               ddi_translations_hdmi = skl_ddi_translations_hdmi;
-               n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
-               hdmi_default_entry = 7;
+               ddi_translations_dp =
+                               skl_get_buf_trans_dp(dev, &n_dp_entries);
+               ddi_translations_edp =
+                               skl_get_buf_trans_edp(dev, &n_edp_entries);
+               ddi_translations_hdmi =
+                               skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+               hdmi_default_entry = 8;
        } else if (IS_BROADWELL(dev)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
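[Each of the three new skl_get_buf_trans_* helpers latches the VccIO level from UAIMI_SPR1 into a function-local static on first use. Factoring that out would remove the triplication; note that a function-local static is shared across all devices, so a dev_priv field would be the more robust home for the cached value. Illustrative refactor, not part of the patch:

    /* Sketch: one lazy read of the VccIO strap, shared by the three
     * table-selection helpers above. */
    static bool skl_is_vccio_095v(struct drm_i915_private *dev_priv)
    {
            static int is_095v = -1;        /* see caveat above */

            if (is_095v == -1)
                    is_095v = !!(I915_READ(UAIMI_SPR1) & SKL_VCCIO_MASK);

            return is_095v;
    }
]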
@@ -625,11 +798,11 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
        (void) (&__a == &__b);                  \
        __a > __b ? (__a - __b) : (__b - __a); })
 
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
        unsigned p, n2, r2;
 };
 
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 {
        unsigned budget;
 
@@ -703,9 +876,9 @@ static unsigned wrpll_get_budget_for_freq(int clock)
        return budget;
 }
 
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-                            unsigned r2, unsigned n2, unsigned p,
-                            struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+                                unsigned r2, unsigned n2, unsigned p,
+                                struct hsw_wrpll_rnp *best)
 {
        uint64_t a, b, c, d, diff, diff_best;
 
@@ -762,8 +935,7 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
        /* Otherwise a < c && b >= d, do nothing */
 }
 
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
-                                    int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 {
        int refclk = LC_FREQ;
        int n, p, r;
@@ -856,6 +1028,26 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        return dco_freq / (p0 * p1 * p2 * 5);
 }
 
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+       int dotclock;
+
+       if (pipe_config->has_pch_encoder)
+               dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+                                                   &pipe_config->fdi_m_n);
+       else if (pipe_config->has_dp_encoder)
+               dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+                                                   &pipe_config->dp_m_n);
+       else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+               dotclock = pipe_config->port_clock * 2 / 3;
+       else
+               dotclock = pipe_config->port_clock;
+
+       if (pipe_config->pixel_multiplier)
+               dotclock /= pipe_config->pixel_multiplier;
+
+       pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
 
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
@@ -902,12 +1094,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 
        pipe_config->port_clock = link_clock;
 
-       if (pipe_config->has_dp_encoder)
-               pipe_config->base.adjusted_mode.crtc_clock =
-                       intel_dotclock_calculate(pipe_config->port_clock,
-                                                &pipe_config->dp_m_n);
-       else
-               pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+       ddi_dotclock_get(pipe_config);
 }
 
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
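[ddi_dotclock_get, used above and in the hunks below, folds three duplicated dotclock derivations into one helper. The has_hdmi_sink && pipe_bpp == 36 case is 12 bpc HDMI deep color, where the TMDS link runs at 1.5x the pixel rate, hence dotclock = port_clock * 2 / 3: a 445500 kHz port clock, for example, yields a 297000 kHz dotclock. The final division by pixel_multiplier maps a multiplied link rate, as used by SDVO, back to the true dotclock.]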
@@ -929,10 +1116,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
                link_clock = 270000;
                break;
        case PORT_CLK_SEL_WRPLL1:
-               link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+               link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
                break;
        case PORT_CLK_SEL_WRPLL2:
-               link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+               link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
                break;
        case PORT_CLK_SEL_SPLL:
                pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1141,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 
        pipe_config->port_clock = link_clock * 2;
 
-       if (pipe_config->has_pch_encoder)
-               pipe_config->base.adjusted_mode.crtc_clock =
-                       intel_dotclock_calculate(pipe_config->port_clock,
-                                                &pipe_config->fdi_m_n);
-       else if (pipe_config->has_dp_encoder)
-               pipe_config->base.adjusted_mode.crtc_clock =
-                       intel_dotclock_calculate(pipe_config->port_clock,
-                                                &pipe_config->dp_m_n);
-       else
-               pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+       ddi_dotclock_get(pipe_config);
 }
 
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
                                enum intel_dpll_id dpll)
 {
-       /* FIXME formula not available in bspec */
-       return 0;
+       struct intel_shared_dpll *pll;
+       struct intel_dpll_hw_state *state;
+       intel_clock_t clock;
+
+       /* For DDI ports we always use a shared PLL. */
+       if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+               return 0;
+
+       pll = &dev_priv->shared_dplls[dpll];
+       state = &pll->config.hw_state;
+
+       clock.m1 = 2;
+       clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+       if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+               clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+       clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+       clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+       clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+       return chv_calc_dpll_params(100000, &clock);
 }
 
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
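[bxt_calc_pll_link replaces the old "formula not available" stub with a real readback: the M2 integer bits are shifted into .22 fixed point before the fractional bits are OR'd in, and chv_calc_dpll_params then evaluates, against the 100 MHz reference and in the CHV/BXT fixed-point convention,

    vco      = refclk * m1 * m2 / (n << 22)
    dotclock = vco / (p1 * p2)

so BXT reported clocks now reflect the programmed dividers instead of reading back as 0.]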
@@ -980,16 +1176,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
        enum port port = intel_ddi_get_encoder_port(encoder);
        uint32_t dpll = port;
 
-       pipe_config->port_clock =
-               bxt_calc_pll_link(dev_priv, dpll);
+       pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
 
-       if (pipe_config->has_dp_encoder)
-               pipe_config->base.adjusted_mode.crtc_clock =
-                       intel_dotclock_calculate(pipe_config->port_clock,
-                                                       &pipe_config->dp_m_n);
-       else
-               pipe_config->base.adjusted_mode.crtc_clock =
-                                                       pipe_config->port_clock;
+       ddi_dotclock_get(pipe_config);
 }
 
 void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1200,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 {
        uint64_t freq2k;
        unsigned p, n2, r2;
-       struct wrpll_rnp best = { 0, 0, 0 };
+       struct hsw_wrpll_rnp best = { 0, 0, 0 };
        unsigned budget;
 
        freq2k = clock / 100;
 
-       budget = wrpll_get_budget_for_freq(clock);
+       budget = hsw_wrpll_get_budget_for_freq(clock);
 
        /* Special case handling for 540 pixel clock: bypass WR PLL entirely
         * and directly pass the LC PLL to it. */
@@ -1060,8 +1249,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
                     n2++) {
 
                        for (p = P_MIN; p <= P_MAX; p += P_INC)
-                               wrpll_update_rnp(freq2k, budget,
-                                                r2, n2, p, &best);
+                               hsw_wrpll_update_rnp(freq2k, budget,
+                                                    r2, n2, p, &best);
                }
        }
 
@@ -1105,6 +1294,102 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
        return true;
 }
 
+struct skl_wrpll_context {
+       uint64_t min_deviation;         /* current minimal deviation */
+       uint64_t central_freq;          /* chosen central freq */
+       uint64_t dco_freq;              /* chosen dco freq */
+       unsigned int p;                 /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+       memset(ctx, 0, sizeof(*ctx));
+
+       ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+                                 uint64_t central_freq,
+                                 uint64_t dco_freq,
+                                 unsigned int divider)
+{
+       uint64_t deviation;
+
+       deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+                             central_freq);
+
+       /* positive deviation */
+       if (dco_freq >= central_freq) {
+               if (deviation < SKL_DCO_MAX_PDEVIATION &&
+                   deviation < ctx->min_deviation) {
+                       ctx->min_deviation = deviation;
+                       ctx->central_freq = central_freq;
+                       ctx->dco_freq = dco_freq;
+                       ctx->p = divider;
+               }
+       /* negative deviation */
+       } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+                  deviation < ctx->min_deviation) {
+               ctx->min_deviation = deviation;
+               ctx->central_freq = central_freq;
+               ctx->dco_freq = dco_freq;
+               ctx->p = divider;
+       }
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+                                     unsigned int *p0 /* out */,
+                                     unsigned int *p1 /* out */,
+                                     unsigned int *p2 /* out */)
+{
+       /* even dividers */
+       if (p % 2 == 0) {
+               unsigned int half = p / 2;
+
+               if (half == 1 || half == 2 || half == 3 || half == 5) {
+                       *p0 = 2;
+                       *p1 = 1;
+                       *p2 = half;
+               } else if (half % 2 == 0) {
+                       *p0 = 2;
+                       *p1 = half / 2;
+                       *p2 = 2;
+               } else if (half % 3 == 0) {
+                       *p0 = 3;
+                       *p1 = half / 3;
+                       *p2 = 2;
+               } else if (half % 7 == 0) {
+                       *p0 = 7;
+                       *p1 = half / 7;
+                       *p2 = 2;
+               }
+       } else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+               *p0 = 3;
+               *p1 = 1;
+               *p2 = p / 3;
+       } else if (p == 5 || p == 7) {
+               *p0 = p;
+               *p1 = 1;
+               *p2 = 1;
+       } else if (p == 15) {
+               *p0 = 3;
+               *p1 = 1;
+               *p2 = 5;
+       } else if (p == 21) {
+               *p0 = 7;
+               *p1 = 1;
+               *p2 = 3;
+       } else if (p == 35) {
+               *p0 = 7;
+               *p1 = 1;
+               *p2 = 5;
+       }
+}
+
 struct skl_wrpll_params {
        uint32_t        dco_fraction;
        uint32_t        dco_integer;
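[The deviation test in skl_wrpll_try_divider above works in units of 0.01%: deviation = 10000 * |dco_freq - central_freq| / central_freq, so the +1%/-6% window from the comment becomes the 100 and 600 limits. For example, a 9.1 GHz candidate DCO against the 9.0 GHz central frequency scores 10000 * 0.1 / 9.0 ≈ 111, above SKL_DCO_MAX_PDEVIATION, and is rejected, while 8.5 GHz scores ≈ 556 and passes on the negative side. skl_wrpll_get_multipliers then factors the winning divider, e.g. p = 20 gives half = 10, which is even, so (p0, p1, p2) = (2, 5, 2).]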
@@ -1115,150 +1400,145 @@ struct skl_wrpll_params {
        uint32_t        central_freq;
 };
 
-static void
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
-                       struct skl_wrpll_params *wrpll_params)
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+                                     uint64_t afe_clock,
+                                     uint64_t central_freq,
+                                     uint32_t p0, uint32_t p1, uint32_t p2)
 {
-       uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
-       uint64_t dco_central_freq[3] = {8400000000ULL,
-                                       9000000000ULL,
-                                       9600000000ULL};
-       uint32_t min_dco_deviation = 400;
-       uint32_t min_dco_index = 3;
-       uint32_t P0[4] = {1, 2, 3, 7};
-       uint32_t P2[4] = {1, 2, 3, 5};
-       bool found = false;
-       uint32_t candidate_p = 0;
-       uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
-       uint32_t candidate_p2[3] = {0};
-       uint32_t dco_central_freq_deviation[3];
-       uint32_t i, P1, k, dco_count;
-       bool retry_with_odd = false;
        uint64_t dco_freq;
 
-       /* Determine P0, P1 or P2 */
-       for (dco_count = 0; dco_count < 3; dco_count++) {
-               found = false;
-               candidate_p =
-                       div64_u64(dco_central_freq[dco_count], afe_clock);
-               if (retry_with_odd == false)
-                       candidate_p = (candidate_p % 2 == 0 ?
-                               candidate_p : candidate_p + 1);
-
-               for (P1 = 1; P1 < candidate_p; P1++) {
-                       for (i = 0; i < 4; i++) {
-                               if (!(P0[i] != 1 || P1 == 1))
-                                       continue;
-
-                               for (k = 0; k < 4; k++) {
-                                       if (P1 != 1 && P2[k] != 2)
-                                               continue;
-
-                                       if (candidate_p == P0[i] * P1 * P2[k]) {
-                                               /* Found possible P0, P1, P2 */
-                                               found = true;
-                                               candidate_p0[dco_count] = P0[i];
-                                               candidate_p1[dco_count] = P1;
-                                               candidate_p2[dco_count] = P2[k];
-                                               goto found;
-                                       }
-
-                               }
-                       }
-               }
+       switch (central_freq) {
+       case 9600000000ULL:
+               params->central_freq = 0;
+               break;
+       case 9000000000ULL:
+               params->central_freq = 1;
+               break;
+       case 8400000000ULL:
+               params->central_freq = 3;
+       }
 
-found:
-               if (found) {
-                       dco_central_freq_deviation[dco_count] =
-                               div64_u64(10000 *
-                                         abs_diff((candidate_p * afe_clock),
-                                                  dco_central_freq[dco_count]),
-                                         dco_central_freq[dco_count]);
-
-                       if (dco_central_freq_deviation[dco_count] <
-                               min_dco_deviation) {
-                               min_dco_deviation =
-                                       dco_central_freq_deviation[dco_count];
-                               min_dco_index = dco_count;
-                       }
-               }
+       switch (p0) {
+       case 1:
+               params->pdiv = 0;
+               break;
+       case 2:
+               params->pdiv = 1;
+               break;
+       case 3:
+               params->pdiv = 2;
+               break;
+       case 7:
+               params->pdiv = 4;
+               break;
+       default:
+               WARN(1, "Incorrect PDiv\n");
+       }
 
-               if (min_dco_index > 2 && dco_count == 2) {
-                       retry_with_odd = true;
-                       dco_count = 0;
-               }
+       switch (p2) {
+       case 5:
+               params->kdiv = 0;
+               break;
+       case 2:
+               params->kdiv = 1;
+               break;
+       case 3:
+               params->kdiv = 2;
+               break;
+       case 1:
+               params->kdiv = 3;
+               break;
+       default:
+               WARN(1, "Incorrect KDiv\n");
        }
 
-       if (min_dco_index > 2) {
-               WARN(1, "No valid values found for the given pixel clock\n");
-       } else {
-               wrpll_params->central_freq = dco_central_freq[min_dco_index];
+       params->qdiv_ratio = p1;
+       params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
 
-               switch (dco_central_freq[min_dco_index]) {
-               case 9600000000ULL:
-                       wrpll_params->central_freq = 0;
-                       break;
-               case 9000000000ULL:
-                       wrpll_params->central_freq = 1;
-                       break;
-               case 8400000000ULL:
-                       wrpll_params->central_freq = 3;
-               }
+       dco_freq = p0 * p1 * p2 * afe_clock;
 
-               switch (candidate_p0[min_dco_index]) {
-               case 1:
-                       wrpll_params->pdiv = 0;
-                       break;
-               case 2:
-                       wrpll_params->pdiv = 1;
-                       break;
-               case 3:
-                       wrpll_params->pdiv = 2;
-                       break;
-               case 7:
-                       wrpll_params->pdiv = 4;
-                       break;
-               default:
-                       WARN(1, "Incorrect PDiv\n");
-               }
+       /*
+        * Intermediate values are in Hz.
+        * Divide by MHz to match bspec.
+        */
+       params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+       params->dco_fraction =
+               div_u64((div_u64(dco_freq, 24) -
+                        params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
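
As a quick sanity check of the fixed-point math above, assuming a 148.5 MHz
pixel clock and the even divider p = 12 (values chosen purely for
illustration, not taken from the patch), a standalone re-derivation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t afe_clock = 148500000ULL * 5;	/* 742.5 MHz */
		uint64_t dco_freq = 12 * afe_clock;	/* 8.91 GHz */
		uint64_t mhz = 1000000ULL;
		uint32_t dco_integer = dco_freq / (24 * mhz);
		uint32_t dco_fraction = (dco_freq / 24 -
					 dco_integer * mhz) * 0x8000 / mhz;

		/* prints dco_integer=371 dco_fraction=0x2000 */
		printf("dco_integer=%u dco_fraction=0x%x\n",
		       dco_integer, dco_fraction);
		return 0;
	}

That is, the DCO runs at (371 + 8192/32768) * 24 MHz = 8.91 GHz.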
 
-               switch (candidate_p2[min_dco_index]) {
-               case 5:
-                       wrpll_params->kdiv = 0;
-                       break;
-               case 2:
-                       wrpll_params->kdiv = 1;
-                       break;
-               case 3:
-                       wrpll_params->kdiv = 2;
-                       break;
-               case 1:
-                       wrpll_params->kdiv = 3;
-                       break;
-               default:
-                       WARN(1, "Incorrect KDiv\n");
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+                       struct skl_wrpll_params *wrpll_params)
+{
+       uint64_t afe_clock = (uint64_t)clock * 5; /* AFE Clock is 5x Pixel clock */
+       uint64_t dco_central_freq[3] = {8400000000ULL,
+                                       9000000000ULL,
+                                       9600000000ULL};
+       static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+                                            24, 28, 30, 32, 36, 40, 42, 44,
+                                            48, 52, 54, 56, 60, 64, 66, 68,
+                                            70, 72, 76, 78, 80, 84, 88, 90,
+                                            92, 96, 98 };
+       static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+       static const struct {
+               const int *list;
+               int n_dividers;
+       } dividers[] = {
+               { even_dividers, ARRAY_SIZE(even_dividers) },
+               { odd_dividers, ARRAY_SIZE(odd_dividers) },
+       };
+       struct skl_wrpll_context ctx;
+       unsigned int dco, d, i;
+       unsigned int p0, p1, p2;
+
+       skl_wrpll_context_init(&ctx);
+
+       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+               for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+                       for (i = 0; i < dividers[d].n_dividers; i++) {
+                               unsigned int p = dividers[d].list[i];
+                               uint64_t dco_freq = p * afe_clock;
+
+                               skl_wrpll_try_divider(&ctx,
+                                                     dco_central_freq[dco],
+                                                     dco_freq,
+                                                     p);
+                               /*
+                                * Skip the remaining dividers if we're sure to
+                                * have found the definitive divider; we can't
+                                * improve on a 0 deviation.
+                                */
+                               if (ctx.min_deviation == 0)
+                                       goto skip_remaining_dividers;
+                       }
                }
 
-               wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
-               wrpll_params->qdiv_mode =
-                       (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
-               dco_freq = candidate_p0[min_dco_index] *
-                       candidate_p1[min_dco_index] *
-                       candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
                /*
-                * Intermediate values are in Hz.
-                * Divide by MHz to match bsepc
+                * If a solution is found with an even divider, prefer
+                * this one.
                 */
-               wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
-               wrpll_params->dco_fraction =
-                       div_u64(((div_u64(dco_freq, 24) -
-                                 wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+               if (d == 0 && ctx.p)
+                       break;
+       }
 
+       if (!ctx.p) {
+               DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+               return false;
        }
-}
 
+       /*
+        * gcc incorrectly warns that these can be used without being
+        * initialized. To be fair, it's hard for it to prove otherwise.
+        */
+       p0 = p1 = p2 = 0;
+       skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+       skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+                                 p0, p1, p2);
+
+       return true;
+}
 
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1561,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 
                ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-               skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+               if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+                       return false;
 
                cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
                         DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1334,6 +1615,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 /* bxt clock parameters */
 struct bxt_clk_div {
+       int clock;
        uint32_t p1;
        uint32_t p2;
        uint32_t m2_int;
@@ -1343,14 +1625,14 @@ struct bxt_clk_div {
 };
 
 /* pre-calculated values for DP link rates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
-       /* 162 */ {4, 2, 32, 1677722, 1, 1},
-       /* 270 */ {4, 1, 27,       0, 0, 1},
-       /* 540 */ {2, 1, 27,       0, 0, 1},
-       /* 216 */ {3, 2, 32, 1677722, 1, 1},
-       /* 243 */ {4, 1, 24, 1258291, 1, 1},
-       /* 324 */ {4, 1, 32, 1677722, 1, 1},
-       /* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+       {162000, 4, 2, 32, 1677722, 1, 1},
+       {270000, 4, 1, 27,       0, 0, 1},
+       {540000, 2, 1, 27,       0, 0, 1},
+       {216000, 3, 2, 32, 1677722, 1, 1},
+       {243000, 4, 1, 24, 1258291, 1, 1},
+       {324000, 4, 1, 32, 1677722, 1, 1},
+       {432000, 3, 1, 32, 1677722, 1, 1}
 };
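
As a sanity check of the table, assuming the 100 MHz Broxton reference
clock, m1 = 2, n = 1 and a 22-bit m2 fraction, the 162000 kHz row works
out consistently with the vco computation further down:

	m2  = 32 + 1677722 / 2^22       ~ 32.4
	vco = 100000 kHz * 2 * 32.4     = 6480000 kHz
	    = clock * 10 / 2 * p1 * p2  = 162000 * 5 * 4 * 2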
 
 static bool
@@ -1363,7 +1645,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
        struct bxt_clk_div clk_div = {0};
        int vco = 0;
        uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-       uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+       uint32_t lanestagger;
 
        if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
                intel_clock_t best_clock;
@@ -1390,29 +1672,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
                vco = best_clock.vco;
        } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
                        intel_encoder->type == INTEL_OUTPUT_EDP) {
-               struct drm_encoder *encoder = &intel_encoder->base;
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+               int i;
 
-               switch (intel_dp->link_bw) {
-               case DP_LINK_BW_1_62:
-                       clk_div = bxt_dp_clk_val[0];
-                       break;
-               case DP_LINK_BW_2_7:
-                       clk_div = bxt_dp_clk_val[1];
-                       break;
-               case DP_LINK_BW_5_4:
-                       clk_div = bxt_dp_clk_val[2];
-                       break;
-               default:
-                       clk_div = bxt_dp_clk_val[0];
-                       DRM_ERROR("Unknown link rate\n");
+               clk_div = bxt_dp_clk_val[0];
+               for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+                       if (bxt_dp_clk_val[i].clock == clock) {
+                               clk_div = bxt_dp_clk_val[i];
+                               break;
+                       }
                }
                vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
        }
 
-       dco_amp = 15;
-       dcoampovr_en_h = 0;
-       if (vco >= 6200000 && vco <= 6480000) {
+       if (vco >= 6200000 && vco <= 6700000) {
                prop_coef = 4;
                int_coef = 9;
                gain_ctl = 3;
@@ -1423,8 +1695,6 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
                int_coef = 11;
                gain_ctl = 3;
                targ_cnt = 9;
-               if (vco >= 4800000 && vco < 5400000)
-                       dcoampovr_en_h = 1;
        } else if (vco == 5400000) {
                prop_coef = 3;
                int_coef = 8;
@@ -1466,10 +1736,13 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 
        crtc_state->dpll_hw_state.pll8 = targ_cnt;
 
-       if (dcoampovr_en_h)
-               crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+       crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-       crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+       crtc_state->dpll_hw_state.pll10 =
+               PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+               | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+       crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
        crtc_state->dpll_hw_state.pcsdw12 =
                LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2072,48 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
                           TRANS_CLK_SEL_DISABLED);
 }
 
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-                            enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+                              enum port port, int type)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct ddi_buf_trans *ddi_translations;
+       uint8_t iboost;
+       int n_entries;
+       u32 reg;
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT) {
+               ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+               iboost = ddi_translations[level].i_boost;
+       } else if (type == INTEL_OUTPUT_EDP) {
+               ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+               iboost = ddi_translations[level].i_boost;
+       } else if (type == INTEL_OUTPUT_HDMI) {
+               ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+               iboost = ddi_translations[level].i_boost;
+       } else {
+               return;
+       }
+
+       /* Make sure that the requested I_boost is valid */
+       if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+               DRM_ERROR("Invalid I_boost value %u\n", iboost);
+               return;
+       }
+
+       reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+       reg &= ~BALANCE_LEG_MASK(port);
+       reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+       if (iboost)
+               reg |= iboost << BALANCE_LEG_SHIFT(port);
+       else
+               reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+       I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+                                   enum port port, int type)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2173,73 @@ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
        I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
 }
 
+static uint32_t translate_signal_level(int signal_levels)
+{
+       uint32_t level;
+
+       switch (signal_levels) {
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+                             signal_levels);
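+               /* unsupported values fall through to level 0 */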
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 0;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 1;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               level = 2;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+               level = 3;
+               break;
+
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 4;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 5;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               level = 6;
+               break;
+
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 7;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 8;
+               break;
+
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 9;
+               break;
+       }
+
+       return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dport->base.base.dev;
+       struct intel_encoder *encoder = &dport->base;
+       uint8_t train_set = intel_dp->train_set[0];
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       enum port port = dport->port;
+       uint32_t level;
+
+       level = translate_signal_level(signal_levels);
+
+       if (IS_SKYLAKE(dev))
+               skl_ddi_set_iboost(dev, level, port, encoder->type);
+       else if (IS_BROXTON(dev))
+               bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+       return DDI_BUF_TRANS_SELECT(level);
+}
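
The expected call site is in the DP link-training path rather than in this
hunk; roughly, as a sketch with the surrounding variables assumed:

	signal_levels = ddi_signal_levels(intel_dp);
	mask = DDI_BUF_EMP_MASK;
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;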
+
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2784,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
        temp = I915_READ(BXT_PORT_PLL(port, 9));
        temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-       temp |= (5 << 1);
+       temp |= pll->config.hw_state.pll9;
        I915_WRITE(BXT_PORT_PLL(port, 9), temp);
 
        temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2797,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
        temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
        temp |= PORT_PLL_RECALIBRATE;
        I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-       /* Enable 10 bit clock */
-       temp |= PORT_PLL_10BIT_CLK_ENABLE;
+       temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+       temp |= pll->config.hw_state.ebb4;
        I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
        /* Enable PLL */
@@ -2469,13 +2849,38 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                return false;
 
        hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+       hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+       hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+       hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
        hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+       hw_state->pll0 &= PORT_PLL_M2_MASK;
+
        hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+       hw_state->pll1 &= PORT_PLL_N_MASK;
+
        hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+       hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
        hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+       hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
        hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+       hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+                         PORT_PLL_INT_COEFF_MASK |
+                         PORT_PLL_GAIN_CTL_MASK;
+
        hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+       hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+       hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+       hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
        hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+       hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+                          PORT_PLL_DCO_AMP_MASK;
+
        /*
         * While we write to the group register to program all lanes at once we
         * can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2891,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
                                 hw_state->pcsdw12,
                                 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+       hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
        return true;
 }
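
Masking the readback down to the bits the driver actually programs is what
allows a software/hardware state cross-check to be a plain field comparison;
a rough sketch (not part of the patch, with names taken from the surrounding
code):

	struct intel_dpll_hw_state hw;

	if (bxt_ddi_pll_get_hw_state(dev_priv, pll, &hw))
		WARN_ON(hw.pll0 != pll->config.hw_state.pll0);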
@@ -2510,7 +2916,6 @@ void intel_ddi_pll_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val = I915_READ(LCPLL_CTL);
-       int cdclk_freq;
 
        if (IS_SKYLAKE(dev))
                skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2924,10 @@ void intel_ddi_pll_init(struct drm_device *dev)
        else
                hsw_shared_dplls_init(dev_priv);
 
-       cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-       DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
        if (IS_SKYLAKE(dev)) {
+               int cdclk_freq;
+
+               cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
                dev_priv->skl_boot_cdclk = cdclk_freq;
                if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
                        DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3023,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
        I915_WRITE(_FDI_RXA_CTL, val);
 }
 
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-       int type = intel_dig_port->base.type;
-
-       if (type != INTEL_OUTPUT_DISPLAYPORT &&
-           type != INTEL_OUTPUT_EDP &&
-           type != INTEL_OUTPUT_UNKNOWN) {
-               return;
-       }
-
-       intel_dp_hot_plug(intel_encoder);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -2793,10 +3184,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
                     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
        init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
        if (!init_dp && !init_hdmi) {
-               DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+               DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
                              port_name(port));
-               init_hdmi = true;
-               init_dp = true;
+               return;
        }
 
        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3215,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        intel_encoder->cloneable = 0;
-       intel_encoder->hot_plug = intel_ddi_hot_plug;
 
        if (init_dp) {
                if (!intel_ddi_init_dp_connector(intel_dig_port))
                        goto err;
 
                intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-               dev_priv->hpd_irq_port[port] = intel_dig_port;
+               dev_priv->hotplug.irq_port[port] = intel_dig_port;
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index 87476ff181ddbef0967d948c37119cfcbd758315..7f6d5fd7c2cf9cf91e2a6d87defbe559cbc19ea1 100644 (file)
@@ -86,9 +86,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);
 
-static int intel_set_mode(struct drm_crtc *crtc,
-                         struct drm_atomic_state *state,
-                         bool force_restore);
 static int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state);
 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
                           int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
-       if (!connector->mst_port)
-               return connector->encoder;
-       else
-               return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
 
 typedef struct {
        int     min, max;
@@ -413,7 +401,7 @@ static const intel_limit_t intel_limits_chv = {
 static const intel_limit_t intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
-       .vco = { .min = 4800000, .max = 6480000 },
+       .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@ static const intel_limit_t intel_limits_bxt = {
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
 {
-       clock->m = clock->m1 * clock->m2;
-       clock->p = clock->p1 * clock->p2;
-       if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return;
-       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+       return drm_atomic_crtc_needs_modeset(state);
 }
 
 /**
@@ -561,15 +545,25 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk)
        return limit;
 }
 
+/*
+ * Platform-specific helpers to calculate the port PLL loopback (clock.m)
+ * and post-divider (clock.p) values, plus the pre-divided (clock.vco) and
+ * post-divided fast (clock.dot) clock rates. The fast dot clock is fed to
+ * the port's IO logic. Each helper returns the rate of the clock fed to
+ * the display engine's pipe, which is either the fast dot clock itself or
+ * a divided-down version of it.
+ */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return;
+               return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot;
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-               return;
+               return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot;
 }
 
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return;
+               return 0;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+       clock->m = clock->m1 * clock->m2;
+       clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
                        clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot / 5;
 }
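
A worked example of the new return convention, with divider values assumed
purely for illustration: on CHV with refclk = 100000 kHz, n = 1, m1 = 2,
m2 = 27.0 (27 << 22 in the register) and p1 = 2, p2 = 10,

	vco      = 100000 * 2 * 27 / 1 = 5400000 kHz
	fast dot = 5400000 / (2 * 10)  =  270000 kHz  (fed to the port)
	returned = 270000 / 5          =   54000 kHz  (fed to the pipe)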
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
        return true;
 }
 
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
-                   struct intel_crtc_state *crtc_state,
-                   int target, int refclk, intel_clock_t *match_clock,
-                   intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+                  const struct intel_crtc_state *crtc_state,
+                  int target)
 {
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       intel_clock_t clock;
-       int err = target;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
 
        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
@@ -657,18 +663,31 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
                 * single/dual channel state, if we even can.
                 */
                if (intel_is_dual_link_lvds(dev))
-                       clock.p2 = limit->p2.p2_fast;
+                       return limit->p2.p2_fast;
                else
-                       clock.p2 = limit->p2.p2_slow;
+                       return limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
-                       clock.p2 = limit->p2.p2_slow;
+                       return limit->p2.p2_slow;
                else
-                       clock.p2 = limit->p2.p2_fast;
+                       return limit->p2.p2_fast;
        }
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+                   struct intel_crtc_state *crtc_state,
+                   int target, int refclk, intel_clock_t *match_clock,
+                   intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc_state->base.crtc->dev;
+       intel_clock_t clock;
+       int err = target;
 
        memset(best_clock, 0, sizeof(*best_clock));
 
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;
 
-                                       i9xx_clock(refclk, &clock);
+                                       i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
@@ -708,30 +727,14 @@ pnv_find_best_dpll(const intel_limit_t *limit,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        intel_clock_t clock;
        int err = target;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               /*
-                * For LVDS just rely on its current settings for dual-channel.
-                * We haven't figured out how to reliably set up different
-                * single/dual channel state, if we even can.
-                */
-               if (intel_is_dual_link_lvds(dev))
-                       clock.p2 = limit->p2.p2_fast;
-               else
-                       clock.p2 = limit->p2.p2_slow;
-       } else {
-               if (target < limit->p2.dot_limit)
-                       clock.p2 = limit->p2.p2_slow;
-               else
-                       clock.p2 = limit->p2.p2_fast;
-       }
-
        memset(best_clock, 0, sizeof(*best_clock));
 
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@ pnv_find_best_dpll(const intel_limit_t *limit,
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;
 
-                                       pineview_clock(refclk, &clock);
+                                       pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
@@ -769,28 +772,17 @@ g4x_find_best_dpll(const intel_limit_t *limit,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
+       struct drm_device *dev = crtc_state->base.crtc->dev;
        intel_clock_t clock;
        int max_n;
-       bool found;
+       bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);
-       found = false;
-
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_is_dual_link_lvds(dev))
-                       clock.p2 = limit->p2.p2_fast;
-               else
-                       clock.p2 = limit->p2.p2_slow;
-       } else {
-               if (target < limit->p2.dot_limit)
-                       clock.p2 = limit->p2.p2_slow;
-               else
-                       clock.p2 = limit->p2.p2_fast;
-       }
 
        memset(best_clock, 0, sizeof(*best_clock));
+
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@ g4x_find_best_dpll(const intel_limit_t *limit,
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;
 
-                                       i9xx_clock(refclk, &clock);
+                                       i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
@@ -893,7 +885,7 @@ vlv_find_best_dpll(const intel_limit_t *limit,
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);
 
-                                       vlv_clock(refclk, &clock);
+                                       vlv_calc_dpll_params(refclk, &clock);
 
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
@@ -956,7 +948,7 @@ chv_find_best_dpll(const intel_limit_t *limit,
 
                        clock.m2 = m2;
 
-                       chv_clock(refclk, &clock);
+                       chv_calc_dpll_params(refclk, &clock);
 
                        if (!intel_PLL_is_valid(dev, limit, &clock))
                                continue;
@@ -1026,7 +1018,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
                line_mask = DSL_LINEMASK_GEN3;
 
        line1 = I915_READ(reg) & line_mask;
-       mdelay(5);
+       msleep(5);
        line2 = I915_READ(reg) & line_mask;
 
        return line1 == line2;
@@ -1694,7 +1686,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
        int count = 0;
 
        for_each_intel_crtc(dev, crtc)
-               count += crtc->active &&
+               count += crtc->base.state->active &&
                        intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
        return count;
@@ -1775,7 +1767,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
-           intel_num_dvo_pipes(dev) == 1) {
+           !intel_num_dvo_pipes(dev)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1782,13 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       I915_WRITE(DPLL(pipe), 0);
+       I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
 }
 
 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-       u32 val = 0;
+       u32 val;
 
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1797,9 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
         * Leave integrated clock source and reference clock enabled for pipe B.
         * The latter is needed for VGA hotplug / manual detection.
         */
+       val = DPLL_VGA_MODE_DIS;
        if (pipe == PIPE_B)
-               val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+               val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
 
@@ -1821,7 +1814,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        assert_pipe_disabled(dev_priv, pipe);
 
        /* Set PLL en = 0 */
-       val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+       val = DPLL_SSC_REF_CLK_CHV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;
        I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1936,13 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
        /* PCH only available on ILK+ */
-       BUG_ON(INTEL_INFO(dev)->gen < 5);
-       if (WARN_ON(pll == NULL))
-              return;
+       if (INTEL_INFO(dev)->gen < 5)
+               return;
 
-       if (WARN_ON(pll->config.crtc_mask == 0))
+       if (pll == NULL)
+               return;
+
+       if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
                return;
 
        DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2000,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 
        if (HAS_PCH_IBX(dev_priv->dev)) {
                /*
-                * make the BPC in transcoder be consistent with
-                * that in pipeconf reg.
+                * Make the BPC in transcoder be consistent with
+                * that in pipeconf reg. For HDMI we must use 8bpc
+                * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
-               val |= pipeconf_val & PIPECONF_BPC_MASK;
+               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+                       val |= PIPECONF_8BPC;
+               else
+                       val |= pipeconf_val & PIPECONF_BPC_MASK;
        }
 
        val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2122,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
        int reg;
        u32 val;
 
+       DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
        assert_planes_disabled(dev_priv, pipe);
        assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2183,8 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
        int reg;
        u32 val;
 
+       DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
@@ -2211,28 +2215,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
                intel_wait_for_pipe_off(crtc);
 }
 
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane:  plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
-                                         struct drm_crtc *crtc)
-{
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       /* If the pipe isn't enabled, we can't pump pixels and may hang */
-       assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-       to_intel_plane_state(plane->state)->visible = true;
-
-       dev_priv->display.update_primary_plane(crtc, plane->fb,
-                                              crtc->x, crtc->y);
-}
-
 static bool need_vtd_wa(struct drm_device *dev)
 {
 #ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2284,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
                        const struct drm_plane_state *plane_state)
 {
        struct intel_rotation_info *info = &view->rotation_info;
+       unsigned int tile_height, tile_pitch;
 
        *view = i915_ggtt_view_normal;
 
@@ -2318,14 +2301,35 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
        info->pitch = fb->pitches[0];
        info->fb_modifier = fb->modifier[0];
 
+       tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+                                       fb->modifier[0]);
+       tile_pitch = PAGE_SIZE / tile_height;
+       info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+       info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+       info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
        return 0;
 }
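
To make the new size bookkeeping concrete, assume a 3840x2160 XRGB8888,
Y-tiled framebuffer (pitch 15360 bytes; a Y tile is 128 bytes x 32 rows,
i.e. exactly one 4K page):

	tile_height  = 32, tile_pitch = 4096 / 32 = 128
	width_pages  = DIV_ROUND_UP(15360, 128)   = 120
	height_pages = DIV_ROUND_UP(2160, 32)     =  68
	size         = 120 * 68 * 4096            = 33423360 bytes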
 
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_INFO(dev_priv)->gen >= 9)
+               return 256 * 1024;
+       else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+                IS_VALLEYVIEW(dev_priv))
+               return 128 * 1024;
+       else if (INTEL_INFO(dev_priv)->gen >= 4)
+               return 4 * 1024;
+       else
+               return 0;
+}
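
Every branch above returns zero or a power of two, which is what lets the
gen4+ page-offset code further down turn the value into a mask; the
invariant, as a one-line sketch:

	/* holds for 0, 4K, 128K and 256K alike */
	WARN_ON(intel_linear_alignment(dev_priv) &
		(intel_linear_alignment(dev_priv) - 1));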
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
                           struct drm_framebuffer *fb,
                           const struct drm_plane_state *plane_state,
-                          struct intel_engine_cs *pipelined)
+                          struct intel_engine_cs *pipelined,
+                          struct drm_i915_gem_request **pipelined_request)
 {
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2342,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
        switch (fb->modifier[0]) {
        case DRM_FORMAT_MOD_NONE:
-               if (INTEL_INFO(dev)->gen >= 9)
-                       alignment = 256 * 1024;
-               else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-                       alignment = 128 * 1024;
-               else if (INTEL_INFO(dev)->gen >= 4)
-                       alignment = 4 * 1024;
-               else
-                       alignment = 64 * 1024;
+               alignment = intel_linear_alignment(dev_priv);
                break;
        case I915_FORMAT_MOD_X_TILED:
                if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2387,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
        dev_priv->mm.interruptible = false;
        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
-                                                  &view);
+                                                  pipelined_request, &view);
        if (ret)
                goto err_interruptible;
 
@@ -2400,7 +2397,18 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
         * a fence as the cost is not that onerous.
         */
        ret = i915_gem_object_get_fence(obj);
-       if (ret)
+       if (ret == -EDEADLK) {
+               /*
+                * -EDEADLK means there are no free fences
+                * no pending flips.
+                *
+                * This is propagated to atomic, but it uses
+                * -EDEADLK to force a locking recovery, so
+                * change the returned error to -EBUSY.
+                */
+               ret = -EBUSY;
+               goto err_unpin;
+       } else if (ret)
                goto err_unpin;
 
        i915_gem_object_pin_fence(obj);
@@ -2435,7 +2443,8 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+                                            int *x, int *y,
                                             unsigned int tiling_mode,
                                             unsigned int cpp,
                                             unsigned int pitch)
@@ -2451,12 +2460,13 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
 
                return tile_rows * pitch * 8 + tiles * 4096;
        } else {
+               unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
                unsigned int offset;
 
                offset = *y * pitch + *x * cpp;
-               *y = 0;
-               *x = (offset & 4095) / cpp;
-               return offset & -4096;
+               *y = (offset & alignment) / pitch;
+               *x = ((offset & alignment) - *y * pitch) / cpp;
+               return offset & ~alignment;
        }
 }
 
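Working the linear branch through with assumed numbers (1024-byte stride,
4 bytes per pixel, 4 KiB linear alignment, x = 100, y = 10):

	offset = 10 * 1024 + 100 * 4   = 10640
	base   = offset & ~4095        =  8192  (returned)
	y      = (10640 & 4095) / 1024 =     2
	x      = (2448 - 2 * 1024) / 4 =   100

The plane base now lands on an aligned page and the residual x/y address
the pixel inside it, instead of y being forced to 0 as before.
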
@@ -2583,6 +2593,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
+       struct drm_plane_state *plane_state = primary->state;
        struct drm_framebuffer *fb;
 
        if (!plane_config->fb)
@@ -2622,15 +2633,23 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       plane_state->src_x = plane_state->src_y = 0;
+       plane_state->src_w = fb->width << 16;
+       plane_state->src_h = fb->height << 16;
+
+       plane_state->crtc_x = plane_state->crtc_y = 0;
+       plane_state->crtc_w = fb->width;
+       plane_state->crtc_h = fb->height;
+
        obj = intel_fb_obj(fb);
        if (obj->tiling_mode != I915_TILING_NONE)
                dev_priv->preserve_bios_swizzle = true;
 
-       primary->fb = fb;
-       primary->state->crtc = &intel_crtc->base;
-       primary->crtc = &intel_crtc->base;
-       update_state_fb(primary);
-       obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+       drm_framebuffer_reference(fb);
+       primary->fb = primary->state->fb = fb;
+       primary->crtc = primary->state->crtc = &intel_crtc->base;
+       intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+       obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2744,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 
        if (INTEL_INFO(dev)->gen >= 4) {
                intel_crtc->dspaddr_offset =
-                       intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+                       intel_gen4_compute_page_offset(dev_priv,
+                                                      &x, &y, obj->tiling_mode,
                                                       pixel_size,
                                                       fb->pitches[0]);
                linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2846,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        intel_crtc->dspaddr_offset =
-               intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+               intel_gen4_compute_page_offset(dev_priv,
+                                              &x, &y, obj->tiling_mode,
                                               pixel_size,
                                               fb->pitches[0]);
        linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2925,32 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
        return i915_gem_obj_ggtt_offset_view(obj, view);
 }
 
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+       I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+       I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+       DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+               intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 {
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
        struct intel_crtc_scaler_state *scaler_state;
        int i;
 
-       if (!intel_crtc || !intel_crtc->config)
-               return;
-
-       dev = intel_crtc->base.dev;
-       dev_priv = dev->dev_private;
        scaler_state = &intel_crtc->config->scaler_state;
 
        /* loop through and disable scalers that aren't in use */
        for (i = 0; i < intel_crtc->num_scalers; i++) {
-               if (!scaler_state->scalers[i].in_use) {
-                       I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
-                       I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
-                       I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
-                       DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
-                               intel_crtc->base.base.id, intel_crtc->pipe, i);
-               }
+               if (!scaler_state->scalers[i].in_use)
+                       skl_detach_scaler(intel_crtc, i);
        }
 }
 
@@ -3132,8 +3153,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->display.disable_fbc)
-               dev_priv->display.disable_fbc(dev);
+       if (dev_priv->fbc.disable_fbc)
+               dev_priv->fbc.disable_fbc(dev_priv);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3176,24 +3197,8 @@ static void intel_update_primary_planes(struct drm_device *dev)
        }
 }
 
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!crtc->active)
-               return;
-
-       intel_crtc_disable_planes(&crtc->base);
-       dev_priv->display.crtc_disable(&crtc->base);
-       dev_priv->display.crtc_enable(&crtc->base);
-       intel_crtc_enable_planes(&crtc->base);
-}
-
 void intel_prepare_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc;
-
        /* no reset support for gen2 */
        if (IS_GEN2(dev))
                return;
@@ -3203,18 +3208,11 @@ void intel_prepare_reset(struct drm_device *dev)
                return;
 
        drm_modeset_lock_all(dev);
-
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
-
-               intel_crtc_disable_planes(&crtc->base);
-               dev_priv->display.crtc_disable(&crtc->base);
-       }
+       intel_display_suspend(dev);
 }
 
 void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3256,7 @@ void intel_finish_reset(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_modeset_setup_hw_state(dev, true);
+       intel_display_resume(dev);
 
        intel_hpd_init(dev_priv);
 
@@ -4200,34 +4198,16 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
-       struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-       if (pll == NULL)
-               return;
-
-       if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
-               WARN(1, "bad %s crtc mask\n", pll->name);
-               return;
-       }
-
-       pll->config.crtc_mask &= ~(1 << crtc->pipe);
-       if (pll->config.crtc_mask == 0) {
-               WARN_ON(pll->on);
-               WARN_ON(pll->active);
-       }
-
-       crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                                                struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_shared_dpll *pll;
+       struct intel_shared_dpll_config *shared_dpll;
        enum intel_dpll_id i;
 
+       shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
        if (HAS_PCH_IBX(dev_priv->dev)) {
                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
                i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4216,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
                              crtc->base.base.id, pll->name);
 
-               WARN_ON(pll->new_config->crtc_mask);
+               WARN_ON(shared_dpll[i].crtc_mask);
 
                goto found;
        }
@@ -4256,7 +4236,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                pll = &dev_priv->shared_dplls[i];
                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
                        crtc->base.base.id, pll->name);
-               WARN_ON(pll->new_config->crtc_mask);
+               WARN_ON(shared_dpll[i].crtc_mask);
 
                goto found;
        }
@@ -4265,15 +4245,15 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                pll = &dev_priv->shared_dplls[i];
 
                /* Only want to check enabled timings first */
-               if (pll->new_config->crtc_mask == 0)
+               if (shared_dpll[i].crtc_mask == 0)
                        continue;
 
                if (memcmp(&crtc_state->dpll_hw_state,
-                          &pll->new_config->hw_state,
-                          sizeof(pll->new_config->hw_state)) == 0) {
+                          &shared_dpll[i].hw_state,
+                          sizeof(crtc_state->dpll_hw_state)) == 0) {
                        DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
                                      crtc->base.base.id, pll->name,
-                                     pll->new_config->crtc_mask,
+                                     shared_dpll[i].crtc_mask,
                                      pll->active);
                        goto found;
                }
@@ -4282,7 +4262,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
        /* Ok no matching timings, maybe there's a free one? */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                pll = &dev_priv->shared_dplls[i];
-               if (pll->new_config->crtc_mask == 0) {
+               if (shared_dpll[i].crtc_mask == 0) {
                        DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
                                      crtc->base.base.id, pll->name);
                        goto found;
@@ -4292,83 +4272,33 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
        return NULL;
 
 found:
-       if (pll->new_config->crtc_mask == 0)
-               pll->new_config->hw_state = crtc_state->dpll_hw_state;
+       if (shared_dpll[i].crtc_mask == 0)
+               shared_dpll[i].hw_state =
+                       crtc_state->dpll_hw_state;
 
        crtc_state->shared_dpll = i;
        DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
                         pipe_name(crtc->pipe));
 
-       pll->new_config->crtc_mask |= 1 << crtc->pipe;
+       shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
 
        return pll;
 }
 
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
-                                         unsigned clear_pipes)
-{
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
-
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               pll = &dev_priv->shared_dplls[i];
-
-               pll->new_config = kmemdup(&pll->config, sizeof pll->config,
-                                         GFP_KERNEL);
-               if (!pll->new_config)
-                       goto cleanup;
-
-               pll->new_config->crtc_mask &= ~clear_pipes;
-       }
-
-       return 0;
-
-cleanup:
-       while (--i >= 0) {
-               pll = &dev_priv->shared_dplls[i];
-               kfree(pll->new_config);
-               pll->new_config = NULL;
-       }
-
-       return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
 {
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
+       struct intel_shared_dpll_config *shared_dpll;
        struct intel_shared_dpll *pll;
        enum intel_dpll_id i;
 
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               pll = &dev_priv->shared_dplls[i];
-
-               WARN_ON(pll->new_config == &pll->config);
-
-               pll->config = *pll->new_config;
-               kfree(pll->new_config);
-               pll->new_config = NULL;
-       }
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
+       if (!to_intel_atomic_state(state)->dpll_set)
+               return;
 
+       shared_dpll = to_intel_atomic_state(state)->shared_dpll;
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                pll = &dev_priv->shared_dplls[i];
-
-               WARN_ON(pll->new_config == &pll->config);
-
-               kfree(pll->new_config);
-               pll->new_config = NULL;
+               pll->config = shared_dpll[i];
        }
 }
 
@@ -4386,62 +4316,16 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
        }
 }
 
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
-       struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
-       struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
-       int force_detach)
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+                 unsigned scaler_user, int *scaler_id, unsigned int rotation,
+                 int src_w, int src_h, int dst_w, int dst_h)
 {
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(crtc_state->base.crtc);
        int need_scaling;
-       int idx;
-       int src_w, src_h, dst_w, dst_h;
-       int *scaler_id;
-       struct drm_framebuffer *fb;
-       struct intel_crtc_scaler_state *scaler_state;
-       unsigned int rotation;
-
-       if (!intel_crtc || !crtc_state)
-               return 0;
-
-       scaler_state = &crtc_state->scaler_state;
-
-       idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
-       fb = intel_plane ? plane_state->base.fb : NULL;
-
-       if (intel_plane) {
-               src_w = drm_rect_width(&plane_state->src) >> 16;
-               src_h = drm_rect_height(&plane_state->src) >> 16;
-               dst_w = drm_rect_width(&plane_state->dst);
-               dst_h = drm_rect_height(&plane_state->dst);
-               scaler_id = &plane_state->scaler_id;
-               rotation = plane_state->base.rotation;
-       } else {
-               struct drm_display_mode *adjusted_mode =
-                       &crtc_state->base.adjusted_mode;
-               src_w = crtc_state->pipe_src_w;
-               src_h = crtc_state->pipe_src_h;
-               dst_w = adjusted_mode->hdisplay;
-               dst_h = adjusted_mode->vdisplay;
-               scaler_id = &scaler_state->scaler_id;
-               rotation = DRM_ROTATE_0;
-       }
 
        need_scaling = intel_rotation_90_or_270(rotation) ?
                (src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4341,14 @@ skl_update_scaler_users(
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
-       if (force_detach || !need_scaling || (intel_plane &&
-               (!fb || !plane_state->visible))) {
+       if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
-                       scaler_state->scaler_users &= ~(1 << idx);
+                       scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;
 
-                       DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
-                               "crtc_state = %p scaler_users = 0x%x\n",
-                               intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
-                               intel_plane ? intel_plane->base.base.id :
-                               intel_crtc->base.base.id, crtc_state,
+                       DRM_DEBUG_KMS("scaler_user index %u.%u: "
+                               "Staged freeing scaler id %d scaler_users = 0x%x\n",
+                               intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
@@ -4480,70 +4361,131 @@ skl_update_scaler_users(
 
                src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
                dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
-               DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+               DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
-                       intel_plane ? "PLANE" : "CRTC",
-                       intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-                       intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
-               return -EINVAL;
-       }
-
-       /* check colorkey */
-       if (WARN_ON(intel_plane &&
-               intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
-               DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
-                       intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+                       intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }
 
-       /* Check src format */
-       if (intel_plane) {
-               switch (fb->pixel_format) {
-               case DRM_FORMAT_RGB565:
-               case DRM_FORMAT_XBGR8888:
-               case DRM_FORMAT_XRGB8888:
-               case DRM_FORMAT_ABGR8888:
-               case DRM_FORMAT_ARGB8888:
-               case DRM_FORMAT_XRGB2101010:
-               case DRM_FORMAT_XBGR2101010:
-               case DRM_FORMAT_YUYV:
-               case DRM_FORMAT_YVYU:
-               case DRM_FORMAT_UYVY:
-               case DRM_FORMAT_VYUY:
-                       break;
-               default:
-                       DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
-                               intel_plane->base.base.id, fb->base.id, fb->pixel_format);
-                       return -EINVAL;
-               }
-       }
-
        /* mark this plane as a scaler user in crtc_state */
-       scaler_state->scaler_users |= (1 << idx);
-       DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
-               "crtc_state = %p scaler_users = 0x%x\n",
-               intel_plane ? "PLANE" : "CRTC",
-               intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-               src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
+       scaler_state->scaler_users |= (1 << scaler_user);
+       DRM_DEBUG_KMS("scaler_user index %u.%u: "
+               "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+               intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+               scaler_state->scaler_users);
+
        return 0;
 }
 
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc state
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
-       struct intel_crtc_scaler_state *scaler_state =
-               &crtc->config->scaler_state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+       struct drm_display_mode *adjusted_mode =
+               &state->base.adjusted_mode;
 
-       DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+       DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+                     intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
 
-       /* To update pfit, first update scaler state */
-       skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
-       intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
-       skl_detach_scalers(crtc);
-       if (!enable)
-               return;
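+       /* An inactive crtc must force-detach its scaler. */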
+       return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+               &state->scaler_state.scaler_id, DRM_ROTATE_0,
+               state->pipe_src_w, state->pipe_src_h,
+               adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc state
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+                                  struct intel_plane_state *plane_state)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct intel_plane *intel_plane =
+               to_intel_plane(plane_state->base.plane);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+
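+       /* Planes with no fb, or not visible, must release their scaler. */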
+       bool force_detach = !fb || !plane_state->visible;
+
+       DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+                     intel_plane->base.base.id, intel_crtc->pipe,
+                     drm_plane_index(&intel_plane->base));
+
+       ret = skl_update_scaler(crtc_state, force_detach,
+                               drm_plane_index(&intel_plane->base),
+                               &plane_state->scaler_id,
+                               plane_state->base.rotation,
+                               drm_rect_width(&plane_state->src) >> 16,
+                               drm_rect_height(&plane_state->src) >> 16,
+                               drm_rect_width(&plane_state->dst),
+                               drm_rect_height(&plane_state->dst));
+
+       if (ret || plane_state->scaler_id < 0)
+               return ret;
+
+       /* check colorkey */
+       if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+               DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+                             intel_plane->base.base.id);
+               return -EINVAL;
+       }
+
+       /* Check src format */
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               break;
+       default:
+               DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+                       intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+       int i;
+
+       for (i = 0; i < crtc->num_scalers; i++)
+               skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = crtc->pipe;
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc->config->scaler_state;
+
+       DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
 
        if (crtc->config->pch_pfit.enabled) {
                int id;
@@ -4584,20 +4526,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
        }
 }
 
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
-       struct drm_plane *plane;
-       struct intel_plane *intel_plane;
-
-       drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
-               intel_plane = to_intel_plane(plane);
-               if (intel_plane->pipe == pipe)
-                       intel_plane_restore(&intel_plane->base);
-       }
-}
-
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4596,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
        bool reenable_ips = false;
 
        /* The clocks have to be on to load the palette. */
-       if (!crtc->state->enable || !intel_crtc->active)
+       if (!crtc->state->active)
                return;
 
        if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4683,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
         */
        hsw_enable_ips(intel_crtc);
 
-       mutex_lock(&dev->struct_mutex);
-       intel_fbc_update(dev);
-       mutex_unlock(&dev->struct_mutex);
-
        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So don't enable underrun reporting before at least some planes
@@ -4810,13 +4734,11 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
-       if (HAS_GMCH_DISPLAY(dev))
+       if (HAS_GMCH_DISPLAY(dev)) {
                intel_set_memory_cxsr(dev_priv, false);
-
-       mutex_lock(&dev->struct_mutex);
-       if (dev_priv->fbc.crtc == intel_crtc)
-               intel_fbc_disable(dev);
-       mutex_unlock(&dev->struct_mutex);
+               dev_priv->wm.vlv.cxsr = false;
+               intel_wait_for_vblank(dev, pipe);
+       }
 
        /*
         * FIXME IPS should be fine as long as one plane is
@@ -4827,49 +4749,83 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
        hsw_disable_ips(intel_crtc);
 }
 
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_plane *plane;
 
-       intel_enable_primary_hw_plane(crtc->primary, crtc);
-       intel_enable_sprite_planes(crtc);
-       intel_crtc_update_cursor(crtc, true);
+       if (atomic->wait_vblank)
+               intel_wait_for_vblank(dev, crtc->pipe);
 
-       intel_post_enable_primary(crtc);
+       intel_frontbuffer_flip(dev, atomic->fb_bits);
 
-       /*
-        * FIXME: Once we grow proper nuclear flip support out of this we need
-        * to compute the mask of flip planes precisely. For the time being
-        * consider this a flip to a NULL plane.
-        */
-       intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+       if (atomic->disable_cxsr)
+               crtc->wm.cxsr_allowed = true;
+
+       if (crtc->atomic.update_wm_post)
+               intel_update_watermarks(&crtc->base);
+
+       if (atomic->update_fbc)
+               intel_fbc_update(dev_priv);
+
+       if (atomic->post_enable_primary)
+               intel_post_enable_primary(&crtc->base);
+
+       drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+               intel_update_sprite_watermarks(plane, &crtc->base,
+                                              0, 0, 0, false, false);
+
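+       /* The flags above are one-shot; clear them for the next commit. */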
+       memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_plane *intel_plane;
-       int pipe = intel_crtc->pipe;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+       struct drm_plane *p;
 
-       if (!intel_crtc->active)
-               return;
+       /* Track fbs for any planes being disabled */
+       drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+               struct intel_plane *plane = to_intel_plane(p);
 
-       intel_crtc_wait_for_pending_flips(crtc);
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+                                 plane->frontbuffer_bit);
+               mutex_unlock(&dev->struct_mutex);
+       }
 
-       intel_pre_disable_primary(crtc);
+       if (atomic->wait_for_flips)
+               intel_crtc_wait_for_pending_flips(&crtc->base);
 
-       intel_crtc_dpms_overlay_disable(intel_crtc);
-       for_each_intel_plane(dev, intel_plane) {
-               if (intel_plane->pipe == pipe) {
-                       struct drm_crtc *from = intel_plane->base.crtc;
+       if (atomic->disable_fbc)
+               intel_fbc_disable_crtc(crtc);
 
-                       intel_plane->disable_plane(&intel_plane->base,
-                                                  from ?: crtc, true);
-               }
+       if (crtc->atomic.disable_ips)
+               hsw_disable_ips(crtc);
+
+       if (atomic->pre_disable_primary)
+               intel_pre_disable_primary(&crtc->base);
+
+       if (atomic->disable_cxsr) {
+               crtc->wm.cxsr_allowed = false;
+               intel_set_memory_cxsr(dev_priv, false);
        }
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_plane *p;
+       int pipe = intel_crtc->pipe;
+
+       intel_crtc_dpms_overlay_disable(intel_crtc);
+
+       drm_for_each_plane_mask(p, dev, plane_mask)
+               to_intel_plane(p)->disable_plane(p, crtc);
 
        /*
         * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4887,9 +4843,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
-       WARN_ON(!crtc->state->enable);
-
-       if (intel_crtc->active)
+       if (WARN_ON(intel_crtc->active))
                return;
 
        if (intel_crtc->config->has_pch_encoder)
@@ -4956,46 +4910,17 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
-       /* We want to get the other_active_crtc only if there's only 1 other
-        * active crtc. */
-       for_each_intel_crtc(dev, crtc_it) {
-               if (!crtc_it->active || crtc_it == crtc)
-                       continue;
-
-               if (other_active_crtc)
-                       return;
-
-               other_active_crtc = crtc_it;
-       }
-       if (!other_active_crtc)
-               return;
-
-       intel_wait_for_vblank(dev, other_active_crtc->pipe);
-       intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
-       int pipe = intel_crtc->pipe;
+       int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc->state);
 
-       WARN_ON(!crtc->state->enable);
-
-       if (intel_crtc->active)
+       if (WARN_ON(intel_crtc->active))
                return;
 
        if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5036,7 +4961,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_ddi_enable_pipe_clock(intel_crtc);
 
        if (INTEL_INFO(dev)->gen == 9)
-               skylake_pfit_update(intel_crtc, 1);
+               skylake_pfit_enable(intel_crtc);
        else if (INTEL_INFO(dev)->gen < 9)
                ironlake_pfit_enable(intel_crtc);
        else
@@ -5070,7 +4995,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
-       haswell_mode_set_planes_workaround(intel_crtc);
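+       /*
+        * The workaround pipe is now precomputed in the crtc state instead
+        * of being scanned for here at enable time.
+        */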
+       hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+       if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+               intel_wait_for_vblank(dev, hsw_workaround_pipe);
+               intel_wait_for_vblank(dev, hsw_workaround_pipe);
+       }
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5097,9 +5026,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        u32 reg, temp;
 
-       if (!intel_crtc->active)
-               return;
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
 
@@ -5138,18 +5064,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }
 
-               /* disable PCH DPLL */
-               intel_disable_shared_dpll(intel_crtc);
-
                ironlake_fdi_pll_disable(intel_crtc);
        }
 
        intel_crtc->active = false;
        intel_update_watermarks(crtc);
-
-       mutex_lock(&dev->struct_mutex);
-       intel_fbc_update(dev);
-       mutex_unlock(&dev->struct_mutex);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5160,9 +5079,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
-       if (!intel_crtc->active)
-               return;
-
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
@@ -5182,7 +5098,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
        if (INTEL_INFO(dev)->gen == 9)
-               skylake_pfit_update(intel_crtc, 0);
+               skylake_scaler_disable(intel_crtc);
        else if (INTEL_INFO(dev)->gen < 9)
                ironlake_pfit_disable(intel_crtc);
        else
@@ -5201,22 +5117,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc->active = false;
        intel_update_watermarks(crtc);
-
-       mutex_lock(&dev->struct_mutex);
-       intel_fbc_update(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       if (intel_crtc_to_shared_dpll(intel_crtc))
-               intel_disable_shared_dpll(intel_crtc);
-}
-
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       intel_put_shared_dpll(intel_crtc);
 }
 
-
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
@@ -5245,6 +5147,7 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
 {
        switch (port) {
        case PORT_A:
+       case PORT_E:
                return POWER_DOMAIN_PORT_DDI_A_4_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_4_LANES;
@@ -5298,6 +5201,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
        unsigned long mask;
        enum transcoder transcoder;
 
+       if (!crtc->state->active)
+               return 0;
+
        transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
        mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5312,45 +5218,131 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
        return mask;
 }
 
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum intel_display_power_domain domain;
+       unsigned long domains, new_domains, old_domains;
+
+       old_domains = intel_crtc->enabled_power_domains;
+       intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
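+       /*
+        * Get the domains needed by the new state first and return the no
+        * longer needed ones for the caller to put, so the power wells are
+        * never toggled off and immediately back on.
+        */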
+       domains = new_domains & ~old_domains;
+
+       for_each_power_domain(domain, domains)
+               intel_display_power_get(dev_priv, domain);
+
+       return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+                                     unsigned long domains)
+{
+       enum intel_display_power_domain domain;
+
+       for_each_power_domain(domain, domains)
+               intel_display_power_put(dev_priv, domain);
+}
+
 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-       struct intel_crtc *crtc;
+       unsigned long put_domains[I915_MAX_PIPES] = {};
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int i;
 
-       /*
-        * First get all needed power domains, then put all unneeded, to avoid
-        * any unnecessary toggling of the power wells.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               enum intel_display_power_domain domain;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (needs_modeset(crtc->state))
+                       put_domains[to_intel_crtc(crtc)->pipe] =
+                               modeset_get_crtc_power_domains(crtc);
+       }
 
-               if (!crtc->base.state->enable)
-                       continue;
+       if (dev_priv->display.modeset_commit_cdclk) {
+               unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
+
+               if (cdclk != dev_priv->cdclk_freq &&
+                   !WARN_ON(!state->allow_modeset))
+                       dev_priv->display.modeset_commit_cdclk(state);
+       }
+
+       for (i = 0; i < I915_MAX_PIPES; i++)
+               if (put_domains[i])
+                       modeset_put_power_domains(dev_priv, put_domains[i]);
+}
+
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-               pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+       if (IS_SKYLAKE(dev)) {
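+               /* The DFSM fuse straps cap the supported cdclk on SKL. */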
+               u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 
-               for_each_power_domain(domain, pipe_domains[crtc->pipe])
-                       intel_display_power_get(dev_priv, domain);
+               if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+                       dev_priv->max_cdclk_freq = 675000;
+               else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+                       dev_priv->max_cdclk_freq = 540000;
+               else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+                       dev_priv->max_cdclk_freq = 450000;
+               else
+                       dev_priv->max_cdclk_freq = 337500;
+       } else if (IS_BROADWELL(dev))  {
+               /*
+                * FIXME with extra cooling we can allow
+                * 540 MHz for ULX and 675 MHz for ULT.
+                * How can we know if extra cooling is
+                * available? PCI ID, VTB, something else?
+                */
+               if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+                       dev_priv->max_cdclk_freq = 450000;
+               else if (IS_BDW_ULX(dev))
+                       dev_priv->max_cdclk_freq = 450000;
+               else if (IS_BDW_ULT(dev))
+                       dev_priv->max_cdclk_freq = 540000;
+               else
+                       dev_priv->max_cdclk_freq = 675000;
+       } else if (IS_CHERRYVIEW(dev)) {
+               dev_priv->max_cdclk_freq = 320000;
+       } else if (IS_VALLEYVIEW(dev)) {
+               dev_priv->max_cdclk_freq = 400000;
+       } else {
+               /* otherwise assume cdclk is fixed */
+               dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
        }
 
-       if (dev_priv->display.modeset_global_resources)
-               dev_priv->display.modeset_global_resources(state);
+       DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+                        dev_priv->max_cdclk_freq);
+}
 
-       for_each_intel_crtc(dev, crtc) {
-               enum intel_display_power_domain domain;
+static void intel_update_cdclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-               for_each_power_domain(domain, crtc->enabled_power_domains)
-                       intel_display_power_put(dev_priv, domain);
+       dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+       DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+                        dev_priv->cdclk_freq);
 
-               crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+       /*
+        * Program the gmbus_freq based on the cdclk frequency.
+        * BSpec erroneously claims we should aim for 4MHz, but
+        * in fact 1MHz is the correct frequency.
+        */
+       if (IS_VALLEYVIEW(dev)) {
+               I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
        }
 
-       intel_display_set_init_power(dev_priv, false);
+       if (dev_priv->max_cdclk_freq == 0)
+               intel_update_max_cdclk(dev);
 }
 
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t divider;
@@ -5466,7 +5458,7 @@ void broxton_set_cdclk(struct drm_device *dev, int frequency)
                return;
        }
 
-       dev_priv->cdclk_freq = frequency;
+       intel_update_cdclk(dev);
 }
 
 void broxton_init_cdclk(struct drm_device *dev)
@@ -5641,6 +5633,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 {
+       struct drm_device *dev = dev_priv->dev;
        u32 freq_select, pcu_ack;
 
        DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5681,6 +5674,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
        mutex_lock(&dev_priv->rps.hw_lock);
        sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
        mutex_unlock(&dev_priv->rps.hw_lock);
+
+       intel_update_cdclk(dev);
 }
 
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5751,22 +5746,6 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
        return vco_freq[hpll_freq] * 1000;
 }
 
-static void vlv_update_cdclk(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-       DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-                        dev_priv->cdclk_freq);
-
-       /*
-        * Program the gmbus_freq based on the cdclk frequency.
-        * BSpec erroneously claims we should aim for 4MHz, but
-        * in fact 1MHz is the correct frequency.
-        */
-       I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
@@ -5830,7 +5809,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
        mutex_unlock(&dev_priv->sb_lock);
 
-       vlv_update_cdclk(dev);
+       intel_update_cdclk(dev);
 }
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5871,7 +5850,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       vlv_update_cdclk(dev);
+       intel_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5934,11 +5913,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
        int max_pixclk = 0;
 
        for_each_intel_crtc(dev, intel_crtc) {
-               if (state)
-                       crtc_state =
-                               intel_atomic_get_crtc_state(state, intel_crtc);
-               else
-                       crtc_state = intel_crtc->config;
+               crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
 
@@ -5952,39 +5927,32 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
        return max_pixclk;
 }
 
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       int max_pixclk = intel_mode_max_pixclk(state->dev, state);
-       int cdclk, i;
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int max_pixclk = intel_mode_max_pixclk(dev, state);
 
        if (max_pixclk < 0)
                return max_pixclk;
 
-       if (IS_VALLEYVIEW(dev_priv))
-               cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-       else
-               cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+       to_intel_atomic_state(state)->cdclk =
+               valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-       if (cdclk == dev_priv->cdclk_freq)
-               return 0;
+       return 0;
+}
 
-       /* add all active pipes to the state */
-       for_each_crtc(state->dev, crtc) {
-               if (!crtc->state->enable)
-                       continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int max_pixclk = intel_mode_max_pixclk(dev, state);
 
-               crtc_state = drm_atomic_get_crtc_state(state, crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-       }
+       if (max_pixclk < 0)
+               return max_pixclk;
 
-       /* disable/enable all currently active pipes while we change cdclk */
-       for_each_crtc_in_state(state, crtc, crtc_state, i)
-               if (crtc_state->enable)
-                       crtc_state->mode_changed = true;
+       to_intel_atomic_state(state)->cdclk =
+               broxton_calc_cdclk(dev_priv, max_pixclk);
 
        return 0;
 }
@@ -6001,7 +5969,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
        if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
                /* CHV suggested value is 31 or 63 */
                if (IS_CHERRYVIEW(dev_priv))
-                       credits = PFI_CREDIT_31;
+                       credits = PFI_CREDIT_63;
                else
                        credits = PFI_CREDIT(15);
        } else {
@@ -6025,41 +5993,31 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
        WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
+       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-       int req_cdclk;
-
-       /* The path in intel_mode_max_pixclk() with a NULL atomic state should
-        * never fail. */
-       if (WARN_ON(max_pixclk < 0))
-               return;
-
-       req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-       if (req_cdclk != dev_priv->cdclk_freq) {
-               /*
-                * FIXME: We can end up here with all power domains off, yet
-                * with a CDCLK frequency other than the minimum. To account
-                * for this take the PIPE-A power domain, which covers the HW
-                * blocks needed for the following programming. This can be
-                * removed once it's guaranteed that we get here either with
-                * the minimum CDCLK set, or the required power domains
-                * enabled.
-                */
-               intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+       /*
+        * FIXME: We can end up here with all power domains off, yet
+        * with a CDCLK frequency other than the minimum. To account
+        * for this take the PIPE-A power domain, which covers the HW
+        * blocks needed for the following programming. This can be
+        * removed once it's guaranteed that we get here either with
+        * the minimum CDCLK set, or the required power domains
+        * enabled.
+        */
+       intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
-               if (IS_CHERRYVIEW(dev))
-                       cherryview_set_cdclk(dev, req_cdclk);
-               else
-                       valleyview_set_cdclk(dev, req_cdclk);
+       if (IS_CHERRYVIEW(dev))
+               cherryview_set_cdclk(dev, req_cdclk);
+       else
+               valleyview_set_cdclk(dev, req_cdclk);
 
-               vlv_program_pfi_credits(dev_priv);
+       vlv_program_pfi_credits(dev_priv);
 
-               intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
-       }
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6071,9 +6029,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        bool is_dsi;
 
-       WARN_ON(!crtc->state->enable);
-
-       if (intel_crtc->active)
+       if (WARN_ON(intel_crtc->active))
                return;
 
        is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6122,7 +6078,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc_load_lut(crtc);
 
-       intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);
 
        assert_vblank_disabled(crtc);
@@ -6149,9 +6104,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
-       WARN_ON(!crtc->state->enable);
-
-       if (intel_crtc->active)
+       if (WARN_ON(intel_crtc->active))
                return;
 
        i9xx_set_pll_dividers(intel_crtc);
@@ -6211,9 +6164,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
-       if (!intel_crtc->active)
-               return;
-
        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
@@ -6250,88 +6200,89 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc->active = false;
        intel_update_watermarks(crtc);
-
-       mutex_lock(&dev->struct_mutex);
-       intel_fbc_update(dev);
-       mutex_unlock(&dev->struct_mutex);
-}
-
-static void i9xx_crtc_off(struct drm_crtc *crtc)
-{
 }
 
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
        unsigned long domains;
 
-       if (enable) {
-               if (!intel_crtc->active) {
-                       domains = get_crtc_power_domains(crtc);
-                       for_each_power_domain(domain, domains)
-                               intel_display_power_get(dev_priv, domain);
-                       intel_crtc->enabled_power_domains = domains;
-
-                       dev_priv->display.crtc_enable(crtc);
-                       intel_crtc_enable_planes(crtc);
-               }
-       } else {
-               if (intel_crtc->active) {
-                       intel_crtc_disable_planes(crtc);
-                       dev_priv->display.crtc_disable(crtc);
+       if (!intel_crtc->active)
+               return;
 
-                       domains = intel_crtc->enabled_power_domains;
-                       for_each_power_domain(domain, domains)
-                               intel_display_power_put(dev_priv, domain);
-                       intel_crtc->enabled_power_domains = 0;
-               }
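+       /*
+        * Quiesce pending flips and the primary plane before turning off
+        * the planes, pipe, shared DPLL and power domains.
+        */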
+       if (to_intel_plane_state(crtc->primary->state)->visible) {
+               intel_crtc_wait_for_pending_flips(crtc);
+               intel_pre_disable_primary(crtc);
        }
+
+       intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
+       dev_priv->display.crtc_disable(crtc);
+       intel_disable_shared_dpll(intel_crtc);
+
+       domains = intel_crtc->enabled_power_domains;
+       for_each_power_domain(domain, domains)
+               intel_display_power_put(dev_priv, domain);
+       intel_crtc->enabled_power_domains = 0;
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
  */
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
+int intel_display_suspend(struct drm_device *dev)
 {
-       struct drm_device *dev = crtc->dev;
-       struct intel_encoder *intel_encoder;
-       bool enable = false;
-
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-               enable |= intel_encoder->connectors_active;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+       struct drm_atomic_state *state;
+       struct drm_crtc *crtc;
+       unsigned crtc_mask = 0;
+       int ret = 0;
 
-       intel_crtc_control(crtc, enable);
+       if (WARN_ON(!ctx))
+               return 0;
 
-       crtc->state->active = enable;
-}
+       lockdep_assert_held(&ctx->ww_ctx);
+       state = drm_atomic_state_alloc(dev);
+       if (WARN_ON(!state))
+               return -ENOMEM;
 
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_connector *connector;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       state->acquire_ctx = ctx;
+       state->allow_modeset = true;
 
-       intel_crtc_disable_planes(crtc);
-       dev_priv->display.crtc_disable(crtc);
-       dev_priv->display.off(crtc);
+       for_each_crtc(dev, crtc) {
+               struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_crtc_state(state, crtc);
 
-       drm_plane_helper_disable(crtc->primary);
+               ret = PTR_ERR_OR_ZERO(crtc_state);
+               if (ret)
+                       goto free;
 
-       /* Update computed state. */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (!connector->encoder || !connector->encoder->crtc)
+               if (!crtc_state->active)
                        continue;
 
-               if (connector->encoder->crtc != crtc)
-                       continue;
+               crtc_state->active = false;
+               crtc_mask |= 1 << drm_crtc_index(crtc);
+       }
+
+       if (crtc_mask) {
+               ret = drm_atomic_commit(state);
 
-               connector->dpms = DRM_MODE_DPMS_OFF;
-               to_intel_encoder(connector->encoder)->connectors_active = false;
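+               /*
+                * The commit cleared active in the crtc states; set it
+                * back so the paired intel_modeset_setup_hw_state() knows
+                * which crtcs to bring back up.
+                */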
+               if (!ret) {
+                       for_each_crtc(dev, crtc)
+                               if (crtc_mask & (1 << drm_crtc_index(crtc)))
+                                       crtc->state->active = true;
+
+                       return ret;
+               }
        }
+
+free:
+       if (ret)
+               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+       drm_atomic_state_free(state);
+       return ret;
 }
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6293,42 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_encoder);
 }
 
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
-       if (mode == DRM_MODE_DPMS_ON) {
-               encoder->connectors_active = true;
-
-               intel_crtc_update_dpms(encoder->base.crtc);
-       } else {
-               encoder->connectors_active = false;
-
-               intel_crtc_update_dpms(encoder->base.crtc);
-       }
-}
-
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
 static void intel_connector_check_state(struct intel_connector *connector)
 {
+       struct drm_crtc *crtc = connector->base.state->crtc;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.base.id,
+                     connector->base.name);
+
        if (connector->get_hw_state(connector)) {
-               struct intel_encoder *encoder = connector->encoder;
-               struct drm_crtc *crtc;
-               bool encoder_enabled;
-               enum pipe pipe;
+               struct drm_encoder *encoder = &connector->encoder->base;
+               struct drm_connector_state *conn_state = connector->base.state;
 
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                             connector->base.base.id,
-                             connector->base.name);
+               I915_STATE_WARN(!crtc,
+                        "connector enabled without attached crtc\n");
 
-               /* there is no real hw state for MST connectors */
-               if (connector->mst_port)
+               if (!crtc)
                        return;
 
-               I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
-                    "wrong connector dpms state\n");
-               I915_STATE_WARN(connector->base.encoder != &encoder->base,
-                    "active connector not linked to encoder\n");
+               I915_STATE_WARN(!crtc->state->active,
+                     "connector is active, but attached crtc isn't\n");
 
-               if (encoder) {
-                       I915_STATE_WARN(!encoder->connectors_active,
-                            "encoder->connectors_active not set\n");
-
-                       encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-                       I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
-                       if (I915_STATE_WARN_ON(!encoder->base.crtc))
-                               return;
+               if (!encoder)
+                       return;
 
-                       crtc = encoder->base.crtc;
+               I915_STATE_WARN(conn_state->best_encoder != encoder,
+                       "atomic encoder doesn't match attached encoder\n");
 
-                       I915_STATE_WARN(!crtc->state->enable,
-                                       "crtc not enabled\n");
-                       I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-                       I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
-                            "encoder active on the wrong pipe\n");
-               }
+               I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+                       "attached encoder crtc differs from connector crtc\n");
+       } else {
+               I915_STATE_WARN(crtc && crtc->state->active,
+                       "attached crtc is active, but connector isn't\n");
+               I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+                       "best encoder set without crtc!\n");
        }
 }
 
@@ -6429,26 +6360,6 @@ struct intel_connector *intel_connector_alloc(void)
        return connector;
 }
 
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
-       /* All the simple cases only support two dpms states. */
-       if (mode != DRM_MODE_DPMS_ON)
-               mode = DRM_MODE_DPMS_OFF;
-
-       if (mode == connector->dpms)
-               return;
-
-       connector->dpms = mode;
-
-       /* Only need to change hw state when actually enabled */
-       if (connector->encoder)
-               intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
-       intel_modeset_check_state(connector->dev);
-}
-
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6586,12 +6497,36 @@ retry:
        return ret;
 }
 
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+                                    struct intel_crtc_state *pipe_config)
+{
+       if (pipe_config->pipe_bpp > 24)
+               return false;
+
+       /* HSW can handle pixel rate up to cdclk? */
+       if (IS_HASWELL(dev_priv->dev))
+               return true;
+
+       /*
+        * We compare against max which means we must take
+        * the increased cdclk requirement into account when
+        * calculating the new cdclk.
+        *
+        * Should measure whether using a lower cdclk w/o IPS would be
+        * preferable.
+        */
+       return ilk_pipe_pixel_rate(pipe_config) <=
+               dev_priv->max_cdclk_freq * 95 / 100;
+}
+
 static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
 {
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        pipe_config->ips_enabled = i915.enable_ips &&
-                                  hsw_crtc_supports_ips(crtc) &&
-                                  pipe_config->pipe_bpp <= 24;
+               hsw_crtc_supports_ips(crtc) &&
+               pipe_config_supports_ips(dev_priv, pipe_config);
 }
 
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6535,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int ret;
 
        /* FIXME should check pixel clock limits on all platforms */
        if (INTEL_INFO(dev)->gen < 4) {
-               int clock_limit =
-                       dev_priv->display.get_display_clock_speed(dev);
+               int clock_limit = dev_priv->max_cdclk_freq;
 
                /*
                 * Enable pixel doubling when the dot clock
@@ -6647,14 +6580,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
        if (pipe_config->has_pch_encoder)
                return ironlake_fdi_compute_config(crtc, pipe_config);
 
-       /* FIXME: remove below call once atomic mode set is place and all crtc
-        * related checks called from atomic_crtc_check function */
-       ret = 0;
-       DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
-               crtc, pipe_config->base.state);
-       ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
-       return ret;
+       return 0;
 }
 
 static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6590,8 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
        uint32_t cdctl = I915_READ(CDCLK_CTL);
        uint32_t linkrate;
 
-       if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
-               WARN(1, "LCPLL1 not enabled\n");
+       if (!(lcpll1 & LCPLL_PLL_ENABLE))
                return 24000; /* 24MHz is the cd freq with NSSC ref */
-       }
 
        if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
                return 540000;
@@ -6706,6 +6630,34 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
        return 24000;
 }
 
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       uint32_t cdctl = I915_READ(CDCLK_CTL);
+       uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+       uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+       int cdclk;
+
+       if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+               return 19200;
+
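+       /*
+        * The DE PLL runs at the 19.2 MHz reference times the programmed
+        * ratio; the CD2X divider below derives cdclk from that clock.
+        */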
+       cdclk = 19200 * pll_ratio / 2;
+
+       switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+       case BXT_CDCLK_CD2X_DIV_SEL_1:
+               return cdclk;  /* 576MHz or 624MHz */
+       case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+               return cdclk * 2 / 3; /* 384MHz */
+       case BXT_CDCLK_CD2X_DIV_SEL_2:
+               return cdclk / 2; /* 288MHz */
+       case BXT_CDCLK_CD2X_DIV_SEL_4:
+               return cdclk / 4; /* 144MHz */
+       }
+
+       /* error case, behave as if the DE PLL isn't enabled */
+       return 19200;
+}
+
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6786,37 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
        return 266667;
 }
 
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
        u16 hpllcc = 0;
+
+       /*
+        * 852GM/852GMV only supports 133 MHz and the HPLLCC
+        * encoding is different :(
+        * FIXME is this the right way to detect 852GM/852GMV?
+        */
+       if (dev->pdev->revision == 0x1)
+               return 133333;
+
+       pci_bus_read_config_word(dev->pdev->bus,
+                                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
        /* Assume that the hardware is in the high speed state.  This
         * should be the default.
         */
        switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
        case GC_CLOCK_133_200:
+       case GC_CLOCK_133_200_2:
        case GC_CLOCK_100_200:
                return 200000;
        case GC_CLOCK_166_250:
                return 250000;
        case GC_CLOCK_100_133:
                return 133333;
+       case GC_CLOCK_133_266:
+       case GC_CLOCK_133_266_2:
+       case GC_CLOCK_166_266:
+               return 266667;
        }
 
        /* Shouldn't happen */
@@ -6859,6 +6828,175 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
        return 133333;
 }
 
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       static const unsigned int blb_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+               [4] = 6400000,
+       };
+       static const unsigned int pnv_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+               [4] = 2666667,
+       };
+       static const unsigned int cl_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 6400000,
+               [4] = 3333333,
+               [5] = 3566667,
+               [6] = 4266667,
+       };
+       static const unsigned int elk_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+       };
+       static const unsigned int ctg_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 6400000,
+               [4] = 2666667,
+               [5] = 4266667,
+       };
+       const unsigned int *vco_table;
+       unsigned int vco;
+       uint8_t tmp = 0;
+
+       /* FIXME other chipsets? */
+       if (IS_GM45(dev))
+               vco_table = ctg_vco;
+       else if (IS_G4X(dev))
+               vco_table = elk_vco;
+       else if (IS_CRESTLINE(dev))
+               vco_table = cl_vco;
+       else if (IS_PINEVIEW(dev))
+               vco_table = pnv_vco;
+       else if (IS_G33(dev))
+               vco_table = blb_vco;
+       else
+               return 0;
+
+       tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+       vco = vco_table[tmp & 0x7];
+       if (vco == 0)
+               DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+       else
+               DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+       return vco;
+}
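+
+/*
+ * Example decode: on CTG an HPLLVCO readout of 0x03 selects
+ * ctg_vco[3] = 6400000 kHz; encodings without a table entry read back
+ * as 0 and are flagged with the DRM_ERROR above.
+ */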
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+       unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+       uint16_t tmp = 0;
+
+       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+       cdclk_sel = (tmp >> 12) & 0x1;
+
+       switch (vco) {
+       case 2666667:
+       case 4000000:
+       case 5333333:
+               return cdclk_sel ? 333333 : 222222;
+       case 3200000:
+               return cdclk_sel ? 320000 : 228571;
+       default:
+               DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+               return 222222;
+       }
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+       static const uint8_t div_3200[] = { 16, 10,  8 };
+       static const uint8_t div_4000[] = { 20, 12, 10 };
+       static const uint8_t div_5333[] = { 24, 16, 14 };
+       const uint8_t *div_table;
+       unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+       uint16_t tmp = 0;
+
+       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+       cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+       if (cdclk_sel >= ARRAY_SIZE(div_3200))
+               goto fail;
+
+       switch (vco) {
+       case 3200000:
+               div_table = div_3200;
+               break;
+       case 4000000:
+               div_table = div_4000;
+               break;
+       case 5333333:
+               div_table = div_5333;
+               break;
+       default:
+               goto fail;
+       }
+
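+       /*
+        * Worked example: with a 5333333 kHz HPLL VCO and a GCFGC divider
+        * field of 1 (index 0), div_5333[0] = 24 gives
+        * DIV_ROUND_CLOSEST(5333333, 24) = 222222 kHz.
+        */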
+       return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+       return 200000;
+}
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+       static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
+       static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
+       static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+       static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+       const uint8_t *div_table;
+       unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+       uint16_t tmp = 0;
+
+       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+       cdclk_sel = (tmp >> 4) & 0x7;
+
+       if (cdclk_sel >= ARRAY_SIZE(div_3200))
+               goto fail;
+
+       switch (vco) {
+       case 3200000:
+               div_table = div_3200;
+               break;
+       case 4000000:
+               div_table = div_4000;
+               break;
+       case 4800000:
+               div_table = div_4800;
+               break;
+       case 5333333:
+               div_table = div_5333;
+               break;
+       default:
+               goto fail;
+       }
+
+       return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+       return 190476;
+}
+
 static void
 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 {
@@ -7064,8 +7202,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
                intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
-static void vlv_update_pll(struct intel_crtc *crtc,
-                          struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+                            struct intel_crtc_state *pipe_config)
 {
        u32 dpll, dpll_md;
 
@@ -7074,8 +7212,8 @@ static void vlv_update_pll(struct intel_crtc *crtc,
         * clock for pipe B, since VGA hotplug / manual detection depends
         * on it.
         */
-       dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-               DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+       dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+               DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
        /* We should never disable this, set it here for state tracking */
        if (crtc->pipe == PIPE_B)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7316,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_update_pll(struct intel_crtc *crtc,
-                          struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+                            struct intel_crtc_state *pipe_config)
 {
-       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-               DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
                DPLL_VCO_ENABLE;
        if (crtc->pipe != PIPE_A)
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7456,11 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
        };
 
        if (IS_CHERRYVIEW(dev)) {
-               chv_update_pll(crtc, &pipe_config);
+               chv_compute_dpll(crtc, &pipe_config);
                chv_prepare_pll(crtc, &pipe_config);
                chv_enable_pll(crtc, &pipe_config);
        } else {
-               vlv_update_pll(crtc, &pipe_config);
+               vlv_compute_dpll(crtc, &pipe_config);
                vlv_prepare_pll(crtc, &pipe_config);
                vlv_enable_pll(crtc, &pipe_config);
        }
@@ -7344,10 +7482,10 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
                vlv_disable_pll(to_i915(dev), pipe);
 }
 
-static void i9xx_update_pll(struct intel_crtc *crtc,
-                           struct intel_crtc_state *crtc_state,
-                           intel_clock_t *reduced_clock,
-                           int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+                             struct intel_crtc_state *crtc_state,
+                             intel_clock_t *reduced_clock,
+                             int num_connectors)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7559,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
        }
 }
 
-static void i8xx_update_pll(struct intel_crtc *crtc,
-                           struct intel_crtc_state *crtc_state,
-                           intel_clock_t *reduced_clock,
-                           int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+                             struct intel_crtc_state *crtc_state,
+                             intel_clock_t *reduced_clock,
+                             int num_connectors)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7722,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
        mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 
        mode->flags = pipe_config->base.adjusted_mode.flags;
+       mode->type = DRM_MODE_TYPE_DRIVER;
 
        mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
        mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+       mode->hsync = drm_mode_hsync(mode);
+       mode->vrefresh = drm_mode_vrefresh(mode);
+       drm_mode_set_name(mode);
 }
 
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7801,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int refclk, num_connectors = 0;
-       intel_clock_t clock, reduced_clock;
-       bool ok, has_reduced_clock = false;
-       bool is_lvds = false, is_dsi = false;
+       intel_clock_t clock;
+       bool ok;
+       bool is_dsi = false;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
        struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7821,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                encoder = to_intel_encoder(connector_state->best_encoder);
 
                switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
                case INTEL_OUTPUT_DSI:
                        is_dsi = true;
                        break;
@@ -7712,19 +7852,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                        return -EINVAL;
                }
 
-               if (is_lvds && dev_priv->lvds_downclock_avail) {
-                       /*
-                        * Ensure we match the reduced clock's P to the target
-                        * clock.  If the clocks don't match, we can't switch
-                        * the display clock by using the FP0/FP1. In such case
-                        * we will disable the LVDS downclock feature.
-                        */
-                       has_reduced_clock =
-                               dev_priv->display.find_dpll(limit, crtc_state,
-                                                           dev_priv->lvds_downclock,
-                                                           refclk, &clock,
-                                                           &reduced_clock);
-               }
                /* Compat-code for transition, will disappear. */
                crtc_state->dpll.n = clock.n;
                crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7861,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
        }
 
        if (IS_GEN2(dev)) {
-               i8xx_update_pll(crtc, crtc_state,
-                               has_reduced_clock ? &reduced_clock : NULL,
-                               num_connectors);
+               i8xx_compute_dpll(crtc, crtc_state, NULL,
+                                 num_connectors);
        } else if (IS_CHERRYVIEW(dev)) {
-               chv_update_pll(crtc, crtc_state);
+               chv_compute_dpll(crtc, crtc_state);
        } else if (IS_VALLEYVIEW(dev)) {
-               vlv_update_pll(crtc, crtc_state);
+               vlv_compute_dpll(crtc, crtc_state);
        } else {
-               i9xx_update_pll(crtc, crtc_state,
-                               has_reduced_clock ? &reduced_clock : NULL,
-                               num_connectors);
+               i9xx_compute_dpll(crtc, crtc_state, NULL,
+                                 num_connectors);
        }
 
        return 0;
@@ -7804,10 +7929,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
-       vlv_clock(refclk, &clock);
-
-       /* clock.dot is the fast clock */
-       pipe_config->port_clock = clock.dot / 5;
+       pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
 }
 
 static void
@@ -7906,10 +8028,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
-       chv_clock(refclk, &clock);
-
-       /* clock.dot is the fast clock */
-       pipe_config->port_clock = clock.dot / 5;
+       pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8558,9 +8677,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        int refclk;
        const intel_limit_t *limit;
-       bool ret, is_lvds = false;
-
-       is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+       bool ret;
 
        refclk = ironlake_get_refclk(crtc_state);
 
@@ -8576,20 +8693,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
        if (!ret)
                return false;
 
-       if (is_lvds && dev_priv->lvds_downclock_avail) {
-               /*
-                * Ensure we match the reduced clock's P to the target clock.
-                * If the clocks don't match, we can't switch the display clock
-                * by using the FP0/FP1. In such case we will disable the LVDS
-                * downclock feature.
-               */
-               *has_reduced_clock =
-                       dev_priv->display.find_dpll(limit, crtc_state,
-                                                   dev_priv->lvds_downclock,
-                                                   refclk, clock,
-                                                   reduced_clock);
-       }
-
        return true;
 }
 
@@ -9297,6 +9400,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        }
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_update_cdclk(dev_priv->dev);
 }
 
 /*
@@ -9358,50 +9462,189 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
        intel_prepare_ddi(dev);
 }
 
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-       int req_cdclk;
-
-       /* see the comment in valleyview_modeset_global_resources */
-       if (WARN_ON(max_pixclk < 0))
-               return;
-
-       req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 
-       if (req_cdclk != dev_priv->cdclk_freq)
-               broxton_set_cdclk(dev, req_cdclk);
+       broxton_set_cdclk(dev, req_cdclk);
 }
 
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
-                                     struct intel_crtc_state *crtc_state)
+/* compute the max pixel rate for the new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
-       if (!intel_ddi_pll_select(crtc, crtc_state))
-               return -EINVAL;
+       struct intel_crtc *intel_crtc;
+       struct intel_crtc_state *crtc_state;
+       int max_pixel_rate = 0;
 
-       crtc->lowfreq_avail = false;
+       for_each_intel_crtc(state->dev, intel_crtc) {
+               int pixel_rate;
 
-       return 0;
+               crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+
+               if (!crtc_state->base.enable)
+                       continue;
+
+               pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+               /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+               if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+                       pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
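+               /*
+                * e.g. with IPS a 450000 kHz pixel rate is bumped to
+                * DIV_ROUND_UP(450000 * 100, 95) = 473685 kHz before
+                * taking the max below.
+                */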
+
+               max_pixel_rate = max(max_pixel_rate, pixel_rate);
+       }
+
+       return max_pixel_rate;
 }
 
-static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
-                               enum port port,
-                               struct intel_crtc_state *pipe_config)
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       switch (port) {
-       case PORT_A:
-               pipe_config->ddi_pll_sel = SKL_DPLL0;
-               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
-               break;
-       case PORT_B:
-               pipe_config->ddi_pll_sel = SKL_DPLL1;
-               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
-               break;
-       case PORT_C:
-               pipe_config->ddi_pll_sel = SKL_DPLL2;
-               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t val, data;
+       int ret;
+
+       if (WARN((I915_READ(LCPLL_CTL) &
+                 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+                  LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+                  LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+                  LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+                "trying to change cdclk frequency with cdclk not enabled\n"))
+               return;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       ret = sandybridge_pcode_write(dev_priv,
+                                     BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (ret) {
+               DRM_ERROR("failed to inform pcode about cdclk change\n");
+               return;
+       }
+
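+       /*
+        * Sequence: temporarily source the CD clock from FCLK, rewrite the
+        * LCPLL frequency select field, switch back to the LCPLL, then
+        * report the new frequency to pcode.
+        */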
+       val = I915_READ(LCPLL_CTL);
+       val |= LCPLL_CD_SOURCE_FCLK;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+                              LCPLL_CD_SOURCE_FCLK_DONE, 1))
+               DRM_ERROR("Switching to FCLK failed\n");
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_CLK_FREQ_MASK;
+
+       switch (cdclk) {
+       case 450000:
+               val |= LCPLL_CLK_FREQ_450;
+               data = 0;
+               break;
+       case 540000:
+               val |= LCPLL_CLK_FREQ_54O_BDW;
+               data = 1;
+               break;
+       case 337500:
+               val |= LCPLL_CLK_FREQ_337_5_BDW;
+               data = 2;
+               break;
+       case 675000:
+               val |= LCPLL_CLK_FREQ_675_BDW;
+               data = 3;
+               break;
+       default:
+               WARN(1, "invalid cdclk frequency\n");
+               return;
+       }
+
+       I915_WRITE(LCPLL_CTL, val);
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_CD_SOURCE_FCLK;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+                               LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+               DRM_ERROR("Switching back to LCPLL failed\n");
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       intel_update_cdclk(dev);
+
+       WARN(cdclk != dev_priv->cdclk_freq,
+            "cdclk requested %d kHz but got %d kHz\n",
+            cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
+       int max_pixclk = ilk_max_pixel_rate(state);
+       int cdclk;
+
+       /*
+        * FIXME should also account for plane ratio
+        * once 64bpp pixel formats are supported.
+        */
+       if (max_pixclk > 540000)
+               cdclk = 675000;
+       else if (max_pixclk > 450000)
+               cdclk = 540000;
+       else if (max_pixclk > 337500)
+               cdclk = 450000;
+       else
+               cdclk = 337500;
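+
+       /*
+        * e.g. a max pixel rate of 470000 kHz lands in the 450000-540000
+        * bracket above and selects a 540000 kHz cdclk.
+        */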
+
+       /*
+        * FIXME move the cdclk calculation to
+        * compute_config() so we can fail gracefully.
+        */
+       if (cdclk > dev_priv->max_cdclk_freq) {
+               DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+                         cdclk, dev_priv->max_cdclk_freq);
+               cdclk = dev_priv->max_cdclk_freq;
+       }
+
+       to_intel_atomic_state(state)->cdclk = cdclk;
+
+       return 0;
+}
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+       struct drm_device *dev = old_state->dev;
+       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+       broadwell_set_cdclk(dev, req_cdclk);
+}
+
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state)
+{
+       if (!intel_ddi_pll_select(crtc, crtc_state))
+               return -EINVAL;
+
+       crtc->lowfreq_avail = false;
+
+       return 0;
+}
+
+static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       switch (port) {
+       case PORT_A:
+               pipe_config->ddi_pll_sel = SKL_DPLL0;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+               break;
+       case PORT_B:
+               pipe_config->ddi_pll_sel = SKL_DPLL1;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+               break;
+       case PORT_C:
+               pipe_config->ddi_pll_sel = SKL_DPLL2;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
                break;
        default:
                DRM_ERROR("Incorrect port type\n");
@@ -9978,7 +10221,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 retry:
        ret = drm_modeset_lock(&config->connection_mutex, ctx);
        if (ret)
-               goto fail_unlock;
+               goto fail;
 
        /*
         * Algorithm gets a little messy:
@@ -9996,10 +10239,10 @@ retry:
 
                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
-                       goto fail_unlock;
+                       goto fail;
                ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
                if (ret)
-                       goto fail_unlock;
+                       goto fail;
 
                old->dpms_mode = connector->dpms;
                old->load_detect_temp = false;
@@ -10018,9 +10261,6 @@ retry:
                        continue;
                if (possible_crtc->state->enable)
                        continue;
-               /* This can occur when applying the pipe A quirk on resume. */
-               if (to_intel_crtc(possible_crtc)->new_enabled)
-                       continue;
 
                crtc = possible_crtc;
                break;
@@ -10031,20 +10271,17 @@ retry:
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
-               goto fail_unlock;
+               goto fail;
        }
 
        ret = drm_modeset_lock(&crtc->mutex, ctx);
        if (ret)
-               goto fail_unlock;
+               goto fail;
        ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
        if (ret)
-               goto fail_unlock;
-       intel_encoder->new_crtc = to_intel_crtc(crtc);
-       to_intel_connector(connector)->new_encoder = intel_encoder;
+               goto fail;
 
        intel_crtc = to_intel_crtc(crtc);
-       intel_crtc->new_enabled = true;
        old->dpms_mode = connector->dpms;
        old->load_detect_temp = true;
        old->release_fb = NULL;
@@ -10100,7 +10337,7 @@ retry:
 
        drm_mode_copy(&crtc_state->base.mode, mode);
 
-       if (intel_set_mode(crtc, state, true)) {
+       if (drm_atomic_commit(state)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
@@ -10112,9 +10349,7 @@ retry:
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        return true;
 
- fail:
-       intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
        drm_atomic_state_free(state);
        state = NULL;
 
@@ -10160,10 +10395,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                if (IS_ERR(crtc_state))
                        goto fail;
 
-               to_intel_connector(connector)->new_encoder = NULL;
-               intel_encoder->new_crtc = NULL;
-               intel_crtc->new_enabled = false;
-
                connector_state->best_encoder = NULL;
                connector_state->crtc = NULL;
 
@@ -10174,7 +10405,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                if (ret)
                        goto fail;
 
-               ret = intel_set_mode(crtc, state, true);
+               ret = drm_atomic_commit(state);
                if (ret)
                        goto fail;
 
@@ -10222,6 +10453,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        intel_clock_t clock;
+       int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);
 
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10262,9 +10494,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                }
 
                if (IS_PINEVIEW(dev))
-                       pineview_clock(refclk, &clock);
+                       port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
-                       i9xx_clock(refclk, &clock);
+                       port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10290,7 +10522,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                clock.p2 = 2;
                }
 
-               i9xx_clock(refclk, &clock);
+               port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }
 
        /*
@@ -10298,7 +10530,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
-       pipe_config->port_clock = clock.dot;
+       pipe_config->port_clock = port_clock;
 }
 
 int intel_dotclock_calculate(int link_freq,
@@ -10387,42 +10619,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       if (!HAS_GMCH_DISPLAY(dev))
-               return;
-
-       if (!dev_priv->lvds_downclock_avail)
-               return;
-
-       /*
-        * Since this is called by a timer, we should never get here in
-        * the manual case.
-        */
-       if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-               int pipe = intel_crtc->pipe;
-               int dpll_reg = DPLL(pipe);
-               int dpll;
-
-               DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
-               assert_panel_unlocked(dev_priv, pipe);
-
-               dpll = I915_READ(dpll_reg);
-               dpll |= DISPLAY_RATE_SELECT_FPA1;
-               I915_WRITE(dpll_reg, dpll);
-               intel_wait_for_vblank(dev, pipe);
-               dpll = I915_READ(dpll_reg);
-               if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-                       DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-       }
-
-}
-
 void intel_mark_busy(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10440,20 +10636,12 @@ void intel_mark_busy(struct drm_device *dev)
 void intel_mark_idle(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
 
        if (!dev_priv->mm.busy)
                return;
 
        dev_priv->mm.busy = false;
 
-       for_each_crtc(dev, crtc) {
-               if (!crtc->primary->fb)
-                       continue;
-
-               intel_decrease_pllclock(crtc);
-       }
-
        if (INTEL_INFO(dev)->gen >= 6)
                gen6_rps_idle(dev->dev_private);
 
@@ -10485,24 +10673,23 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
        struct intel_unpin_work *work =
                container_of(__work, struct intel_unpin_work, work);
-       struct drm_device *dev = work->crtc->dev;
-       enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_plane *primary = crtc->base.primary;
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+       intel_unpin_fb_obj(work->old_fb, primary->state);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
 
-       intel_fbc_update(dev);
-
        if (work->flip_queued_req)
                i915_gem_request_assign(&work->flip_queued_req, NULL);
        mutex_unlock(&dev->struct_mutex);
 
-       intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+       intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
        drm_framebuffer_unreference(work->old_fb);
 
-       BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
-       atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+       BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+       atomic_dec(&crtc->unpin_work_count);
 
        kfree(work);
 }
@@ -10635,14 +10822,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_engine_cs *ring,
+                                struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -10662,7 +10850,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, 0); /* aux display base address, unused */
 
        intel_mark_page_flip_active(intel_crtc);
-       __intel_ring_advance(ring);
        return 0;
 }
 
@@ -10670,14 +10857,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_engine_cs *ring,
+                                struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -10694,7 +10882,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_NOOP);
 
        intel_mark_page_flip_active(intel_crtc);
-       __intel_ring_advance(ring);
        return 0;
 }
 
@@ -10702,15 +10889,16 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_engine_cs *ring,
+                                struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -10733,7 +10921,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, pf | pipesrc);
 
        intel_mark_page_flip_active(intel_crtc);
-       __intel_ring_advance(ring);
        return 0;
 }
 
@@ -10741,15 +10928,16 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_engine_cs *ring,
+                                struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -10769,7 +10957,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, pf | pipesrc);
 
        intel_mark_page_flip_active(intel_crtc);
-       __intel_ring_advance(ring);
        return 0;
 }
 
@@ -10777,9 +10964,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_engine_cs *ring,
+                                struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;
@@ -10821,11 +11009,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
         * then do the cacheline alignment, and finally emit the
         * MI_DISPLAY_FLIP.
         */
-       ret = intel_ring_cacheline_align(ring);
+       ret = intel_ring_cacheline_align(req);
        if (ret)
                return ret;
 
-       ret = intel_ring_begin(ring, len);
+       ret = intel_ring_begin(req, len);
        if (ret)
                return ret;
 
@@ -10864,7 +11052,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, (MI_NOOP));
 
        intel_mark_page_flip_active(intel_crtc);
-       __intel_ring_advance(ring);
        return 0;
 }
 
@@ -10973,12 +11160,11 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       bool atomic_update;
        u32 start_vbl_count;
 
        intel_mark_page_flip_active(intel_crtc);
 
-       atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+       intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
        if (INTEL_INFO(dev)->gen >= 9)
                skl_do_mmio_flip(intel_crtc);
@@ -10986,8 +11172,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
                /* use_mmio_flip() restricts MMIO flips to ilk+ */
                ilk_do_mmio_flip(intel_crtc);
 
-       if (atomic_update)
-               intel_pipe_update_end(intel_crtc, start_vbl_count);
+       intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11034,7 +11219,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj,
-                                   struct intel_engine_cs *ring,
+                                   struct drm_i915_gem_request *req,
                                    uint32_t flags)
 {
        return -ENODEV;
@@ -11120,6 +11305,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
        bool mmio_flip;
+       struct drm_i915_gem_request *request = NULL;
        int ret;
 
        /*
@@ -11226,7 +11412,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         */
        ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
                                         crtc->primary->state,
-                                        mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+                                        mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
        if (ret)
                goto cleanup_pending;
 
@@ -11242,31 +11428,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                i915_gem_request_assign(&work->flip_queued_req,
                                        obj->last_write_req);
        } else {
-               if (obj->last_write_req) {
-                       ret = i915_gem_check_olr(obj->last_write_req);
+               if (!request) {
+                       ret = i915_gem_request_alloc(ring, ring->default_context, &request);
                        if (ret)
                                goto cleanup_unpin;
                }
 
-               ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+               ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;
 
-               i915_gem_request_assign(&work->flip_queued_req,
-                                       intel_ring_get_request(ring));
+               i915_gem_request_assign(&work->flip_queued_req, request);
        }
 
+       if (request)
+               i915_add_request_no_flush(request);
+
        work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
        work->enable_stall_check = true;
 
        i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
-                         INTEL_FRONTBUFFER_PRIMARY(pipe));
-
-       intel_fbc_disable(dev);
-       intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+                         to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_fbc_disable_crtc(intel_crtc);
+       intel_frontbuffer_flip_prepare(dev,
+                                      to_intel_plane(primary)->frontbuffer_bit);
+
        trace_i915_flip_request(intel_crtc->plane, obj);
 
        return 0;
@@ -11274,6 +11463,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
+       if (request)
+               i915_gem_request_cancel(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11292,8 +11483,35 @@ free_work:
        kfree(work);
 
        if (ret == -EIO) {
+               struct drm_atomic_state *state;
+               struct drm_plane_state *plane_state;
+
 out_hang:
-               ret = intel_plane_restore(primary);
+               state = drm_atomic_state_alloc(dev);
+               if (!state)
+                       return -ENOMEM;
+               state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+               plane_state = drm_atomic_get_plane_state(state, primary);
+               ret = PTR_ERR_OR_ZERO(plane_state);
+               if (!ret) {
+                       drm_atomic_set_fb_for_plane(plane_state, fb);
+
+                       ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+                       if (!ret)
+                               ret = drm_atomic_commit(state);
+               }
+
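+               /* on modeset lock contention, back off and replay the update */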
+               if (ret == -EDEADLK) {
+                       drm_modeset_backoff(state->acquire_ctx);
+                       drm_atomic_state_clear(state);
+                       goto retry;
+               }
+
+               if (ret)
+                       drm_atomic_state_free(state);
+
                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
@@ -11303,99 +11521,296 @@ out_hang:
        return ret;
 }
 
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-       .mode_set_base_atomic = intel_pipe_set_base_atomic,
-       .load_lut = intel_crtc_load_lut,
-       .atomic_begin = intel_begin_crtc_commit,
-       .atomic_flush = intel_finish_crtc_commit,
-};
 
 /**
- * intel_modeset_update_staged_output_state
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
  *
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
+ * Returns true if the watermarks need to be recalculated, false otherwise.
  */
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+static bool intel_wm_need_update(struct drm_plane *plane,
+                                struct drm_plane_state *state)
 {
-       struct intel_crtc *crtc;
-       struct intel_encoder *encoder;
-       struct intel_connector *connector;
-
-       for_each_intel_connector(dev, connector) {
-               connector->new_encoder =
-                       to_intel_encoder(connector->base.encoder);
-       }
+       /* Update watermarks on fb, tiling or rotation changes. */
+       if (!plane->state->fb || !state->fb ||
+           plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+           plane->state->rotation != state->rotation)
+               return true;
 
-       for_each_intel_encoder(dev, encoder) {
-               encoder->new_crtc =
-                       to_intel_crtc(encoder->base.crtc);
-       }
+       if (plane->state->crtc_w != state->crtc_w)
+               return true;
 
-       for_each_intel_crtc(dev, crtc) {
-               crtc->new_enabled = crtc->base.state->enable;
-       }
+       return false;
 }
 
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
-static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+                                   struct drm_plane_state *plane_state)
 {
-       struct intel_connector *connector;
+       struct drm_crtc *crtc = crtc_state->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_plane *plane = plane_state->plane;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane_state *old_plane_state =
+               to_intel_plane_state(plane->state);
+       int idx = intel_crtc->base.base.id, ret;
+       int i = drm_plane_index(plane);
+       bool mode_changed = needs_modeset(crtc_state);
+       bool was_crtc_enabled = crtc->state->active;
+       bool is_crtc_enabled = crtc_state->active;
+
+       bool turn_off, turn_on, visible, was_visible;
+       struct drm_framebuffer *fb = plane_state->fb;
+
+       if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+           plane->type != DRM_PLANE_TYPE_CURSOR) {
+               ret = skl_update_scaler_plane(
+                       to_intel_crtc_state(crtc_state),
+                       to_intel_plane_state(plane_state));
+               if (ret)
+                       return ret;
+       }
 
-       for_each_intel_connector(dev, connector) {
-               if (connector->base.encoder) {
-                       connector->base.state->best_encoder =
-                               connector->base.encoder;
-                       connector->base.state->crtc =
-                               connector->base.encoder->crtc;
-               } else {
-                       connector->base.state->best_encoder = NULL;
-                       connector->base.state->crtc = NULL;
+       /*
+        * Disabling a plane is always okay; we just need to update
+        * fb tracking in a special way since cleanup_fb() won't
+        * get called by the plane helpers.
+        */
+       if (old_plane_state->base.fb && !fb)
+               intel_crtc->atomic.disabled_planes |= 1 << i;
+
+       was_visible = old_plane_state->visible;
+       visible = to_intel_plane_state(plane_state)->visible;
+
+       if (!was_crtc_enabled && WARN_ON(was_visible))
+               was_visible = false;
+
+       if (!is_crtc_enabled && WARN_ON(visible))
+               visible = false;
+
+       if (!was_visible && !visible)
+               return 0;
+
+       turn_off = was_visible && (!visible || mode_changed);
+       turn_on = visible && (!was_visible || mode_changed);
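+       /*
+        * Note that a plane staying visible across a full modeset has both
+        * turn_off and turn_on set; the turn_on branch below takes the
+        * watermark/cxsr path, while the primary plane handling further
+        * down consumes both flags.
+        */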
+
+       DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+                        plane->base.id, fb ? fb->base.id : -1);
+
+       DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+                        plane->base.id, was_visible, visible,
+                        turn_off, turn_on, mode_changed);
+
+       if (turn_on) {
+               intel_crtc->atomic.update_wm_pre = true;
+               /* must disable cxsr around plane enable/disable */
+               if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+                       intel_crtc->atomic.disable_cxsr = true;
+                       /* to potentially re-enable cxsr */
+                       intel_crtc->atomic.wait_vblank = true;
+                       intel_crtc->atomic.update_wm_post = true;
+               }
+       } else if (turn_off) {
+               intel_crtc->atomic.update_wm_post = true;
+               /* must disable cxsr around plane enable/disable */
+               if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+                       if (is_crtc_enabled)
+                               intel_crtc->atomic.wait_vblank = true;
+                       intel_crtc->atomic.disable_cxsr = true;
                }
+       } else if (intel_wm_need_update(plane, plane_state)) {
+               intel_crtc->atomic.update_wm_pre = true;
        }
-}
 
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
-       struct intel_crtc *crtc;
-       struct intel_encoder *encoder;
-       struct intel_connector *connector;
+       if (visible)
+               intel_crtc->atomic.fb_bits |=
+                       to_intel_plane(plane)->frontbuffer_bit;
 
-       for_each_intel_connector(state->dev, connector) {
-               connector->base.encoder = connector->base.state->best_encoder;
-               if (connector->base.encoder)
-                       connector->base.encoder->crtc =
-                               connector->base.state->crtc;
-       }
+       switch (plane->type) {
+       case DRM_PLANE_TYPE_PRIMARY:
+               intel_crtc->atomic.wait_for_flips = true;
+               intel_crtc->atomic.pre_disable_primary = turn_off;
+               intel_crtc->atomic.post_enable_primary = turn_on;
 
-       /* Update crtc of disabled encoders */
-       for_each_intel_encoder(state->dev, encoder) {
-               int num_connectors = 0;
+               if (turn_off) {
+                       /*
+                        * FIXME: if any other plane on the pipe is still
+                        * enabled we could in principle leave IPS on, but
+                        * for now assume IPS must be disabled whenever the
+                        * primary plane is made invisible by writing 0 to
+                        * DSPCNTR in the update_primary_plane function.
+                        */
+                       intel_crtc->atomic.disable_ips = true;
 
-               for_each_intel_connector(state->dev, connector)
-                       if (connector->base.encoder == &encoder->base)
-                               num_connectors++;
+                       intel_crtc->atomic.disable_fbc = true;
+               }
 
-               if (num_connectors == 0)
-                       encoder->base.crtc = NULL;
-       }
+               /*
+                * FBC does not work on some platforms for rotated
+                * planes, so disable it when rotation is not 0 and
+                * update it when rotation is set back to 0.
+                *
+                * FIXME: This is redundant with the fbc update done in
+                * the primary plane enable function, except that the
+                * update there happens too late. We eventually need
+                * to unify this.
+                */
+
+               if (visible &&
+                   INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+                   dev_priv->fbc.crtc == intel_crtc &&
+                   plane_state->rotation != BIT(DRM_ROTATE_0))
+                       intel_crtc->atomic.disable_fbc = true;
+
+               /*
+                * BDW signals flip done immediately if the plane
+                * is disabled, even if the plane enable is already
+                * armed to occur at the next vblank :(
+                */
+               if (turn_on && IS_BROADWELL(dev))
+                       intel_crtc->atomic.wait_vblank = true;
 
-       for_each_intel_crtc(state->dev, crtc) {
-               crtc->base.enabled = crtc->base.state->enable;
-               crtc->config = to_intel_crtc_state(crtc->base.state);
+               intel_crtc->atomic.update_fbc |= visible || mode_changed;
+               break;
+       case DRM_PLANE_TYPE_CURSOR:
+               break;
+       case DRM_PLANE_TYPE_OVERLAY:
+               if (turn_off && !mode_changed) {
+                       intel_crtc->atomic.wait_vblank = true;
+                       intel_crtc->atomic.update_sprite_watermarks |=
+                               1 << i;
+               }
        }
+       return 0;
 }
 
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
-                          struct intel_crtc_state *pipe_config)
+static bool encoders_cloneable(const struct intel_encoder *a,
+                              const struct intel_encoder *b)
 {
-       int bpp = pipe_config->pipe_bpp;
+       /* masks could be asymmetric, so check both ways */
+       return a == b || (a->cloneable & (1 << b->type) &&
+                         b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+                                        struct intel_crtc *crtc,
+                                        struct intel_encoder *encoder)
+{
+       struct intel_encoder *source_encoder;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int i;
+
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               source_encoder =
+                       to_intel_encoder(connector_state->best_encoder);
+               if (!encoders_cloneable(encoder, source_encoder))
+                       return false;
+       }
+
+       return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+                                 struct intel_crtc *crtc)
+{
+       struct intel_encoder *encoder;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int i;
+
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               if (!check_single_encoder_cloning(state, crtc, encoder))
+                       return false;
+       }
+
+       return true;
+}
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc_state);
+       struct drm_atomic_state *state = crtc_state->state;
+       int ret;
+       bool mode_changed = needs_modeset(crtc_state);
+
+       if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+               DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+               return -EINVAL;
+       }
+
+       if (mode_changed && !crtc_state->active)
+               intel_crtc->atomic.update_wm_post = true;
+
+       if (mode_changed && crtc_state->enable &&
+           dev_priv->display.crtc_compute_clock &&
+           !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+               ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+                                                          pipe_config);
+               if (ret)
+                       return ret;
+       }
+
+       ret = 0;
+       if (INTEL_INFO(dev)->gen >= 9) {
+               if (mode_changed)
+                       ret = skl_update_scaler_crtc(pipe_config);
+
+               if (!ret)
+                       ret = intel_atomic_setup_scalers(dev, intel_crtc,
+                                                        pipe_config);
+       }
+
+       return ret;
+}
+
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+       .mode_set_base_atomic = intel_pipe_set_base_atomic,
+       .load_lut = intel_crtc_load_lut,
+       .atomic_begin = intel_begin_crtc_commit,
+       .atomic_flush = intel_finish_crtc_commit,
+       .atomic_check = intel_crtc_atomic_check,
+};
+
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+
+       for_each_intel_connector(dev, connector) {
+               if (connector->base.encoder) {
+                       connector->base.state->best_encoder =
+                               connector->base.encoder;
+                       connector->base.state->crtc =
+                               connector->base.encoder->crtc;
+               } else {
+                       connector->base.state->best_encoder = NULL;
+                       connector->base.state->crtc = NULL;
+               }
+       }
+}
+
+static void
+connected_sink_compute_bpp(struct intel_connector *connector,
+                          struct intel_crtc_state *pipe_config)
+{
+       int bpp = pipe_config->pipe_bpp;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
                connector->base.base.id,
@@ -11526,17 +11941,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
        if (IS_BROXTON(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
                              "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-                             "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+                             "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
                              pipe_config->ddi_pll_sel,
                              pipe_config->dpll_hw_state.ebb0,
+                             pipe_config->dpll_hw_state.ebb4,
                              pipe_config->dpll_hw_state.pll0,
                              pipe_config->dpll_hw_state.pll1,
                              pipe_config->dpll_hw_state.pll2,
                              pipe_config->dpll_hw_state.pll3,
                              pipe_config->dpll_hw_state.pll6,
                              pipe_config->dpll_hw_state.pll8,
+                             pipe_config->dpll_hw_state.pll9,
+                             pipe_config->dpll_hw_state.pll10,
                              pipe_config->dpll_hw_state.pcsdw12);
        } else if (IS_SKYLAKE(dev)) {
                DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11593,56 +12011,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        }
 }
 
-static bool encoders_cloneable(const struct intel_encoder *a,
-                              const struct intel_encoder *b)
-{
-       /* masks could be asymmetric, so check both ways */
-       return a == b || (a->cloneable & (1 << b->type) &&
-                         b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
-                                        struct intel_crtc *crtc,
-                                        struct intel_encoder *encoder)
-{
-       struct intel_encoder *source_encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int i;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               source_encoder =
-                       to_intel_encoder(connector_state->best_encoder);
-               if (!encoders_cloneable(encoder, source_encoder))
-                       return false;
-       }
-
-       return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-                                 struct intel_crtc *crtc)
-{
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int i;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               if (!check_single_encoder_cloning(state, crtc, encoder))
-                       return false;
-       }
-
-       return true;
-}
-
 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -11696,6 +12064,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        struct intel_dpll_hw_state dpll_hw_state;
        enum intel_dpll_id shared_dpll;
        uint32_t ddi_pll_sel;
+       bool force_thru;
 
        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
@@ -11707,6 +12076,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
        ddi_pll_sel = crtc_state->ddi_pll_sel;
+       force_thru = crtc_state->pch_pfit.force_thru;
 
        memset(crtc_state, 0, sizeof *crtc_state);
 
@@ -11715,13 +12085,14 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
        crtc_state->ddi_pll_sel = ddi_pll_sel;
+       crtc_state->pch_pfit.force_thru = force_thru;
 }
 
 static int
 intel_modeset_pipe_config(struct drm_crtc *crtc,
-                         struct drm_atomic_state *state,
                          struct intel_crtc_state *pipe_config)
 {
+       struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
@@ -11729,16 +12100,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        int i;
        bool retry = true;
 
-       if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
-               DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-               return -EINVAL;
-       }
-
-       if (!check_digital_port_conflicts(state)) {
-               DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-               return -EINVAL;
-       }
-
        clear_intel_crtc_state(pipe_config);
 
        pipe_config->cpu_transcoder =
@@ -11832,90 +12193,27 @@ encoder_retry:
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-       return 0;
 fail:
        return ret;
 }
 
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
-       struct drm_encoder *encoder;
-       struct drm_device *dev = crtc->dev;
-
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-               if (encoder->crtc == crtc)
-                       return true;
-
-       return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
-       return state->mode_changed || state->active_changed;
-}
-
 static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
 {
-       struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       struct drm_connector *connector;
        int i;
 
-       intel_shared_dpll_commit(dev_priv);
-
-       for_each_intel_encoder(dev, intel_encoder) {
-               if (!intel_encoder->base.crtc)
-                       continue;
-
-               for_each_crtc_in_state(state, crtc, crtc_state, i) {
-                       if (crtc != intel_encoder->base.crtc)
-                               continue;
-
-                       if (crtc_state->enable && needs_modeset(crtc_state))
-                               intel_encoder->connectors_active = false;
-
-                       break;
-               }
-       }
-
-       drm_atomic_helper_swap_state(state->dev, state);
-       intel_modeset_fixup_state(state);
-
        /* Double check state. */
-       for_each_crtc(dev, crtc) {
-               WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
-       }
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (!connector->encoder || !connector->encoder->crtc)
-                       continue;
-
-               for_each_crtc_in_state(state, crtc, crtc_state, i) {
-                       if (crtc != connector->encoder->crtc)
-                               continue;
-
-                       if (crtc->state->enable && needs_modeset(crtc->state)) {
-                               struct drm_property *dpms_property =
-                                       dev->mode_config.dpms_property;
-
-                               connector->dpms = DRM_MODE_DPMS_ON;
-                               drm_object_property_set_value(&connector->base,
-                                                                dpms_property,
-                                                                DRM_MODE_DPMS_ON);
-
-                               intel_encoder = to_intel_encoder(connector->encoder);
-                               intel_encoder->connectors_active = true;
-                       }
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
-                       break;
-               }
+               /* Update hwmode for vblank functions */
+               if (crtc->state->active)
+                       crtc->hwmode = crtc->state->adjusted_mode;
+               else
+                       crtc->hwmode.crtc_clock = 0;
        }
-
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11942,27 +12240,133 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
                            base.head) \
                if (mask & (1 <<(intel_crtc)->pipe))
 
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+                 unsigned int m2, unsigned int n2,
+                 bool exact)
+{
+       if (m == m2 && n == n2)
+               return true;
+
+       if (exact || !m || !n || !m2 || !n2)
+               return false;
+
+       BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+       if (m > m2) {
+               while (m > m2) {
+                       m2 <<= 1;
+                       n2 <<= 1;
+               }
+       } else if (m < m2) {
+               while (m < m2) {
+                       m <<= 1;
+                       n <<= 1;
+               }
+       }
+
+       return m == m2 && n == n2;
+}
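
The loop above scales one M/N pair up by powers of two until the m values meet, so it only reports equality when the two ratios match exactly after such scaling (the BUILD_BUG_ON guards the shifts against overflow). A self-contained illustration with hypothetical values, not driver code:

	#include <assert.h>
	#include <stdbool.h>

	/* Standalone copy of the comparison idea, for illustration only. */
	static bool fuzzy_m_n_equal(unsigned int m, unsigned int n,
				    unsigned int m2, unsigned int n2)
	{
		if (!m || !n || !m2 || !n2)
			return m == m2 && n == n2;

		/* Scale the smaller pair up by powers of two until the m's meet. */
		while (m > m2) {
			m2 <<= 1;
			n2 <<= 1;
		}
		while (m < m2) {
			m <<= 1;
			n <<= 1;
		}
		return m == m2 && n == n2;
	}

	int main(void)
	{
		assert(fuzzy_m_n_equal(100, 200, 400, 800));  /* same ratio, 4x is a power of two */
		assert(!fuzzy_m_n_equal(100, 200, 300, 600)); /* same ratio, but 3x is not */
		return 0;
	}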
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+                      struct intel_link_m_n *m2_n2,
+                      bool adjust)
+{
+       if (m_n->tu == m2_n2->tu &&
+           intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+                             m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+           intel_compare_m_n(m_n->link_m, m_n->link_n,
+                             m2_n2->link_m, m2_n2->link_n, !adjust)) {
+               if (adjust)
+                       *m2_n2 = *m_n;
+
+               return true;
+       }
+
+       return false;
+}
+
 static bool
 intel_pipe_config_compare(struct drm_device *dev,
                          struct intel_crtc_state *current_config,
-                         struct intel_crtc_state *pipe_config)
+                         struct intel_crtc_state *pipe_config,
+                         bool adjust)
 {
+       bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+       do { \
+               if (!adjust) \
+                       DRM_ERROR(fmt, ##__VA_ARGS__); \
+               else \
+                       DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+       } while (0)
+
 #define PIPE_CONF_CHECK_X(name)        \
        if (current_config->name != pipe_config->name) { \
-               DRM_ERROR("mismatch in " #name " " \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
                          "(expected 0x%08x, found 0x%08x)\n", \
                          current_config->name, \
                          pipe_config->name); \
-               return false; \
+               ret = false; \
        }
 
 #define PIPE_CONF_CHECK_I(name)        \
        if (current_config->name != pipe_config->name) { \
-               DRM_ERROR("mismatch in " #name " " \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
-               return false; \
+               ret = false; \
+       }
+
+#define PIPE_CONF_CHECK_M_N(name) \
+       if (!intel_compare_link_m_n(&current_config->name, \
+                                   &pipe_config->name,\
+                                   adjust)) { \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+                         "(expected tu %i gmch %i/%i link %i/%i, " \
+                         "found tu %i, gmch %i/%i link %i/%i)\n", \
+                         current_config->name.tu, \
+                         current_config->name.gmch_m, \
+                         current_config->name.gmch_n, \
+                         current_config->name.link_m, \
+                         current_config->name.link_n, \
+                         pipe_config->name.tu, \
+                         pipe_config->name.gmch_m, \
+                         pipe_config->name.gmch_n, \
+                         pipe_config->name.link_m, \
+                         pipe_config->name.link_n); \
+               ret = false; \
+       }
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+       if (!intel_compare_link_m_n(&current_config->name, \
+                                   &pipe_config->name, adjust) && \
+           !intel_compare_link_m_n(&current_config->alt_name, \
+                                   &pipe_config->name, adjust)) { \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+                         "(expected tu %i gmch %i/%i link %i/%i, " \
+                         "or tu %i gmch %i/%i link %i/%i, " \
+                         "found tu %i, gmch %i/%i link %i/%i)\n", \
+                         current_config->name.tu, \
+                         current_config->name.gmch_m, \
+                         current_config->name.gmch_n, \
+                         current_config->name.link_m, \
+                         current_config->name.link_n, \
+                         current_config->alt_name.tu, \
+                         current_config->alt_name.gmch_m, \
+                         current_config->alt_name.gmch_n, \
+                         current_config->alt_name.link_m, \
+                         current_config->alt_name.link_n, \
+                         pipe_config->name.tu, \
+                         pipe_config->name.gmch_m, \
+                         pipe_config->name.gmch_n, \
+                         pipe_config->name.link_m, \
+                         pipe_config->name.link_n); \
+               ret = false; \
        }
 
 /* This is required for BDW+ where there is only one set of registers for
@@ -11973,30 +12377,30 @@ intel_pipe_config_compare(struct drm_device *dev,
 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
        if ((current_config->name != pipe_config->name) && \
                (current_config->alt_name != pipe_config->name)) { \
-                       DRM_ERROR("mismatch in " #name " " \
+                       INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
                                  "(expected %i or %i, found %i)\n", \
                                  current_config->name, \
                                  current_config->alt_name, \
                                  pipe_config->name); \
-                       return false; \
+                       ret = false; \
        }
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)      \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
-               DRM_ERROR("mismatch in " #name "(" #mask ") "      \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
                          "(expected %i, found %i)\n", \
                          current_config->name & (mask), \
                          pipe_config->name & (mask)); \
-               return false; \
+               ret = false; \
        }
 
 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-               DRM_ERROR("mismatch in " #name " " \
+               INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
-               return false; \
+               ret = false; \
        }
 
 #define PIPE_CONF_QUIRK(quirk) \
@@ -12006,35 +12410,18 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
-       PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
-       PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
-       PIPE_CONF_CHECK_I(fdi_m_n.link_m);
-       PIPE_CONF_CHECK_I(fdi_m_n.link_n);
-       PIPE_CONF_CHECK_I(fdi_m_n.tu);
+       PIPE_CONF_CHECK_M_N(fdi_m_n);
 
        PIPE_CONF_CHECK_I(has_dp_encoder);
 
        if (INTEL_INFO(dev)->gen < 8) {
-               PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
-               PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
-               PIPE_CONF_CHECK_I(dp_m_n.link_m);
-               PIPE_CONF_CHECK_I(dp_m_n.link_n);
-               PIPE_CONF_CHECK_I(dp_m_n.tu);
-
-               if (current_config->has_drrs) {
-                       PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
-                       PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
-                       PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
-                       PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
-                       PIPE_CONF_CHECK_I(dp_m2_n2.tu);
-               }
-       } else {
-               PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
-               PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
-               PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
-               PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
-               PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
-       }
+               PIPE_CONF_CHECK_M_N(dp_m_n);
+
+               PIPE_CONF_CHECK_I(has_drrs);
+               if (current_config->has_drrs)
+                       PIPE_CONF_CHECK_M_N(dp_m2_n2);
+       } else
+               PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12076,21 +12463,11 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(pipe_src_w);
        PIPE_CONF_CHECK_I(pipe_src_h);
 
-       /*
-        * FIXME: BIOS likes to set up a cloned config with lvds+external
-        * screen. Since we don't yet re-compute the pipe config when moving
-        * just the lvds port away to another pipe the sw tracking won't match.
-        *
-        * Proper atomic modesets with recomputed global state will fix this.
-        * Until then just don't check gmch state for inherited modes.
-        */
-       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
-               PIPE_CONF_CHECK_I(gmch_pfit.control);
-               /* pfit ratios are autocomputed by the hw on gen4+ */
-               if (INTEL_INFO(dev)->gen < 4)
-                       PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-               PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-       }
+       PIPE_CONF_CHECK_I(gmch_pfit.control);
+       /* pfit ratios are autocomputed by the hw on gen4+ */
+       if (INTEL_INFO(dev)->gen < 4)
+               PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+       PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
 
        PIPE_CONF_CHECK_I(pch_pfit.enabled);
        if (current_config->pch_pfit.enabled) {
@@ -12130,8 +12507,9 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
 
-       return true;
+       return ret;
 }
 
 static void check_wm_state(struct drm_device *dev)
@@ -12185,17 +12563,23 @@ static void check_wm_state(struct drm_device *dev)
 }
 
 static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+                     struct drm_atomic_state *old_state)
 {
-       struct intel_connector *connector;
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *connector;
+       int i;
+
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               struct drm_encoder *encoder = connector->encoder;
+               struct drm_connector_state *state = connector->state;
 
-       for_each_intel_connector(dev, connector) {
                /* This also checks the encoder/connector hw state with the
                 * ->get_hw_state callbacks. */
-               intel_connector_check_state(connector);
+               intel_connector_check_state(to_intel_connector(connector));
 
-               I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
-                    "connector's staged encoder doesn't match current encoder\n");
+               I915_STATE_WARN(state->best_encoder != encoder,
+                    "connector's atomic encoder doesn't match legacy encoder\n");
        }
 }
 
@@ -12207,124 +12591,106 @@ check_encoder_state(struct drm_device *dev)
 
        for_each_intel_encoder(dev, encoder) {
                bool enabled = false;
-               bool active = false;
-               enum pipe pipe, tracked_pipe;
+               enum pipe pipe;
 
                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);
 
-               I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
-                    "encoder's stage crtc doesn't match current crtc\n");
-               I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
-                    "encoder's active_connectors set, but no crtc\n");
-
                for_each_intel_connector(dev, connector) {
-                       if (connector->base.encoder != &encoder->base)
+                       if (connector->base.state->best_encoder != &encoder->base)
                                continue;
                        enabled = true;
-                       if (connector->base.dpms != DRM_MODE_DPMS_OFF)
-                               active = true;
+
+                       I915_STATE_WARN(connector->base.state->crtc !=
+                                       encoder->base.crtc,
+                            "connector's crtc doesn't match encoder crtc\n");
                }
-               /*
-                * for MST connectors if we unplug the connector is gone
-                * away but the encoder is still connected to a crtc
-                * until a modeset happens in response to the hotplug.
-                */
-               if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
-                       continue;
 
                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);
-               I915_STATE_WARN(active && !encoder->base.crtc,
-                    "active encoder with no crtc\n");
-
-               I915_STATE_WARN(encoder->connectors_active != active,
-                    "encoder's computed active state doesn't match tracked active state "
-                    "(expected %i, found %i)\n", active, encoder->connectors_active);
-
-               active = encoder->get_hw_state(encoder, &pipe);
-               I915_STATE_WARN(active != encoder->connectors_active,
-                    "encoder's hw state doesn't match sw tracking "
-                    "(expected %i, found %i)\n",
-                    encoder->connectors_active, active);
 
-               if (!encoder->base.crtc)
-                       continue;
-
-               tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-               I915_STATE_WARN(active && pipe != tracked_pipe,
-                    "active encoder's pipe doesn't match"
-                    "(expected %i, found %i)\n",
-                    tracked_pipe, pipe);
+               if (!encoder->base.crtc) {
+                       bool active;
 
+                       active = encoder->get_hw_state(encoder, &pipe);
+                       I915_STATE_WARN(active,
+                            "encoder detached but still enabled on pipe %c.\n",
+                            pipe_name(pipe));
+               }
        }
 }
 
 static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
-       struct intel_crtc_state pipe_config;
+       struct drm_crtc_state *old_crtc_state;
+       struct drm_crtc *crtc;
+       int i;
 
-       for_each_intel_crtc(dev, crtc) {
-               bool enabled = false;
-               bool active = false;
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct intel_crtc_state *pipe_config, *sw_config;
+               bool active;
+
+               if (!needs_modeset(crtc->state))
+                       continue;
 
-               memset(&pipe_config, 0, sizeof(pipe_config));
+               __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+               pipe_config = to_intel_crtc_state(old_crtc_state);
+               memset(pipe_config, 0, sizeof(*pipe_config));
+               pipe_config->base.crtc = crtc;
+               pipe_config->base.state = old_state;
 
                DRM_DEBUG_KMS("[CRTC:%d]\n",
-                             crtc->base.base.id);
-
-               I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
-                    "active crtc, but not enabled in sw tracking\n");
+                             crtc->base.id);
 
-               for_each_intel_encoder(dev, encoder) {
-                       if (encoder->base.crtc != &crtc->base)
-                               continue;
-                       enabled = true;
-                       if (encoder->connectors_active)
-                               active = true;
-               }
+               active = dev_priv->display.get_pipe_config(intel_crtc,
+                                                          pipe_config);
 
-               I915_STATE_WARN(active != crtc->active,
-                    "crtc's computed active state doesn't match tracked active state "
-                    "(expected %i, found %i)\n", active, crtc->active);
-               I915_STATE_WARN(enabled != crtc->base.state->enable,
-                    "crtc's computed enabled state doesn't match tracked enabled state "
-                    "(expected %i, found %i)\n", enabled,
-                               crtc->base.state->enable);
+               /* hw state is inconsistent with the pipe quirk */
+               if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+                   (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+                       active = crtc->state->active;
 
-               active = dev_priv->display.get_pipe_config(crtc,
-                                                          &pipe_config);
+               I915_STATE_WARN(crtc->state->active != active,
+                    "crtc active state doesn't match with hw state "
+                    "(expected %i, found %i)\n", crtc->state->active, active);
 
-               /* hw state is inconsistent with the pipe quirk */
-               if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-                   (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-                       active = crtc->active;
+               I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+                    "transitional active state does not match atomic hw state "
+                    "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
 
-               for_each_intel_encoder(dev, encoder) {
+               for_each_encoder_on_crtc(dev, crtc, encoder) {
                        enum pipe pipe;
-                       if (encoder->base.crtc != &crtc->base)
-                               continue;
-                       if (encoder->get_hw_state(encoder, &pipe))
-                               encoder->get_config(encoder, &pipe_config);
+
+                       active = encoder->get_hw_state(encoder, &pipe);
+                       I915_STATE_WARN(active != crtc->state->active,
+                               "[ENCODER:%i] active %i with crtc active %i\n",
+                               encoder->base.base.id, active, crtc->state->active);
+
+                       I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+                                       "Encoder connected to wrong pipe %c\n",
+                                       pipe_name(pipe));
+
+                       if (active)
+                               encoder->get_config(encoder, pipe_config);
                }
 
-               I915_STATE_WARN(crtc->active != active,
-                    "crtc active state doesn't match with hw state "
-                    "(expected %i, found %i)\n", crtc->active, active);
+               if (!crtc->state->active)
+                       continue;
 
-               if (active &&
-                   !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+               sw_config = to_intel_crtc_state(crtc->state);
+               if (!intel_pipe_config_compare(dev, sw_config,
+                                              pipe_config, false)) {
                        I915_STATE_WARN(1, "pipe state doesn't match!\n");
-                       intel_dump_pipe_config(crtc, &pipe_config,
+                       intel_dump_pipe_config(intel_crtc, pipe_config,
                                               "[hw state]");
-                       intel_dump_pipe_config(crtc, crtc->config,
+                       intel_dump_pipe_config(intel_crtc, sw_config,
                                               "[sw state]");
                }
        }
@@ -12379,13 +12745,14 @@ check_shared_dpll_state(struct drm_device *dev)
        }
 }
 
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+                         struct drm_atomic_state *old_state)
 {
        check_wm_state(dev);
-       check_connector_state(dev);
+       check_connector_state(dev, old_state);
        check_encoder_state(dev);
-       check_crtc_state(dev);
+       check_crtc_state(dev, old_state);
        check_shared_dpll_state(dev);
 }
 
@@ -12439,519 +12806,390 @@ static void update_scanline_offset(struct intel_crtc *crtc)
                crtc->scanline_offset = 1;
 }
 
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
-                            struct drm_atomic_state *state)
-{
-       struct intel_crtc_state *pipe_config;
-       int ret = 0;
-
-       ret = drm_atomic_add_affected_connectors(state, crtc);
-       if (ret)
-               return ERR_PTR(ret);
-
-       ret = drm_atomic_helper_check_modeset(state->dev, state);
-       if (ret)
-               return ERR_PTR(ret);
-
-       /*
-        * Note this needs changes when we start tracking multiple modes
-        * and crtcs.  At that point we'll need to compute the whole config
-        * (i.e. one pipe_config for each crtc) rather than just the one
-        * for this crtc.
-        */
-       pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-       if (IS_ERR(pipe_config))
-               return pipe_config;
-
-       if (!pipe_config->base.enable)
-               return pipe_config;
-
-       ret = intel_modeset_pipe_config(crtc, state, pipe_config);
-       if (ret)
-               return ERR_PTR(ret);
-
-       /* Check things that can only be changed through modeset */
-       if (pipe_config->has_audio !=
-           to_intel_crtc(crtc)->config->has_audio)
-               pipe_config->base.mode_changed = true;
-
-       /*
-        * Note we have an issue here with infoframes: current code
-        * only updates them on the full mode set path per hw
-        * requirements.  So here we should be checking for any
-        * required changes and forcing a mode set.
-        */
-
-       intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
-       ret = drm_atomic_helper_check_planes(state->dev, state);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned clear_pipes = 0;
+       struct intel_shared_dpll_config *shared_dpll = NULL;
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *intel_crtc_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       int ret = 0;
        int i;
 
        if (!dev_priv->display.crtc_compute_clock)
-               return 0;
-
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               intel_crtc = to_intel_crtc(crtc);
-               intel_crtc_state = to_intel_crtc_state(crtc_state);
-
-               if (needs_modeset(crtc_state)) {
-                       clear_pipes |= 1 << intel_crtc->pipe;
-                       intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-               }
-       }
-
-       ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
-       if (ret)
-               goto done;
+               return;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc_state) || !crtc_state->enable)
-                       continue;
+               int dpll;
 
                intel_crtc = to_intel_crtc(crtc);
                intel_crtc_state = to_intel_crtc_state(crtc_state);
+               dpll = intel_crtc_state->shared_dpll;
 
-               ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-                                                          intel_crtc_state);
-               if (ret) {
-                       intel_shared_dpll_abort_config(dev_priv);
-                       goto done;
-               }
-       }
+               if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+                       continue;
 
-done:
-       return ret;
-}
+               intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
 
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
-{
-       struct drm_device *dev = state->dev;
-       int ret;
+               if (!shared_dpll)
+                       shared_dpll = intel_atomic_get_shared_dpll_state(state);
 
-       /*
-        * See if the config requires any additional preparation, e.g.
-        * to adjust global state with pipes off.  We need to do this
-        * here so we can get the modeset_pipe updated config for the new
-        * mode set on this crtc.  For other crtcs we need to use the
-        * adjusted_mode bits in the crtc directly.
-        */
-       if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
-               ret = valleyview_modeset_global_pipes(state);
-               if (ret)
-                       return ret;
+               shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
        }
-
-       ret = __intel_set_mode_setup_plls(state);
-       if (ret)
-               return ret;
-
-       return 0;
 }
 
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
-                           struct intel_crtc_state *pipe_config)
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
 {
-       struct drm_device *dev = modeset_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_atomic_state *state = pipe_config->base.state;
-       struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       int ret = 0;
+       struct intel_crtc *intel_crtc;
+       struct drm_crtc *crtc;
+       struct intel_crtc_state *first_crtc_state = NULL;
+       struct intel_crtc_state *other_crtc_state = NULL;
+       enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
        int i;
 
-       ret = __intel_set_mode_checks(state);
-       if (ret < 0)
-               return ret;
-
-       ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (ret)
-               return ret;
-
+       /* look at all crtcs that are going to be enabled during the modeset */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc_state))
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!crtc_state->active || !needs_modeset(crtc_state))
                        continue;
 
-               if (!crtc_state->enable) {
-                       if (crtc->state->enable)
-                               intel_crtc_disable(crtc);
-               } else if (crtc->state->enable) {
-                       intel_crtc_disable_planes(crtc);
-                       dev_priv->display.crtc_disable(crtc);
+               if (first_crtc_state) {
+                       other_crtc_state = to_intel_crtc_state(crtc_state);
+                       break;
+               } else {
+                       first_crtc_state = to_intel_crtc_state(crtc_state);
+                       first_pipe = intel_crtc->pipe;
                }
        }
 
-       /* crtc->mode is already used by the ->mode_set callbacks, hence we need
-        * to set it here already despite that we pass it down the callchain.
-        *
-        * Note we'll need to fix this up when we start tracking multiple
-        * pipes; here we assume a single modeset_pipe and only track the
-        * single crtc and mode.
-        */
-       if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
-               modeset_crtc->mode = pipe_config->base.mode;
-
-               /*
-                * Calculate and store various constants which
-                * are later needed by vblank and swap-completion
-                * timestamping. They are derived from true hwmode.
-                */
-               drm_calc_timestamping_constants(modeset_crtc,
-                                               &pipe_config->base.adjusted_mode);
-       }
+       /* No workaround needed? */
+       if (!first_crtc_state)
+               return 0;
 
-       /* Only after disabling all output pipelines that will be changed can we
-        * update the the output configuration. */
-       intel_modeset_update_state(state);
+       /* w/a possibly needed, check how many crtc's are already enabled. */
+       for_each_intel_crtc(state->dev, intel_crtc) {
+               struct intel_crtc_state *pipe_config;
 
-       /* The state has been swaped above, so state actually contains the
-        * old state now. */
+               pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+               if (IS_ERR(pipe_config))
+                       return PTR_ERR(pipe_config);
 
-       modeset_update_crtc_power_domains(state);
+               pipe_config->hsw_workaround_pipe = INVALID_PIPE;
 
-       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc->state) || !crtc->state->enable) {
-                       drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+               if (!pipe_config->base.active ||
+                   needs_modeset(&pipe_config->base))
                        continue;
-               }
 
-               update_scanline_offset(to_intel_crtc(crtc));
+               /* 2 or more enabled crtcs means no need for w/a */
+               if (enabled_pipe != INVALID_PIPE)
+                       return 0;
 
-               dev_priv->display.crtc_enable(crtc);
-               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+               enabled_pipe = intel_crtc->pipe;
        }
 
-       /* FIXME: add subpixel order */
-
-       drm_atomic_helper_cleanup_planes(dev, state);
-
-       drm_atomic_state_free(state);
+       if (enabled_pipe != INVALID_PIPE)
+               first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+       else if (other_crtc_state)
+               other_crtc_state->hsw_workaround_pipe = first_pipe;
 
        return 0;
 }
 
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
-                                     struct intel_crtc_state *pipe_config,
-                                     bool force_restore)
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 {
-       int ret;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
 
-       ret = __intel_set_mode(crtc, pipe_config);
+       /* add all active pipes to the state */
+       for_each_crtc(state->dev, crtc) {
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
 
-       if (ret == 0 && force_restore) {
-               intel_modeset_update_staged_output_state(crtc->dev);
-               intel_modeset_check_state(crtc->dev);
-       }
+               if (!crtc_state->active || needs_modeset(crtc_state))
+                       continue;
 
-       return ret;
-}
+               crtc_state->mode_changed = true;
 
-static int intel_set_mode(struct drm_crtc *crtc,
-                         struct drm_atomic_state *state,
-                         bool force_restore)
-{
-       struct intel_crtc_state *pipe_config;
-       int ret = 0;
+               ret = drm_atomic_add_affected_connectors(state, crtc);
+               if (ret)
+                       break;
 
-       pipe_config = intel_modeset_compute_config(crtc, state);
-       if (IS_ERR(pipe_config)) {
-               ret = PTR_ERR(pipe_config);
-               goto out;
+               ret = drm_atomic_add_affected_planes(state, crtc);
+               if (ret)
+                       break;
        }
 
-       ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
-       if (ret)
-               goto out;
-
-out:
        return ret;
 }
 
-void intel_crtc_restore_mode(struct drm_crtc *crtc)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_atomic_state *state;
-       struct intel_encoder *encoder;
-       struct intel_connector *connector;
-       struct drm_connector_state *connector_state;
-       struct intel_crtc_state *crtc_state;
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       state = drm_atomic_state_alloc(dev);
-       if (!state) {
-               DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
-                             crtc->base.id);
-               return;
-       }
-
-       state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-       /* The force restore path in the HW readout code relies on the staged
-        * config still keeping the user requested config while the actual
-        * state has been overwritten by the configuration read from HW. We
-        * need to copy the staged config to the atomic state, otherwise the
-        * mode set will just reapply the state the HW is already in. */
-       for_each_intel_encoder(dev, encoder) {
-               if (&encoder->new_crtc->base != crtc)
-                       continue;
-
-               for_each_intel_connector(dev, connector) {
-                       if (connector->new_encoder != encoder)
-                               continue;
-
-                       connector_state = drm_atomic_get_connector_state(state, &connector->base);
-                       if (IS_ERR(connector_state)) {
-                               DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
-                                             connector->base.base.id,
-                                             connector->base.name,
-                                             PTR_ERR(connector_state));
-                               continue;
-                       }
-
-                       connector_state->crtc = crtc;
-                       connector_state->best_encoder = &encoder->base;
-               }
-       }
-
-       crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-       if (IS_ERR(crtc_state)) {
-               DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
-                             crtc->base.id, PTR_ERR(crtc_state));
-               drm_atomic_state_free(state);
-               return;
+       if (!check_digital_port_conflicts(state)) {
+               DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+               return -EINVAL;
        }
 
-       crtc_state->base.active = crtc_state->base.enable =
-               to_intel_crtc(crtc)->new_enabled;
-
-       drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
+       /*
+        * See if the config requires any additional preparation, e.g.
+        * to adjust global state with pipes off.  We need to do this
+        * here so we can get the modeset_pipe updated config for the new
+        * mode set on this crtc.  For other crtcs we need to use the
+        * adjusted_mode bits in the crtc directly.
+        */
+       if (dev_priv->display.modeset_calc_cdclk) {
+               unsigned int cdclk;
 
-       intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
-                                       crtc->primary->fb, crtc->x, crtc->y);
+               ret = dev_priv->display.modeset_calc_cdclk(state);
 
-       ret = intel_set_mode(crtc, state, false);
-       if (ret)
-               drm_atomic_state_free(state);
-}
+               cdclk = to_intel_atomic_state(state)->cdclk;
+               if (!ret && cdclk != dev_priv->cdclk_freq)
+                       ret = intel_modeset_all_pipes(state);
 
-#undef for_each_intel_crtc_masked
+               if (ret < 0)
+                       return ret;
+       } else
+               to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
 
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
-                                       struct drm_mode_set *set)
-{
-       int ro;
+       intel_modeset_clear_plls(state);
 
-       for (ro = 0; ro < set->num_connectors; ro++)
-               if (set->connectors[ro] == &connector->base)
-                       return true;
+       if (IS_HASWELL(dev))
+               return haswell_mode_set_planes_workaround(state);
 
-       return false;
+       return 0;
 }
 
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
-                                struct drm_mode_set *set,
-                                struct drm_atomic_state *state)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+                             struct drm_atomic_state *state)
 {
-       struct intel_connector *connector;
-       struct drm_connector *drm_connector;
-       struct drm_connector_state *connector_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       int i, ret;
-
-       /* The upper layers ensure that we either disable a crtc or have a list
-        * of connectors. For paranoia, double-check this. */
-       WARN_ON(!set->fb && (set->num_connectors != 0));
-       WARN_ON(set->fb && (set->num_connectors == 0));
+       int ret, i;
+       bool any_ms = false;
 
-       for_each_intel_connector(dev, connector) {
-               bool in_mode_set = intel_connector_in_mode_set(connector, set);
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
 
-               if (!in_mode_set && connector->base.state->crtc != set->crtc)
-                       continue;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct intel_crtc_state *pipe_config =
+                       to_intel_crtc_state(crtc_state);
 
-               connector_state =
-                       drm_atomic_get_connector_state(state, &connector->base);
-               if (IS_ERR(connector_state))
-                       return PTR_ERR(connector_state);
+               /* Catch I915_MODE_FLAG_INHERITED */
+               if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+                       crtc_state->mode_changed = true;
 
-               if (in_mode_set) {
-                       int pipe = to_intel_crtc(set->crtc)->pipe;
-                       connector_state->best_encoder =
-                               &intel_find_encoder(connector, pipe)->base;
+               if (!crtc_state->enable) {
+                       if (needs_modeset(crtc_state))
+                               any_ms = true;
+                       continue;
                }
 
-               if (connector->base.state->crtc != set->crtc)
+               if (!needs_modeset(crtc_state))
                        continue;
 
-               /* If we disable the crtc, disable all its connectors. Also, if
-                * the connector is on the changing crtc but not on the new
-                * connector list, disable it. */
-               if (!set->fb || !in_mode_set) {
-                       connector_state->best_encoder = NULL;
-
-                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
-                               connector->base.base.id,
-                               connector->base.name);
-               }
-       }
-       /* connector->new_encoder is now updated for all connectors. */
+               /* FIXME: For only active_changed we shouldn't need to do any
+                * state recomputation at all. */
 
-       for_each_connector_in_state(state, drm_connector, connector_state, i) {
-               connector = to_intel_connector(drm_connector);
+               ret = drm_atomic_add_affected_connectors(state, crtc);
+               if (ret)
+                       return ret;
 
-               if (!connector_state->best_encoder) {
-                       ret = drm_atomic_set_crtc_for_connector(connector_state,
-                                                               NULL);
-                       if (ret)
-                               return ret;
+               ret = intel_modeset_pipe_config(crtc, pipe_config);
+               if (ret)
+                       return ret;
 
-                       continue;
+               if (i915.fastboot &&
+                   intel_pipe_config_compare(state->dev,
+                                       to_intel_crtc_state(crtc->state),
+                                       pipe_config, true)) {
+                       crtc_state->mode_changed = false;
                }
 
-               if (intel_connector_in_mode_set(connector, set)) {
-                       struct drm_crtc *crtc = connector->base.state->crtc;
-
-                       /* If this connector was in a previous crtc, add it
-                        * to the state. We might need to disable it. */
-                       if (crtc) {
-                               crtc_state =
-                                       drm_atomic_get_crtc_state(state, crtc);
-                               if (IS_ERR(crtc_state))
-                                       return PTR_ERR(crtc_state);
-                       }
+               if (needs_modeset(crtc_state)) {
+                       any_ms = true;
 
-                       ret = drm_atomic_set_crtc_for_connector(connector_state,
-                                                               set->crtc);
+                       ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                return ret;
                }
 
-               /* Make sure the new CRTC will work with the encoder */
-               if (!drm_encoder_crtc_ok(connector_state->best_encoder,
-                                        connector_state->crtc)) {
-                       return -EINVAL;
-               }
-
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
-                       connector->base.base.id,
-                       connector->base.name,
-                       connector_state->crtc->base.id);
-
-               if (connector_state->best_encoder != &connector->encoder->base)
-                       connector->encoder =
-                               to_intel_encoder(connector_state->best_encoder);
+               intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+                                      needs_modeset(crtc_state) ?
+                                      "[modeset]" : "[fastset]");
        }
 
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               bool has_connectors;
+       if (any_ms) {
+               ret = intel_modeset_checks(state);
 
-               ret = drm_atomic_add_affected_connectors(state, crtc);
                if (ret)
                        return ret;
+       } else
+               to_intel_atomic_state(state)->cdclk =
+                       to_i915(state->dev)->cdclk_freq;
+
+       return drm_atomic_helper_check_planes(state->dev, state);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+                              struct drm_atomic_state *state,
+                              bool async)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
+       int i;
+       bool any_ms = false;
 
-               has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
-               if (has_connectors != crtc_state->enable)
-                       crtc_state->enable =
-                       crtc_state->active = has_connectors;
+       if (async) {
+               DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+               return -EINVAL;
        }
 
-       ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
-                                             set->fb, set->x, set->y);
+       ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;
 
-       crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
-       if (IS_ERR(crtc_state))
-               return PTR_ERR(crtc_state);
+       drm_atomic_helper_swap_state(dev, state);
 
-       if (set->mode)
-               drm_mode_copy(&crtc_state->mode, set->mode);
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       if (set->num_connectors)
-               crtc_state->active = true;
+               if (!needs_modeset(crtc->state))
+                       continue;
 
-       return 0;
-}
+               any_ms = true;
+               intel_pre_plane_update(intel_crtc);
 
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
-       struct drm_device *dev;
-       struct drm_atomic_state *state = NULL;
-       struct intel_crtc_state *pipe_config;
-       int ret;
+               if (crtc_state->active) {
+                       intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+                       dev_priv->display.crtc_disable(crtc);
+                       intel_crtc->active = false;
+                       intel_disable_shared_dpll(intel_crtc);
+               }
+       }
 
-       BUG_ON(!set);
-       BUG_ON(!set->crtc);
-       BUG_ON(!set->crtc->helper_private);
+       /* Only after disabling all output pipelines that will be changed can we
+        * update the output configuration. */
+       intel_modeset_update_crtc_state(state);
 
-       /* Enforce sane interface api - has been abused by the fb helper. */
-       BUG_ON(!set->mode && set->fb);
-       BUG_ON(set->fb && set->num_connectors == 0);
+       if (any_ms) {
+               intel_shared_dpll_commit(state);
 
-       if (set->fb) {
-               DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
-                               set->crtc->base.id, set->fb->base.id,
-                               (int)set->num_connectors, set->x, set->y);
-       } else {
-               DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+               drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+               modeset_update_crtc_power_domains(state);
+       }
+
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               bool modeset = needs_modeset(crtc->state);
+
+               if (modeset && crtc->state->active) {
+                       update_scanline_offset(to_intel_crtc(crtc));
+                       dev_priv->display.crtc_enable(crtc);
+               }
+
+               if (!modeset)
+                       intel_pre_plane_update(intel_crtc);
+
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+               intel_post_plane_update(intel_crtc);
        }
 
-       dev = set->crtc->dev;
+       /* FIXME: add subpixel order */
 
-       state = drm_atomic_state_alloc(dev);
-       if (!state)
-               return -ENOMEM;
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+       drm_atomic_helper_cleanup_planes(dev, state);
 
-       state->acquire_ctx = dev->mode_config.acquire_ctx;
+       if (any_ms)
+               intel_modeset_check_state(dev, state);
 
-       ret = intel_modeset_stage_output_state(dev, set, state);
-       if (ret)
-               goto out;
+       drm_atomic_state_free(state);
+
+       return 0;
+}
+
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_atomic_state *state;
+       struct drm_crtc_state *crtc_state;
+       int ret;
 
-       pipe_config = intel_modeset_compute_config(set->crtc, state);
-       if (IS_ERR(pipe_config)) {
-               ret = PTR_ERR(pipe_config);
-               goto out;
+       state = drm_atomic_state_alloc(dev);
+       if (!state) {
+               DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
+                             crtc->base.id);
+               return;
        }
 
-       intel_update_pipe_size(to_intel_crtc(set->crtc));
+       state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
-       ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
+retry:
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       ret = PTR_ERR_OR_ZERO(crtc_state);
+       if (!ret) {
+               if (!crtc_state->active)
+                       goto out;
 
-       if (ret) {
-               DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
-                             set->crtc->base.id, ret);
+               crtc_state->mode_changed = true;
+               ret = drm_atomic_commit(state);
+       }
+
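+       /* A -EDEADLK from the atomic code means we raced with another
+        * locking context; drop all held modeset locks and retry. */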
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(state->acquire_ctx);
+               goto retry;
        }
 
-out:
        if (ret)
+out:
                drm_atomic_state_free(state);
-       return ret;
 }
 
+#undef for_each_intel_crtc_masked
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
        .gamma_set = intel_crtc_gamma_set,
-       .set_config = intel_crtc_set_config,
+       .set_config = drm_atomic_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = intel_crtc_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13048,36 +13286,16 @@ static void intel_shared_dpll_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       intel_update_cdclk(dev);
+
        if (HAS_DDI(dev))
                intel_ddi_pll_init(dev);
        else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               ibx_pch_dpll_init(dev);
-       else
-               dev_priv->num_shared_dpll = 0;
-
-       BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-}
-
-/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
-                         struct drm_plane_state *state)
-{
-       /* Update watermarks on tiling changes. */
-       if (!plane->state->fb || !state->fb ||
-           plane->state->fb->modifier[0] != state->fb->modifier[0] ||
-           plane->state->rotation != state->rotation)
-               return true;
+               ibx_pch_dpll_init(dev);
+       else
+               dev_priv->num_shared_dpll = 0;
 
-       return false;
+       BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
 }
 
 /**
@@ -13099,27 +13317,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 {
        struct drm_device *dev = plane->dev;
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       enum pipe pipe = intel_plane->pipe;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-       unsigned frontbuffer_bits = 0;
        int ret = 0;
 
        if (!obj)
                return 0;
 
-       switch (plane->type) {
-       case DRM_PLANE_TYPE_PRIMARY:
-               frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
-               break;
-       case DRM_PLANE_TYPE_CURSOR:
-               frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
-               break;
-       case DRM_PLANE_TYPE_OVERLAY:
-               frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
-               break;
-       }
-
        mutex_lock(&dev->struct_mutex);
 
        if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13129,11 +13333,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                if (ret)
                        DRM_DEBUG_KMS("failed to attach phys object\n");
        } else {
-               ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+               ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
        }
 
        if (ret == 0)
-               i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+               i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -13180,7 +13384,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
        dev = intel_crtc->base.dev;
        dev_priv = dev->dev_private;
        crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-       cdclk = dev_priv->display.get_display_clock_speed(dev);
+       cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
        if (!crtc_clock || !cdclk)
                return DRM_PLANE_HELPER_NO_SCALING;
@@ -13198,112 +13402,28 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
 
 static int
 intel_check_primary_plane(struct drm_plane *plane,
+                         struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *state)
 {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = state->base.crtc;
-       struct intel_crtc *intel_crtc;
-       struct intel_crtc_state *crtc_state;
        struct drm_framebuffer *fb = state->base.fb;
-       struct drm_rect *dest = &state->dst;
-       struct drm_rect *src = &state->src;
-       const struct drm_rect *clip = &state->clip;
-       bool can_position = false;
-       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-       int ret;
-
-       crtc = crtc ? crtc : plane->crtc;
-       intel_crtc = to_intel_crtc(crtc);
-       crtc_state = state->base.state ?
-               intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
+       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+       bool can_position = false;
 
-       if (INTEL_INFO(dev)->gen >= 9) {
-               /* use scaler when colorkey is not required */
-               if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
-                       min_scale = 1;
-                       max_scale = skl_max_scale(intel_crtc, crtc_state);
-               }
+       /* use scaler when colorkey is not required */
+       if (INTEL_INFO(plane->dev)->gen >= 9 &&
+           state->ckey.flags == I915_SET_COLORKEY_NONE) {
+               min_scale = 1;
+               max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
                can_position = true;
        }
 
-       ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           src, dest, clip,
-                                           min_scale,
-                                           max_scale,
-                                           can_position, true,
-                                           &state->visible);
-       if (ret)
-               return ret;
-
-       if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
-               struct intel_plane_state *old_state =
-                       to_intel_plane_state(plane->state);
-
-               intel_crtc->atomic.wait_for_flips = true;
-
-               /*
-                * FBC does not work on some platforms for rotated
-                * planes, so disable it when rotation is not 0 and
-                * update it when rotation is set back to 0.
-                *
-                * FIXME: This is redundant with the fbc update done in
-                * the primary plane enable function except that that
-                * one is done too late. We eventually need to unify
-                * this.
-                */
-               if (state->visible &&
-                   INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-                   dev_priv->fbc.crtc == intel_crtc &&
-                   state->base.rotation != BIT(DRM_ROTATE_0)) {
-                       intel_crtc->atomic.disable_fbc = true;
-               }
-
-               if (state->visible && !old_state->visible) {
-                       /*
-                        * BDW signals flip done immediately if the plane
-                        * is disabled, even if the plane enable is already
-                        * armed to occur at the next vblank :(
-                        */
-                       if (IS_BROADWELL(dev))
-                               intel_crtc->atomic.wait_vblank = true;
-
-                       if (crtc_state)
-                               intel_crtc->atomic.post_enable_primary = true;
-               }
-
-               /*
-                * FIXME: Actually if we will still have any other plane enabled
-                * on the pipe we could let IPS enabled still, but for
-                * now lets consider that when we make primary invisible
-                * by setting DSPCNTR to 0 on update_primary_plane function
-                * IPS needs to be disable.
-                */
-               if (!state->visible || !fb)
-                       intel_crtc->atomic.disable_ips = true;
-
-               if (!state->visible && old_state->visible &&
-                   crtc_state && !needs_modeset(&crtc_state->base))
-                       intel_crtc->atomic.pre_disable_primary = true;
-
-               intel_crtc->atomic.fb_bits |=
-                       INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
-               intel_crtc->atomic.update_fbc = true;
-
-               if (intel_wm_need_update(plane, &state->base))
-                       intel_crtc->atomic.update_wm = true;
-       }
-
-       if (INTEL_INFO(dev)->gen >= 9) {
-               ret = skl_update_scaler_users(intel_crtc, crtc_state,
-                       to_intel_plane(plane), state, 0);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
+       return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+                                            &state->dst, &state->clip,
+                                            min_scale, max_scale,
+                                            can_position, true,
+                                            &state->visible);
 }
 
 static void
@@ -13324,20 +13444,19 @@ intel_commit_primary_plane(struct drm_plane *plane,
        crtc->x = src->x1 >> 16;
        crtc->y = src->y1 >> 16;
 
-       if (intel_crtc->active) {
-               if (state->visible)
-                       /* FIXME: kill this fastboot hack */
-                       intel_update_pipe_size(intel_crtc);
+       if (!crtc->state->active)
+               return;
 
-               dev_priv->display.update_primary_plane(crtc, plane->fb,
-                                                      crtc->x, crtc->y);
-       }
+       if (state->visible)
+               /* FIXME: kill this fastboot hack */
+               intel_update_pipe_size(intel_crtc);
+
+       dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
 }
 
 static void
 intel_disable_primary_plane(struct drm_plane *plane,
-                           struct drm_crtc *crtc,
-                           bool force)
+                           struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13345,96 +13464,30 @@ intel_disable_primary_plane(struct drm_plane *plane,
        dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
 }
 
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_plane *intel_plane;
-       struct drm_plane *p;
-       unsigned fb_bits = 0;
-
-       /* Track fb's for any planes being disabled */
-       list_for_each_entry(p, &dev->mode_config.plane_list, head) {
-               intel_plane = to_intel_plane(p);
-
-               if (intel_crtc->atomic.disabled_planes &
-                   (1 << drm_plane_index(p))) {
-                       switch (p->type) {
-                       case DRM_PLANE_TYPE_PRIMARY:
-                               fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
-                               break;
-                       case DRM_PLANE_TYPE_CURSOR:
-                               fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
-                               break;
-                       case DRM_PLANE_TYPE_OVERLAY:
-                               fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
-                               break;
-                       }
-
-                       mutex_lock(&dev->struct_mutex);
-                       i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
-                       mutex_unlock(&dev->struct_mutex);
-               }
-       }
-
-       if (intel_crtc->atomic.wait_for_flips)
-               intel_crtc_wait_for_pending_flips(crtc);
-
-       if (intel_crtc->atomic.disable_fbc)
-               intel_fbc_disable(dev);
-
-       if (intel_crtc->atomic.disable_ips)
-               hsw_disable_ips(intel_crtc);
 
-       if (intel_crtc->atomic.pre_disable_primary)
-               intel_pre_disable_primary(crtc);
-
-       if (intel_crtc->atomic.update_wm)
+       if (intel_crtc->atomic.update_wm_pre)
                intel_update_watermarks(crtc);
 
-       intel_runtime_pm_get(dev_priv);
-
        /* Perform vblank evasion around commit operation */
-       if (intel_crtc->active)
-               intel_crtc->atomic.evade =
-                       intel_pipe_update_start(intel_crtc,
-                                               &intel_crtc->atomic.start_vbl_count);
+       if (crtc->state->active)
+               intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
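+       /* For plane-only updates on gen9, release any scaler slots that
+        * the new state no longer uses. */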
+       if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+               skl_detach_scalers(intel_crtc);
 }
 
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_crtc_state)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *p;
-
-       if (intel_crtc->atomic.evade)
-               intel_pipe_update_end(intel_crtc,
-                                     intel_crtc->atomic.start_vbl_count);
-
-       intel_runtime_pm_put(dev_priv);
-
-       if (intel_crtc->atomic.wait_vblank)
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-       intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
-       if (intel_crtc->atomic.update_fbc) {
-               mutex_lock(&dev->struct_mutex);
-               intel_fbc_update(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       if (intel_crtc->atomic.post_enable_primary)
-               intel_post_enable_primary(crtc);
-
-       drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
-               if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
-                       intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
-                                                      false, false);
-
-       memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+       if (crtc->state->active)
+               intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
 }
 
 /**
@@ -13490,10 +13543,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
        }
        primary->pipe = pipe;
        primary->plane = pipe;
+       primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
        primary->check_plane = intel_check_primary_plane;
        primary->commit_plane = intel_commit_primary_plane;
        primary->disable_plane = intel_disable_primary_plane;
-       primary->ckey.flags = I915_SET_COLORKEY_NONE;
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
                primary->plane = !pipe;
 
@@ -13541,37 +13594,29 @@ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *
 
 static int
 intel_check_cursor_plane(struct drm_plane *plane,
+                        struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
 {
-       struct drm_crtc *crtc = state->base.crtc;
-       struct drm_device *dev = plane->dev;
+       struct drm_crtc *crtc = crtc_state->base.crtc;
        struct drm_framebuffer *fb = state->base.fb;
-       struct drm_rect *dest = &state->dst;
-       struct drm_rect *src = &state->src;
-       const struct drm_rect *clip = &state->clip;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc;
        unsigned stride;
        int ret;
 
-       crtc = crtc ? crtc : plane->crtc;
-       intel_crtc = to_intel_crtc(crtc);
-
-       ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           src, dest, clip,
+       ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+                                           &state->dst, &state->clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            true, true, &state->visible);
        if (ret)
                return ret;
 
-
        /* if we want to turn off the cursor ignore width and height */
        if (!obj)
-               goto finish;
+               return 0;
 
        /* Check for which cursor types we support */
-       if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+       if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          state->base.crtc_w, state->base.crtc_h);
                return -EINVAL;
@@ -13585,34 +13630,16 @@ intel_check_cursor_plane(struct drm_plane *plane,
 
        if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
                DRM_DEBUG_KMS("cursor cannot be tiled\n");
-               ret = -EINVAL;
-       }
-
-finish:
-       if (intel_crtc->active) {
-               if (plane->state->crtc_w != state->base.crtc_w)
-                       intel_crtc->atomic.update_wm = true;
-
-               intel_crtc->atomic.fb_bits |=
-                       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+               return -EINVAL;
        }
 
-       return ret;
+       return 0;
 }
 
 static void
 intel_disable_cursor_plane(struct drm_plane *plane,
-                          struct drm_crtc *crtc,
-                          bool force)
+                          struct drm_crtc *crtc)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       if (!force) {
-               plane->fb = NULL;
-               intel_crtc->cursor_bo = NULL;
-               intel_crtc->cursor_addr = 0;
-       }
-
        intel_crtc_update_cursor(crtc, false);
 }
 
@@ -13645,9 +13672,9 @@ intel_commit_cursor_plane(struct drm_plane *plane,
 
        intel_crtc->cursor_addr = addr;
        intel_crtc->cursor_bo = obj;
-update:
 
-       if (intel_crtc->active)
+update:
+       if (crtc->state->active)
                intel_crtc_update_cursor(crtc, state->visible);
 }
 
@@ -13672,6 +13699,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
        cursor->max_downscale = 1;
        cursor->pipe = pipe;
        cursor->plane = pipe;
+       cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
        cursor->check_plane = intel_check_cursor_plane;
        cursor->commit_plane = intel_commit_cursor_plane;
        cursor->disable_plane = intel_disable_cursor_plane;
@@ -13712,8 +13740,6 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
        for (i = 0; i < intel_crtc->num_scalers; i++) {
                intel_scaler = &scaler_state->scalers[i];
                intel_scaler->in_use = 0;
-               intel_scaler->id = i;
-
                intel_scaler->mode = PS_SCALER_MODE_DYN;
        }
 
@@ -13785,6 +13811,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        intel_crtc->cursor_cntl = ~0;
        intel_crtc->cursor_size = ~0;
 
+       intel_crtc->wm.cxsr_allowed = true;
+
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
               dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13919,8 +13947,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                 */
                found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
-               if (found ||
-                   (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+               if (found || IS_SKYLAKE(dev))
                        intel_ddi_init(dev, PORT_A);
 
                /* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13996,18 +14023,18 @@ static void intel_setup_outputs(struct drm_device *dev)
                }
 
                intel_dsi_init(dev);
-       } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+       } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
                bool found = false;
 
                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev, GEN3_SDVOB, true);
-                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+                       if (!found && IS_G4X(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
                        }
 
-                       if (!found && SUPPORTS_INTEGRATED_DP(dev))
+                       if (!found && IS_G4X(dev))
                                intel_dp_init(dev, DP_B, PORT_B);
                }
 
@@ -14020,15 +14047,15 @@ static void intel_setup_outputs(struct drm_device *dev)
 
                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
-                       if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+                       if (IS_G4X(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
                        }
-                       if (SUPPORTS_INTEGRATED_DP(dev))
+                       if (IS_G4X(dev))
                                intel_dp_init(dev, DP_C, PORT_C);
                }
 
-               if (SUPPORTS_INTEGRATED_DP(dev) &&
+               if (IS_G4X(dev) &&
                    (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev, DP_D, PORT_D);
        } else if (IS_GEN2(dev))
@@ -14073,9 +14100,27 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
        return drm_gem_handle_create(file, &obj->base, handle);
 }
 
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+                                       struct drm_file *file,
+                                       unsigned flags, unsigned color,
+                                       struct drm_clip_rect *clips,
+                                       unsigned num_clips)
+{
+       struct drm_device *dev = fb->dev;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+
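+       /* Called for the DIRTYFB ioctl: flush CPU frontbuffer writes so
+        * that consumers of frontbuffer tracking (FBC, PSR, ...) see the
+        * update. */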
+       mutex_lock(&dev->struct_mutex);
+       intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
+       .dirty = intel_user_framebuffer_dirty,
 };
 
 static
@@ -14281,6 +14326,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
+       .atomic_state_alloc = intel_atomic_state_alloc,
+       .atomic_state_clear = intel_atomic_state_clear,
 };
 
 /* Set up chip specific display functions */
@@ -14307,7 +14354,6 @@ static void intel_init_display(struct drm_device *dev)
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
-               dev_priv->display.off = ironlake_crtc_off;
                dev_priv->display.update_primary_plane =
                        skylake_update_primary_plane;
        } else if (HAS_DDI(dev)) {
@@ -14318,7 +14364,6 @@ static void intel_init_display(struct drm_device *dev)
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
-               dev_priv->display.off = ironlake_crtc_off;
                dev_priv->display.update_primary_plane =
                        ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
@@ -14329,7 +14374,6 @@ static void intel_init_display(struct drm_device *dev)
                        ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
-               dev_priv->display.off = ironlake_crtc_off;
                dev_priv->display.update_primary_plane =
                        ironlake_update_primary_plane;
        } else if (IS_VALLEYVIEW(dev)) {
@@ -14339,7 +14383,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
-               dev_priv->display.off = i9xx_crtc_off;
                dev_priv->display.update_primary_plane =
                        i9xx_update_primary_plane;
        } else {
@@ -14349,7 +14392,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
-               dev_priv->display.off = i9xx_crtc_off;
                dev_priv->display.update_primary_plane =
                        i9xx_update_primary_plane;
        }
@@ -14358,6 +14400,9 @@ static void intel_init_display(struct drm_device *dev)
        if (IS_SKYLAKE(dev))
                dev_priv->display.get_display_clock_speed =
                        skylake_get_display_clock_speed;
+       else if (IS_BROXTON(dev))
+               dev_priv->display.get_display_clock_speed =
+                       broxton_get_display_clock_speed;
        else if (IS_BROADWELL(dev))
                dev_priv->display.get_display_clock_speed =
                        broadwell_get_display_clock_speed;
@@ -14371,9 +14416,21 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.get_display_clock_speed =
                        ilk_get_display_clock_speed;
        else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
-                IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+                IS_GEN6(dev) || IS_IVYBRIDGE(dev))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
+       else if (IS_GM45(dev))
+               dev_priv->display.get_display_clock_speed =
+                       gm45_get_display_clock_speed;
+       else if (IS_CRESTLINE(dev))
+               dev_priv->display.get_display_clock_speed =
+                       i965gm_get_display_clock_speed;
+       else if (IS_PINEVIEW(dev))
+               dev_priv->display.get_display_clock_speed =
+                       pnv_get_display_clock_speed;
+       else if (IS_G33(dev) || IS_G4X(dev))
+               dev_priv->display.get_display_clock_speed =
+                       g33_get_display_clock_speed;
        else if (IS_I915G(dev))
                dev_priv->display.get_display_clock_speed =
                        i915_get_display_clock_speed;
@@ -14391,10 +14448,12 @@ static void intel_init_display(struct drm_device *dev)
                        i865_get_display_clock_speed;
        else if (IS_I85X(dev))
                dev_priv->display.get_display_clock_speed =
-                       i855_get_display_clock_speed;
-       else /* 852, 830 */
+                       i85x_get_display_clock_speed;
+       else { /* 830 */
+               WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
                dev_priv->display.get_display_clock_speed =
                        i830_get_display_clock_speed;
+       }
 
        if (IS_GEN5(dev)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14405,12 +14464,22 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
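+               /* cdclk handling is split into a compute step at atomic
+                * check time (modeset_calc_cdclk) and the hw reprogramming
+                * during commit (modeset_commit_cdclk). */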
+               if (IS_BROADWELL(dev)) {
+                       dev_priv->display.modeset_commit_cdclk =
+                               broadwell_modeset_commit_cdclk;
+                       dev_priv->display.modeset_calc_cdclk =
+                               broadwell_modeset_calc_cdclk;
+               }
        } else if (IS_VALLEYVIEW(dev)) {
-               dev_priv->display.modeset_global_resources =
-                       valleyview_modeset_global_resources;
+               dev_priv->display.modeset_commit_cdclk =
+                       valleyview_modeset_commit_cdclk;
+               dev_priv->display.modeset_calc_cdclk =
+                       valleyview_modeset_calc_cdclk;
        } else if (IS_BROXTON(dev)) {
-               dev_priv->display.modeset_global_resources =
-                       broxton_modeset_global_resources;
+               dev_priv->display.modeset_commit_cdclk =
+                       broxton_modeset_commit_cdclk;
+               dev_priv->display.modeset_calc_cdclk =
+                       broxton_modeset_calc_cdclk;
        }
 
        switch (INTEL_INFO(dev)->gen) {
@@ -14629,13 +14698,9 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+       intel_update_cdclk(dev);
        intel_prepare_ddi(dev);
-
-       if (IS_VALLEYVIEW(dev))
-               vlv_update_cdclk(dev);
-
        intel_init_clock_gating(dev);
-
        intel_enable_gt_powersave(dev);
 }
 
@@ -14715,13 +14780,15 @@ void intel_modeset_init(struct drm_device *dev)
        intel_setup_outputs(dev);
 
        /* Just in case the BIOS is doing something questionable. */
-       intel_fbc_disable(dev);
+       intel_fbc_disable(dev_priv);
 
        drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev, false);
+       intel_modeset_setup_hw_state(dev);
        drm_modeset_unlock_all(dev);
 
        for_each_intel_crtc(dev, crtc) {
+               struct intel_initial_plane_config plane_config = {};
+
                if (!crtc->active)
                        continue;
 
@@ -14732,15 +14799,14 @@ void intel_modeset_init(struct drm_device *dev)
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
-               if (dev_priv->display.get_initial_plane_config) {
-                       dev_priv->display.get_initial_plane_config(crtc,
-                                                          &crtc->plane_config);
-                       /*
-                        * If the fb is shared between multiple heads, we'll
-                        * just get the first one.
-                        */
-                       intel_find_initial_plane_obj(crtc, &crtc->plane_config);
-               }
+               dev_priv->display.get_initial_plane_config(crtc,
+                                                          &plane_config);
+
+               /*
+                * If the fb is shared between multiple heads, we'll
+                * just get the first one.
+                */
+               intel_find_initial_plane_obj(crtc, &plane_config);
        }
 }
 
@@ -14792,7 +14858,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *encoder;
        u32 reg;
+       bool enable;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
        reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14801,6 +14869,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        /* restore vblank interrupts to correct state */
        drm_crtc_vblank_reset(&crtc->base);
        if (crtc->active) {
+               drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                update_scanline_offset(crtc);
                drm_crtc_vblank_on(&crtc->base);
        }
@@ -14809,7 +14878,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
         * disable the crtc (and hence change the state) if it is wrong. Note
         * that gen4+ has a fixed plane -> pipe mapping.  */
        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
-               struct intel_connector *connector;
                bool plane;
 
                DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14821,30 +14889,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                plane = crtc->plane;
                to_intel_plane_state(crtc->base.primary->state)->visible = true;
                crtc->plane = !plane;
-               intel_crtc_disable_planes(&crtc->base);
-               dev_priv->display.crtc_disable(&crtc->base);
+               intel_crtc_disable_noatomic(&crtc->base);
                crtc->plane = plane;
-
-               /* ... and break all links. */
-               for_each_intel_connector(dev, connector) {
-                       if (connector->encoder->base.crtc != &crtc->base)
-                               continue;
-
-                       connector->base.dpms = DRM_MODE_DPMS_OFF;
-                       connector->base.encoder = NULL;
-               }
-               /* multiple connectors may have the same encoder:
-                *  handle them and break crtc link separately */
-               for_each_intel_connector(dev, connector)
-                       if (connector->encoder->base.crtc == &crtc->base) {
-                               connector->encoder->base.crtc = NULL;
-                               connector->encoder->connectors_active = false;
-                       }
-
-               WARN_ON(crtc->active);
-               crtc->base.state->enable = false;
-               crtc->base.state->active = false;
-               crtc->base.enabled = false;
        }
 
        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14858,20 +14904,27 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
-       intel_crtc_update_dpms(&crtc->base);
+       enable = false;
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+               enable = true;
+               break;
+       }
 
-       if (crtc->active != crtc->base.state->enable) {
-               struct intel_encoder *encoder;
+       if (!enable)
+               intel_crtc_disable_noatomic(&crtc->base);
+
+       if (crtc->active != crtc->base.state->active) {
 
                /* This can happen either due to bugs in the get_hw_state
-                * functions or because the pipe is force-enabled due to the
+                * functions, because of calls to intel_crtc_disable_noatomic,
+                * or because the pipe is force-enabled due to the
                 * pipe A quirk. */
                DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
                              crtc->base.base.id,
                              crtc->base.state->enable ? "enabled" : "disabled",
                              crtc->active ? "enabled" : "disabled");
 
-               crtc->base.state->enable = crtc->active;
+               WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
 
@@ -14882,10 +14935,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 *  actually up, hence no need to break them. */
                WARN_ON(crtc->active);
 
-               for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-                       WARN_ON(encoder->connectors_active);
+               for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                        encoder->base.crtc = NULL;
-               }
        }
 
        if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14911,6 +14962,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
        struct intel_connector *connector;
        struct drm_device *dev = encoder->base.dev;
+       bool active = false;
 
        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
@@ -14918,7 +14970,15 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
        bool has_active_crtc = encoder->base.crtc &&
                to_intel_crtc(encoder->base.crtc)->active;
 
-       if (encoder->connectors_active && !has_active_crtc) {
+       for_each_intel_connector(dev, connector) {
+               if (connector->base.encoder != &encoder->base)
+                       continue;
+
+               active = true;
+               break;
+       }
+
+       if (active && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);
@@ -14935,7 +14995,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                                encoder->post_disable(encoder);
                }
                encoder->base.crtc = NULL;
-               encoder->connectors_active = false;
 
                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
@@ -14984,10 +15043,31 @@ static bool primary_get_hw_state(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-       if (!crtc->active)
-               return false;
+       return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
 
-       return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+static void readout_plane_state(struct intel_crtc *crtc,
+                               struct intel_crtc_state *crtc_state)
+{
+       struct intel_plane *p;
+       struct intel_plane_state *plane_state;
+       bool active = crtc_state->base.active;
+
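+       /* Only the primary plane's enable bit is read back from hw; every
+        * other plane on an active pipe is turned off here so the sw
+        * visible state below is known to be correct. */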
+       for_each_intel_plane(crtc->base.dev, p) {
+               if (crtc->pipe != p->pipe)
+                       continue;
+
+               plane_state = to_intel_plane_state(p->base.state);
+
+               if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+                       plane_state->visible = primary_get_hw_state(crtc);
+               else {
+                       if (active)
+                               p->disable_plane(&p->base, &crtc->base);
+
+                       plane_state->visible = false;
+               }
+       }
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15000,23 +15080,44 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        int i;
 
        for_each_intel_crtc(dev, crtc) {
-               struct drm_plane *primary = crtc->base.primary;
-               struct intel_plane_state *plane_state;
-
+               __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
                memset(crtc->config, 0, sizeof(*crtc->config));
                crtc->config->base.crtc = &crtc->base;
 
-               crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
-
                crtc->active = dev_priv->display.get_pipe_config(crtc,
                                                                 crtc->config);
 
-               crtc->base.state->enable = crtc->active;
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
 
-               plane_state = to_intel_plane_state(primary->state);
-               plane_state->visible = primary_get_hw_state(crtc);
+               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+               if (crtc->base.state->active) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+                       /*
+                        * The initial mode needs to be set in order to keep
+                        * the atomic core happy. It wants a valid mode if the
+                        * crtc's enabled, so we do the above call.
+                        *
+                        * At this point the connectors' ->detect() callbacks
+                        * have not run yet, so some of the state they update
+                        * is still missing and no recalculation can be done
+                        * yet.
+                        *
+                        * Even if we could do a recalculation and modeset
+                        * right now it would cause a double modeset if
+                        * fbdev or userspace chooses a different initial mode.
+                        *
+                        * If that happens, someone indicated they wanted a
+                        * mode change, which means it's safe to do a full
+                        * recalculation.
+                        */
+                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+               }
+
+               crtc->base.hwmode = crtc->config->base.adjusted_mode;
+               readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
 
                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
@@ -15055,7 +15156,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                        encoder->base.crtc = NULL;
                }
 
-               encoder->connectors_active = false;
                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id,
                              encoder->base.name,
@@ -15066,7 +15166,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        for_each_intel_connector(dev, connector) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;
-                       connector->encoder->connectors_active = true;
                        connector->base.encoder = &connector->encoder->base;
                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15079,10 +15178,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        }
 }
 
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
-                                 bool force_restore)
+/* Scan out the current hw modeset state and sanitize it, bringing the
+ * sw tracking structures in line with the actual hw state.
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
@@ -15092,21 +15192,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
        intel_modeset_readout_hw_state(dev);
 
-       /*
-        * Now that we have the config, copy it to each CRTC struct
-        * Note that this could go away if we move to using crtc_config
-        * checking everywhere.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (crtc->active && i915.fastboot) {
-                       intel_mode_from_pipe_config(&crtc->base.mode,
-                                                   crtc->config);
-                       DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
-                                     crtc->base.base.id);
-                       drm_mode_debug_printmodeline(&crtc->base.mode);
-               }
-       }
-
        /* HW state is read out, now we need to sanitize this mess. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
@@ -15133,29 +15218,73 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                pll->on = false;
        }
 
-       if (IS_GEN9(dev))
+       if (IS_VALLEYVIEW(dev))
+               vlv_wm_get_hw_state(dev);
+       else if (IS_GEN9(dev))
                skl_wm_get_hw_state(dev);
        else if (HAS_PCH_SPLIT(dev))
                ilk_wm_get_hw_state(dev);
 
-       if (force_restore) {
-               i915_redisable_vga(dev);
+       for_each_intel_crtc(dev, crtc) {
+               unsigned long put_domains;
 
-               /*
-                * We need to use raw interfaces for restoring state to avoid
-                * checking (bogus) intermediate states.
-                */
-               for_each_pipe(dev_priv, pipe) {
-                       struct drm_crtc *crtc =
-                               dev_priv->pipe_to_crtc_mapping[pipe];
+               put_domains = modeset_get_crtc_power_domains(&crtc->base);
+               if (WARN_ON(put_domains))
+                       modeset_put_power_domains(dev_priv, put_domains);
+       }
+       intel_display_set_init_power(dev_priv, false);
+}
 
-                       intel_crtc_restore_mode(crtc);
-               }
-       } else {
-               intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+       struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+       struct intel_connector *conn;
+       struct intel_plane *plane;
+       struct drm_crtc *crtc;
+       int ret;
+
+       if (!state)
+               return;
+
+       state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+       /* preserve complete old state, including dpll */
+       intel_atomic_get_shared_dpll_state(state);
+
+       for_each_crtc(dev, crtc) {
+               struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_crtc_state(state, crtc);
+
+               ret = PTR_ERR_OR_ZERO(crtc_state);
+               if (ret)
+                       goto err;
+
+               /* force a restore */
+               crtc_state->mode_changed = true;
        }
 
-       intel_modeset_check_state(dev);
+       for_each_intel_plane(dev, plane) {
+               ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+               if (ret)
+                       goto err;
+       }
+
+       for_each_intel_connector(dev, conn) {
+               ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+               if (ret)
+                       goto err;
+       }
+
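+       /* With the full old state duplicated above, re-read and sanitize
+        * the hw state, then force-restore the saved state through a
+        * normal atomic commit. */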
+       intel_modeset_setup_hw_state(dev);
+
+       i915_redisable_vga(dev);
+       ret = drm_atomic_commit(state);
+       if (!ret)
+               return;
+
+err:
+       DRM_ERROR("Restoring old state failed with %i\n", ret);
+       drm_atomic_state_free(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -15197,14 +15326,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
                ret = intel_pin_and_fence_fb_obj(c->primary,
                                                 c->primary->fb,
                                                 c->primary->state,
-                                                NULL);
+                                                NULL, NULL);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        c->primary->fb = NULL;
+                       c->primary->crtc = c->primary->state->crtc = NULL;
                        update_state_fb(c->primary);
+                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }
 
@@ -15241,13 +15372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
         */
        drm_kms_helper_poll_fini(dev);
 
-       mutex_lock(&dev->struct_mutex);
-
        intel_unregister_dsm_handler();
 
-       intel_fbc_disable(dev);
-
-       mutex_unlock(&dev->struct_mutex);
+       intel_fbc_disable(dev_priv);
 
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
index 6e8faa25379240cab60f57631adf42612f28df33..016e7bc6af0abe33dcec4dd5d37b04420a042a0f 100644 (file)
@@ -91,6 +91,8 @@ static const struct dp_link_dpll chv_dpll[] = {
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+                                 324000, 432000, 540000 };
 static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
@@ -565,7 +567,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-       if (HAS_PCH_SPLIT(dev))
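+       /* Broxton has its own panel power sequencer registers; only the
+        * first PPS instance is used here. */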
+       if (IS_BROXTON(dev))
+               return BXT_PP_CONTROL(0);
+       else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -575,7 +579,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (IS_BROXTON(dev))
+               return BXT_PP_STATUS(0);
+       else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -708,7 +714,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
                return 0;
 
        if (intel_dig_port->port == PORT_A) {
-               return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+               return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
        } else {
                return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
@@ -723,7 +730,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
-               return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+               return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
@@ -842,8 +849,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        }
 
        if (try == 3) {
-               WARN(1, "dp_aux_ch not started status 0x%08x\n",
-                    I915_READ(ch_ctl));
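+               /* WARN only when the stuck status value changes, so a dead
+                * AUX channel doesn't spam the log on every transfer. */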
+               static u32 last_status = -1;
+               const u32 status = I915_READ(ch_ctl);
+
+               if (status != last_status) {
+                       WARN(1, "dp_aux_ch not started status 0x%08x\n",
+                            status);
+                       last_status = status;
+               }
+
                ret = -EBUSY;
                goto out;
        }
@@ -1019,11 +1033,34 @@ static void
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
+       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
        const char *name = NULL;
+       uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
        int ret;
 
+       /* On SKL we don't have AUX hardware for port E, so we rely on the
+        * VBT to provide a proper alternate AUX channel.
+        */
+       if (IS_SKYLAKE(dev) && port == PORT_E) {
+               switch (info->alternate_aux_channel) {
+               case DP_AUX_B:
+                       porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+                       break;
+               case DP_AUX_C:
+                       porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+                       break;
+               case DP_AUX_D:
+                       porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+                       break;
+               case DP_AUX_A:
+               default:
+                       porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+               }
+       }
+
        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1041,6 +1078,10 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
+       case PORT_E:
+               intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+               name = "DPDDC-E";
+               break;
        default:
                BUG();
        }
@@ -1054,7 +1095,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
         *
         * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
         */
-       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
        intel_dp->aux.name = name;
@@ -1172,7 +1213,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-       if (IS_SKYLAKE(dev)) {
+       if (IS_BROXTON(dev)) {
+               *source_rates = bxt_rates;
+               return ARRAY_SIZE(bxt_rates);
+       } else if (IS_SKYLAKE(dev)) {
                *source_rates = skl_rates;
                return ARRAY_SIZE(skl_rates);
        } else if (IS_CHERRYVIEW(dev)) {
@@ -1374,7 +1418,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
                if (INTEL_INFO(dev)->gen >= 9) {
                        int ret;
-                       ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+                       ret = skl_update_scaler_crtc(pipe_config);
                        if (ret)
                                return ret;
                }
@@ -1399,7 +1443,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
        if (is_edp(intel_dp)) {
-               if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+               /* Get bpp from VBT only for panels that don't report bpp in their EDID */
+               if (intel_connector->base.display_info.bpc == 0 &&
+                       (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp_bpp);
                        bpp = dev_priv->vbt.edp_bpp;
@@ -1699,8 +1746,10 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
        lockdep_assert_held(&dev_priv->pps_mutex);
 
        control = I915_READ(_pp_ctrl_reg(intel_dp));
-       control &= ~PANEL_UNLOCK_MASK;
-       control |= PANEL_UNLOCK_REGS;
+       if (!IS_BROXTON(dev)) {
+               control &= ~PANEL_UNLOCK_MASK;
+               control |= PANEL_UNLOCK_REGS;
+       }
        return control;
 }
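[Editor's note: ironlake_get_pp_control() above rewrites the unlock-key field of PP_CONTROL on every read, except on Broxton, whose register has no such key. The read-modify-write shape as a standalone sketch; the mask and key constants are illustrative, not copied from i915_reg.h:]

#include <stdint.h>

#define UNLOCK_MASK (0xffffu << 16)     /* illustrative */
#define UNLOCK_KEY  (0xabcdu << 16)     /* illustrative */

/* Replace the key field while preserving the low control bits;
 * platforms without a key pass the value through untouched. */
static uint32_t with_unlock_key(uint32_t control, int has_key)
{
        if (has_key) {
                control &= ~UNLOCK_MASK;
                control |= UNLOCK_KEY;
        }
        return control;
}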
 
@@ -2612,7 +2661,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));
 
-               WARN(encoder->connectors_active,
+               WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));
 
@@ -3414,92 +3463,6 @@ gen7_edp_signal_levels(uint8_t train_set)
        }
 }
 
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
-       switch (signal_levels) {
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               return DDI_BUF_TRANS_SELECT(0);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               return DDI_BUF_TRANS_SELECT(1);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-               return DDI_BUF_TRANS_SELECT(2);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-               return DDI_BUF_TRANS_SELECT(3);
-
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               return DDI_BUF_TRANS_SELECT(4);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               return DDI_BUF_TRANS_SELECT(5);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-               return DDI_BUF_TRANS_SELECT(6);
-
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               return DDI_BUF_TRANS_SELECT(7);
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               return DDI_BUF_TRANS_SELECT(8);
-
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               return DDI_BUF_TRANS_SELECT(9);
-       default:
-               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-                             "0x%x\n", signal_levels);
-               return DDI_BUF_TRANS_SELECT(0);
-       }
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       enum port port = dport->port;
-       struct drm_device *dev = dport->base.base.dev;
-       struct intel_encoder *encoder = &dport->base;
-       uint8_t train_set = intel_dp->train_set[0];
-       uint32_t level = 0;
-
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
-       switch (signal_levels) {
-       default:
-               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               level = 0;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               level = 1;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-               level = 2;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-               level = 3;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               level = 4;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               level = 5;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-               level = 6;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               level = 7;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-               level = 8;
-               break;
-       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-               level = 9;
-               break;
-       }
-
-       bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3507,22 +3470,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       uint32_t signal_levels, mask;
+       uint32_t signal_levels, mask = 0;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_BROXTON(dev)) {
-               signal_levels = 0;
-               bxt_signal_levels(intel_dp);
-               mask = 0;
-       } else if (HAS_DDI(dev)) {
-               signal_levels = hsw_signal_levels(train_set);
-               mask = DDI_BUF_EMP_MASK;
+       if (HAS_DDI(dev)) {
+               signal_levels = ddi_signal_levels(intel_dp);
+
+               if (IS_BROXTON(dev))
+                       signal_levels = 0;
+               else
+                       mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
                signal_levels = chv_signal_levels(intel_dp);
-               mask = 0;
        } else if (IS_VALLEYVIEW(dev)) {
                signal_levels = vlv_signal_levels(intel_dp);
-               mask = 0;
        } else if (IS_GEN7(dev) && port == PORT_A) {
                signal_levels = gen7_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -4034,43 +3995,67 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
        return intel_dp->is_mst;
 }
 
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(intel_dig_port->base.base.crtc);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
        u8 buf;
-       int test_crc_count;
-       int attempts = 6;
-       int ret = 0;
 
-       hsw_disable_ips(intel_crtc);
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
-               ret = -EIO;
-               goto out;
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+               return;
        }
 
-       if (!(buf & DP_TEST_CRC_SUPPORTED)) {
-               ret = -ENOTTY;
-               goto out;
-       }
+       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+                              buf & ~DP_TEST_SINK_START) < 0)
+               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
 
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-               ret = -EIO;
-               goto out;
-       }
+       hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+       u8 buf;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+               return -EIO;
+
+       if (!(buf & DP_TEST_CRC_SUPPORTED))
+               return -ENOTTY;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+               return -EIO;
+
+       hsw_disable_ips(intel_crtc);
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                               buf | DP_TEST_SINK_START) < 0) {
-               ret = -EIO;
-               goto out;
+                              buf | DP_TEST_SINK_START) < 0) {
+               hsw_enable_ips(intel_crtc);
+               return -EIO;
        }
 
+       return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+       u8 buf;
+       int test_crc_count;
+       int attempts = 6;
+       int ret;
+
+       ret = intel_dp_sink_crc_start(intel_dp);
+       if (ret)
+               return ret;
+
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
                ret = -EIO;
-               goto out;
+               goto stop;
        }
 
        test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4079,7 +4064,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_TEST_SINK_MISC, &buf) < 0) {
                        ret = -EIO;
-                       goto out;
+                       goto stop;
                }
                intel_wait_for_vblank(dev, intel_crtc->pipe);
        } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
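[Editor's note: the loop above is bounded polling — re-read the sink's CRC test count each vblank and give up after six attempts. The same shape as a self-contained sketch, where the callback stands in for the DPCD read and the vblank wait is elided:]

#include <stdbool.h>

/* Poll until the counter changes from its initial value, giving the
 * sink at most 'attempts' tries; returns false on timeout. */
static bool poll_count_changed(int (*read_count)(void),
                               int initial, int attempts)
{
        while (attempts--) {
                if (read_count() != initial)
                        return true;
                /* in the driver: intel_wait_for_vblank() here */
        }
        return false;
}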
@@ -4087,25 +4072,13 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
        if (attempts == 0) {
                DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
                ret = -ETIMEDOUT;
-               goto out;
+               goto stop;
        }
 
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
                ret = -EIO;
-               goto out;
-       }
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-               ret = -EIO;
-               goto out;
-       }
-       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf & ~DP_TEST_SINK_START) < 0) {
-               ret = -EIO;
-               goto out;
-       }
-out:
-       hsw_enable_ips(intel_crtc);
+stop:
+       intel_dp_sink_crc_stop(intel_dp);
        return ret;
 }
 
@@ -4166,9 +4139,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
                                      intel_dp->aux.i2c_defer_count);
                intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
        } else {
+               struct edid *block = intel_connector->detect_edid;
+
+               /* Write back the checksum of the last EDID
+                * block read, hence the index past extensions.
+                */
+               block += intel_connector->detect_edid->extensions;
+
                if (!drm_dp_dpcd_write(&intel_dp->aux,
                                        DP_TEST_EDID_CHECKSUM,
-                                       &intel_connector->detect_edid->checksum,
+                                       &block->checksum,
                                        1))
                        DRM_DEBUG_KMS("Failed to write EDID checksum\n");
 
@@ -4316,10 +4296,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-       if (!intel_encoder->connectors_active)
-               return;
-
-       if (WARN_ON(!intel_encoder->base.crtc))
+       if (!intel_encoder->base.crtc)
                return;
 
        if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4900,7 +4877,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -4922,12 +4899,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .destroy = intel_dp_encoder_destroy,
 };
 
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
-       return;
-}
-
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
@@ -5095,8 +5066,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
-       u32 pp_on, pp_off, pp_div, pp;
-       int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+       u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+       int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5104,7 +5075,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        if (final->t11_t12 != 0)
                return;
 
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_BROXTON(dev)) {
+               /*
+                * TODO: BXT has two sets of PPS registers.
+                * The correct set for Broxton needs to be identified
+                * from the VBT; hardcode set 0 for now.
+                */
+               pp_ctrl_reg = BXT_PP_CONTROL(0);
+               pp_on_reg = BXT_PP_ON_DELAYS(0);
+               pp_off_reg = BXT_PP_OFF_DELAYS(0);
+       } else if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5120,12 +5100,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
-       pp = ironlake_get_pp_control(intel_dp);
-       I915_WRITE(pp_ctrl_reg, pp);
+       pp_ctl = ironlake_get_pp_control(intel_dp);
 
        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
-       pp_div = I915_READ(pp_div_reg);
+       if (!IS_BROXTON(dev)) {
+               I915_WRITE(pp_ctrl_reg, pp_ctl);
+               pp_div = I915_READ(pp_div_reg);
+       }
 
        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5140,8 +5122,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;
 
-       cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+       if (IS_BROXTON(dev)) {
+               u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+                       BXT_POWER_CYCLE_DELAY_SHIFT;
+               if (tmp > 0)
+                       cur.t11_t12 = (tmp - 1) * 1000;
+               else
+                       cur.t11_t12 = 0;
+       } else {
+               cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+       }
 
        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
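[Editor's note: the BXT decode above round-trips with the encode added later in this patch — a register field value f decodes to (f - 1) * 1000, and DIV_ROUND_UP(t + 1, 1000) maps that back to f. A quick pure-arithmetic sketch to verify, with no i915 dependencies:]

#include <assert.h>

static unsigned int bxt_decode(unsigned int field)
{
        return field > 0 ? (field - 1) * 1000 : 0;
}

static unsigned int bxt_encode(unsigned int t11_t12)
{
        return (t11_t12 + 1 + 999) / 1000;      /* DIV_ROUND_UP */
}

int main(void)
{
        /* field 3 decodes to 2000 and encodes back to 3 */
        assert(bxt_decode(3) == 2000);
        assert(bxt_encode(bxt_decode(3)) == 3);
        assert(bxt_encode(bxt_decode(1)) == 1);
        return 0;
}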
@@ -5198,13 +5189,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
-       int pp_on_reg, pp_off_reg, pp_div_reg;
+       int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_BROXTON(dev)) {
+               /*
+                * TODO: BXT has two sets of PPS registers.
+                * The correct set for Broxton needs to be identified
+                * from the VBT; hardcode set 0 for now.
+                */
+               pp_ctrl_reg = BXT_PP_CONTROL(0);
+               pp_on_reg = BXT_PP_ON_DELAYS(0);
+               pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+       } else if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
@@ -5230,9 +5231,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
-       pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
-       pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-                       << PANEL_POWER_CYCLE_DELAY_SHIFT);
+       if (IS_BROXTON(dev)) {
+               pp_div = I915_READ(pp_ctrl_reg);
+               pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+               pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+                               << BXT_POWER_CYCLE_DELAY_SHIFT);
+       } else {
+               pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+               pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+                               << PANEL_POWER_CYCLE_DELAY_SHIFT);
+       }
 
        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
@@ -5249,11 +5257,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
-       I915_WRITE(pp_div_reg, pp_div);
+       if (IS_BROXTON(dev))
+               I915_WRITE(pp_ctrl_reg, pp_div);
+       else
+               I915_WRITE(pp_div_reg, pp_div);
 
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
+                     IS_BROXTON(dev) ?
+                     (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
                      I915_READ(pp_div_reg));
 }
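[Editor's note: the non-BXT branch above packs two fields into PP_DIVISOR — the reference divider derived from the raw clock and the power-cycle delay in whole units of 1000. Plugging in numbers as a sketch; 24 as the raw-clock value and 500 as the delay are illustrations only:]

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int div = 24;                             /* illustrative rawclk */
        int t11_t12 = 500;                        /* illustrative delay */
        int ref = (100 * div) / 2 - 1;            /* -> 1199 */
        int cycles = DIV_ROUND_UP(t11_t12, 1000); /* -> 1 */

        printf("ref=%d cycles=%d\n", ref, cycles);
        return 0;
}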
 
@@ -5458,13 +5471,12 @@ unlock:
 }
 
 /**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5489,26 +5501,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
 
-       if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+       dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+       /* invalidate means busy screen hence upclock */
+       if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                                dev_priv->drrs.dp->attached_connector->panel.
                                fixed_mode->vrefresh);
-       }
 
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
-       dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
        mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. DRRS should then be upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
+ * other planes are dirty.
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5532,10 +5545,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
+
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-       if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
-                       !dev_priv->drrs.busy_frontbuffer_bits)
+       /* flush means busy screen hence upclock */
+       if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+               intel_dp_set_drrs_state(dev_priv->dev,
+                               dev_priv->drrs.dp->attached_connector->panel.
+                               fixed_mode->vrefresh);
+
+       /*
+        * A flush also means there is no more activity, hence schedule the
+        * downclock if all other fbs are quiescent too.
+        */
+       if (!dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5939,10 +5963,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;
-       intel_encoder->hot_plug = intel_dp_hot_plug;
 
        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-       dev_priv->hpd_irq_port[port] = intel_dig_port;
+       dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
        if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
                drm_encoder_cleanup(encoder);
@@ -5958,7 +5981,7 @@ void intel_dp_mst_suspend(struct drm_device *dev)
 
        /* disable MST */
        for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port)
                        continue;
 
@@ -5977,7 +6000,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
        int i;
 
        for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port)
                        continue;
                if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
index 600afdbef8c9a434f51d527c5d85e202c36bae2b..f4fe1183bae694fd8446611b400d2128bc7128a1 100644 (file)
@@ -328,7 +328,7 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_mst_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_mst_set_property,
@@ -452,10 +452,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
        drm_mode_connector_set_path_property(connector, pathprop);
-       drm_reinit_primary_mode_group(dev);
-       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
-       mutex_unlock(&dev->mode_config.mutex);
+       drm_modeset_unlock_all(dev);
        drm_connector_register(&intel_connector->base);
        return connector;
 }
@@ -465,19 +464,28 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
+
        /* need to nuke the connector */
-       mutex_lock(&dev->mode_config.mutex);
-       intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-       mutex_unlock(&dev->mode_config.mutex);
+       drm_modeset_lock_all(dev);
+       if (connector->state->crtc) {
+               struct drm_mode_set set;
+               int ret;
+
+               memset(&set, 0, sizeof(set));
+               set.crtc = connector->state->crtc;
+
+               ret = drm_atomic_helper_set_config(&set);
+
+               WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+       }
+       drm_modeset_unlock_all(dev);
 
        intel_connector->unregister(intel_connector);
 
-       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_lock_all(dev);
        intel_connector_remove_from_fbdev(intel_connector);
        drm_connector_cleanup(connector);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       drm_reinit_primary_mode_group(dev);
+       drm_modeset_unlock_all(dev);
 
        kfree(intel_connector);
        DRM_DEBUG_KMS("\n");
index 105928382e216239043faf4e651d10520c92b678..e31b95bf6966cdf6f52e1cc2cb1abdb6ff5a3af3 100644 (file)
@@ -130,15 +130,9 @@ struct intel_fbdev {
 
 struct intel_encoder {
        struct drm_encoder base;
-       /*
-        * The new crtc this encoder will be driven from. Only differs from
-        * base->crtc while a modeset is in progress.
-        */
-       struct intel_crtc *new_crtc;
 
        enum intel_output_type type;
        unsigned int cloneable;
-       bool connectors_active;
        void (*hot_plug)(struct intel_encoder *);
        bool (*compute_config)(struct intel_encoder *,
                               struct intel_crtc_state *);
@@ -182,6 +176,10 @@ struct intel_panel {
                bool enabled;
                bool combination_mode;  /* gen 2/4 only */
                bool active_low_pwm;
+
+               /* PWM chip */
+               struct pwm_device *pwm;
+
                struct backlight_device *device;
        } backlight;
 
@@ -195,12 +193,6 @@ struct intel_connector {
         */
        struct intel_encoder *encoder;
 
-       /*
-        * The new encoder this connector will be driven. Only differs from
-        * encoder while a modeset is in progress.
-        */
-       struct intel_encoder *new_encoder;
-
        /* Reads out the current hw, returning true if the connector is enabled
         * and active (i.e. dpms ON state). */
        bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@ typedef struct dpll {
        int     p;
 } intel_clock_t;
 
+struct intel_atomic_state {
+       struct drm_atomic_state base;
+
+       unsigned int cdclk;
+       bool dpll_set;
+       struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect src;
@@ -256,7 +256,7 @@ struct intel_plane_state {
         * plane requiring a scaler:
         *   - During check_plane, its bit is set in
         *     crtc_state->scaler_state.scaler_users by calling helper function
-        *     update_scaler_users.
+        *     update_scaler_plane.
         *   - scaler_id indicates the scaler it got assigned.
         *
         * plane doesn't require a scaler:
@@ -264,9 +264,11 @@ struct intel_plane_state {
         *     got disabled.
         *   - During check_plane, corresponding bit is reset in
         *     crtc_state->scaler_state.scaler_users by calling helper function
-        *     update_scaler_users.
+        *     update_scaler_plane.
         */
        int scaler_id;
+
+       struct drm_intel_sprite_colorkey ckey;
 };
 
 struct intel_initial_plane_config {
@@ -286,7 +288,6 @@ struct intel_initial_plane_config {
 #define SKL_MAX_DST_H 4096
 
 struct intel_scaler {
-       int id;
        int in_use;
        uint32_t mode;
 };
@@ -319,6 +320,9 @@ struct intel_crtc_scaler_state {
        int scaler_id;
 };
 
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
 struct intel_crtc_state {
        struct drm_crtc_state base;
 
@@ -331,7 +335,6 @@ struct intel_crtc_state {
         * accordingly.
         */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS      (1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE       (1<<1) /* mode inherited from firmware */
        unsigned long quirks;
 
        /* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@ struct intel_crtc_state {
        int pbn;
 
        struct intel_crtc_scaler_state scaler_state;
+
+       /* w/a for waiting 2 vblanks during crtc enable */
+       enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+       struct vlv_pipe_wm wm[3];
+       struct vlv_sr_wm sr[3];
+       uint8_t num_active_planes;
+       uint8_t num_levels;
+       uint8_t level;
+       bool cxsr;
 };
 
 struct intel_pipe_wm {
@@ -478,16 +493,13 @@ struct skl_pipe_wm {
  * and thus can't be run with interrupts disabled.
  */
 struct intel_crtc_atomic_commit {
-       /* vblank evasion */
-       bool evade;
-       unsigned start_vbl_count;
-
        /* Sleepable operations to perform before commit */
        bool wait_for_flips;
        bool disable_fbc;
        bool disable_ips;
+       bool disable_cxsr;
        bool pre_disable_primary;
-       bool update_wm;
+       bool update_wm_pre, update_wm_post;
        unsigned disabled_planes;
 
        /* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@ struct intel_crtc {
        uint32_t cursor_size;
        uint32_t cursor_base;
 
-       struct intel_initial_plane_config plane_config;
        struct intel_crtc_state *config;
-       bool new_enabled;
 
        /* reset counter value when the last flip was submitted */
        unsigned int reset_counter;
@@ -544,14 +554,19 @@ struct intel_crtc {
                struct intel_pipe_wm active;
                /* SKL wm values currently in use */
                struct skl_pipe_wm skl_active;
+               /* allow CxSR on this pipe */
+               bool cxsr_allowed;
        } wm;
 
        int scanline_offset;
 
+       unsigned start_vbl_count;
        struct intel_crtc_atomic_commit atomic;
 
        /* scalers available on this crtc */
        int num_scalers;
+
+       struct vlv_wm_state wm_state;
 };
 
 struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@ struct intel_plane_wm_parameters {
        bool scaled;
        u64 tiling;
        unsigned int rotation;
+       uint16_t fifo_size;
 };
 
 struct intel_plane {
@@ -578,9 +594,7 @@ struct intel_plane {
        enum pipe pipe;
        bool can_scale;
        int max_downscale;
-
-       /* FIXME convert to properties */
-       struct drm_intel_sprite_colorkey ckey;
+       uint32_t frontbuffer_bit;
 
        /* Since we need to change the watermarks before/after
         * enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@ struct intel_plane {
                             uint32_t x, uint32_t y,
                             uint32_t src_w, uint32_t src_h);
        void (*disable_plane)(struct drm_plane *plane,
-                             struct drm_crtc *crtc, bool force);
+                             struct drm_crtc *crtc);
        int (*check_plane)(struct drm_plane *plane,
+                          struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
        void (*commit_plane)(struct drm_plane *plane,
                             struct intel_plane_state *state);
@@ -629,6 +644,7 @@ struct cxsr_latency {
        unsigned long cursor_hpll_disable;
 };
 
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
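[Editor's note: the new struct intel_atomic_state above and the to_intel_atomic_state() macro here are the standard base-object embedding pattern — the driver struct embeds the core DRM state as its first member, and container_of() recovers the wrapper from a core pointer. A userspace rendition with illustrative names:]

#include <stddef.h>

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int dummy; };       /* stands in for drm_atomic_state */

struct wrapped_state {
        struct base_state base;         /* embedded base object */
        unsigned int cdclk;             /* driver-private extension */
};

/* Downcast from the base pointer handed out by the core. */
static struct wrapped_state *to_wrapped(struct base_state *s)
{
        return container_of(s, struct wrapped_state, base);
}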
@@ -940,43 +956,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
 void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-                               enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            struct intel_engine_cs *ring,
                             enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
                                    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
                                     unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
 void intel_frontbuffer_flip(struct drm_device *dev,
-                           unsigned frontbuffer_bits)
-{
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+                           unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
                                   unsigned int height,
                                   uint32_t pixel_format,
                                   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+                       enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
                              uint32_t pixel_format);
 
@@ -994,15 +990,11 @@ int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
                                struct intel_digital_port *port);
 void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
                               struct drm_framebuffer *fb,
                               const struct drm_plane_state *plane_state,
-                              struct intel_engine_cs *pipelined);
+                              struct intel_engine_cs *pipelined,
+                              struct drm_i915_gem_request **pipelined_request);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
                                    struct drm_plane_state *state,
                                    struct drm_property *property,
                                    uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+                                   struct drm_plane_state *plane_state);
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@ intel_rotation_90_or_270(unsigned int rotation)
 void intel_create_rotation_property(struct drm_device *dev,
                                        struct intel_plane *plane);
 
-bool intel_wm_need_update(struct drm_plane *plane,
-                         struct drm_plane_state *state);
-
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                                                struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
                      const struct dpll *dpll);
@@ -1104,7 +1095,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+                                            int *x, int *y,
                                             unsigned int tiling_mode,
                                             unsigned int bpp,
                                             unsigned int pitch);
@@ -1114,7 +1106,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_device *dev);
 void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
 void broxton_ddi_phy_init(struct drm_device *dev);
 void broxton_ddi_phy_uninit(struct drm_device *dev);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
                                int dotclock);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
                        intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
-       struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
-       struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1238,15 +1229,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin);
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-                    unsigned int frontbuffer_bits);
+                    unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1308,13 @@ void intel_backlight_unregister(struct drm_device *dev);
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
 void intel_psr_invalidate(struct drm_device *dev,
-                             unsigned frontbuffer_bits);
+                         unsigned frontbuffer_bits);
 void intel_psr_flush(struct drm_device *dev,
-                        unsigned frontbuffer_bits);
+                    unsigned frontbuffer_bits,
+                    enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+                                  unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1368,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
                    unsigned long submitted);
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
                                       struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1381,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
 /* intel_sprite.c */
 int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
                             uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
@@ -1395,11 +1391,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 void intel_tv_init(struct drm_device *dev);
 
 /* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
-                      struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
-                       struct drm_atomic_state *state,
-                       bool async);
 int intel_connector_atomic_get_property(struct drm_connector *connector,
                                        const struct drm_connector_state *state,
                                        struct drm_property *property,
@@ -1407,6 +1398,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
                               struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
 static inline struct intel_crtc_state *
 intel_atomic_get_crtc_state(struct drm_atomic_state *state,
                            struct intel_crtc *crtc)
index b5a5558ecd6314c9014687125e4b6d5f671ab78b..4a601cf90f16c68d694babd065bbae09cbe6e9f7 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
 #include <linux/slab.h>
+#include <linux/gpio/consumer.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -261,11 +262,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
        return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
-       DRM_DEBUG_KMS("\n");
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *config)
 {
@@ -401,6 +397,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 
                intel_dsi_port_enable(encoder);
        }
+
+       intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
+       /* Panel Enable over CRC PMIC */
+       if (intel_dsi->gpio_panel)
+               gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+       msleep(intel_dsi->panel_on_delay);
+
        /* Disable DPOunit clock gating, can stall pipe
         * and we need DPLL REFA always enabled */
        tmp = I915_READ(DPLL(pipe));
-       tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+       tmp |= DPLL_REF_CLK_ENABLE_VLV;
        I915_WRITE(DPLL(pipe), tmp);
 
        /* update the hw state for DPLL */
-       intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
-               DPLL_REFA_CLK_ENABLE_VLV;
+       intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 
        tmp = I915_READ(DSPCLK_GATE_D);
        tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
        /* put device in ready state */
        intel_dsi_device_ready(encoder);
 
-       msleep(intel_dsi->panel_on_delay);
-
        drm_panel_prepare(intel_dsi->panel);
 
        for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
+       intel_panel_disable_backlight(intel_dsi->attached_connector);
+
        if (is_vid_mode(intel_dsi)) {
                /* Send Shutdown command to the panel in LP mode */
                for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 
        msleep(intel_dsi->panel_off_delay);
        msleep(intel_dsi->panel_pwr_cycle_delay);
+
+       /* Panel Disable over CRC PMIC */
+       if (intel_dsi->gpio_panel)
+               gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
                /* XXX: Logically this call belongs in the panel driver. */
                drm_panel_remove(intel_dsi->panel);
        }
+
+       /* dispose of the gpios */
+       if (intel_dsi->gpio_panel)
+               gpiod_put(intel_dsi->gpio_panel);
+
        intel_encoder_destroy(encoder);
 }
 
@@ -969,7 +982,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dsi_detect,
        .destroy = intel_dsi_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@ void intel_dsi_init(struct drm_device *dev)
        drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
        /* XXX: very likely not all of these are needed */
-       intel_encoder->hot_plug = intel_dsi_hot_plug;
        intel_encoder->compute_config = intel_dsi_compute_config;
        intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
        intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1071,6 +1083,20 @@ void intel_dsi_init(struct drm_device *dev)
                goto err;
        }
 
+       /*
+        * On BYT with the CRC PMIC, we need to use a GPIO for
+        * panel control.
+        */
+       if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+               intel_dsi->gpio_panel =
+                       gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+               if (IS_ERR(intel_dsi->gpio_panel)) {
+                       DRM_ERROR("Failed to own gpio for panel control\n");
+                       intel_dsi->gpio_panel = NULL;
+               }
+       }
+
        intel_encoder->type = INTEL_OUTPUT_DSI;
        intel_encoder->cloneable = 0;
        drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1130,7 @@ void intel_dsi_init(struct drm_device *dev)
        }
 
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
 
index 2784ac442368a6a37e2f3d516406a38a63570e8d..42a68593e32aac97b8a0c43dcae13c9a5fcafcb7 100644 (file)
@@ -42,6 +42,9 @@ struct intel_dsi {
        struct drm_panel *panel;
        struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
+       /* GPIO descriptor for CRC PMIC based panel control */
+       struct gpio_desc *gpio_panel;
+
        struct intel_connector *attached_connector;
 
        /* bit mask of ports being driven */
index d20cf37b6901b68fa9926edeb3019e0b407f5af2..c6a8975b128f123da9ebae9b7236ae37e79d85c0 100644 (file)
 #define DSI_HFP_PACKET_EXTRA_SIZE      6
 #define DSI_EOTP_PACKET_SIZE           4
 
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+       int bpp;
+
+       switch (pixel_format) {
+       default:
+       case VID_MODE_FORMAT_RGB888:
+       case VID_MODE_FORMAT_RGB666_LOOSE:
+               bpp = 24;
+               break;
+       case VID_MODE_FORMAT_RGB666:
+               bpp = 18;
+               break;
+       case VID_MODE_FORMAT_RGB565:
+               bpp = 16;
+               break;
+       }
+
+       return bpp;
+}
+
 struct dsi_mnp {
        u32 dsi_pll_ctrl;
        u32 dsi_pll_div;
@@ -46,8 +67,8 @@ struct dsi_mnp {
 static const u32 lfsr_converts[] = {
        426, 469, 234, 373, 442, 221, 110, 311, 411,            /* 62 - 70 */
        461, 486, 243, 377, 188, 350, 175, 343, 427, 213,       /* 71 - 80 */
-       106, 53, 282, 397, 354, 227, 113, 56, 284, 142,         /* 81 - 90 */
-       71, 35                                                  /* 91 - 92 */
+       106, 53, 282, 397, 454, 227, 113, 56, 284, 142,         /* 81 - 90 */
+       71, 35, 273, 136, 324, 418, 465, 488, 500, 506          /* 91 - 100 */
 };
 
 #ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
        u32 dsi_bit_clock_hz;
        u32 dsi_clk;
 
-       switch (pixel_format) {
-       default:
-       case VID_MODE_FORMAT_RGB888:
-       case VID_MODE_FORMAT_RGB666_LOOSE:
-               bpp = 24;
-               break;
-       case VID_MODE_FORMAT_RGB666:
-               bpp = 18;
-               break;
-       case VID_MODE_FORMAT_RGB565:
-               bpp = 16;
-               break;
-       }
+       bpp = dsi_pixel_format_bpp(pixel_format);
 
        hactive = mode->hdisplay;
        vactive = mode->vdisplay;
@@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
        u32 dsi_clk_khz;
-       u32 bpp;
-
-       switch (pixel_format) {
-       default:
-       case VID_MODE_FORMAT_RGB888:
-       case VID_MODE_FORMAT_RGB666_LOOSE:
-               bpp = 24;
-               break;
-       case VID_MODE_FORMAT_RGB666:
-               bpp = 18;
-               break;
-       case VID_MODE_FORMAT_RGB565:
-               bpp = 16;
-               break;
-       }
+       u32 bpp = dsi_pixel_format_bpp(pixel_format);
 
        /* DSI data rate = pixel clock * bits per pixel / lane count
           pixel clock is converted from KHz to Hz */
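[Editor's note: the formula in the comment above is straightforward to sanity-check. With illustrative numbers — a 148500 kHz pixel clock, RGB888 (24 bpp), and 4 lanes — each lane must carry 891000 kHz:]

#include <stdio.h>

int main(void)
{
        unsigned int pclk = 148500;     /* kHz, illustrative */
        unsigned int bpp = 24;          /* RGB888 */
        unsigned int lanes = 4;
        unsigned int dsi_clk_khz = pclk * bpp / lanes;  /* -> 891000 */

        printf("%u kHz per lane\n", dsi_clk_khz);
        return 0;
}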
@@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 
 #endif
 
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+                       struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
        unsigned int calc_m = 0, calc_p = 0;
-       unsigned int m, n = 1, p;
-       int ref_clk = 25000;
+       unsigned int m_min, m_max, p_min = 2, p_max = 6;
+       unsigned int m, n, p;
+       int ref_clk;
        int delta = target_dsi_clk;
        u32 m_seed;
 
@@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
                return -ECHRNG;
        }
 
-       for (m = 62; m <= 92 && delta; m++) {
-               for (p = 2; p <= 6 && delta; p++) {
+       if (IS_CHERRYVIEW(dev_priv)) {
+               ref_clk = 100000;
+               n = 4;
+               m_min = 70;
+               m_max = 96;
+       } else {
+               ref_clk = 25000;
+               n = 1;
+               m_min = 62;
+               m_max = 92;
+       }
+
+       for (m = m_min; m <= m_max && delta; m++) {
+               for (p = p_min; p <= p_max && delta; p++) {
                        /*
                         * Find the optimal m and p divisors with minimal delta
                         * +/- the required clock
@@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
        dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
                                    intel_dsi->lane_count);
 
-       ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+       ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
        if (ret) {
                DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
                return;
@@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 
 static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
 {
-       int bpp;
-
-       switch (pixel_format) {
-       default:
-       case VID_MODE_FORMAT_RGB888:
-       case VID_MODE_FORMAT_RGB666_LOOSE:
-               bpp = 24;
-               break;
-       case VID_MODE_FORMAT_RGB666:
-               bpp = 18;
-               break;
-       case VID_MODE_FORMAT_RGB565:
-               bpp = 16;
-               break;
-       }
+       int bpp = dsi_pixel_format_bpp(pixel_format);
 
        WARN(bpp != pipe_bpp,
             "bpp match assertion failure (expected %d, current %d)\n",
index ece5bd754f85f5c0de25de036ba232ca19986b32..dc532bb61d229834dafeaf7d3ba54f50042763ea 100644 (file)
@@ -196,50 +196,6 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
        intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
-       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-       struct drm_crtc *crtc;
-       struct intel_crtc_state *config;
-
-       /* dvo supports only 2 dpms states. */
-       if (mode != DRM_MODE_DPMS_ON)
-               mode = DRM_MODE_DPMS_OFF;
-
-       if (mode == connector->dpms)
-               return;
-
-       connector->dpms = mode;
-
-       /* Only need to change hw state when actually enabled */
-       crtc = intel_dvo->base.base.crtc;
-       if (!crtc) {
-               intel_dvo->base.connectors_active = false;
-               return;
-       }
-
-       /* We call connector dpms manually below in case pipe dpms doesn't
-        * change due to cloning. */
-       if (mode == DRM_MODE_DPMS_ON) {
-               config = to_intel_crtc(crtc)->config;
-
-               intel_dvo->base.connectors_active = true;
-
-               intel_crtc_update_dpms(crtc);
-
-               intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
-       } else {
-               intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
-               intel_dvo->base.connectors_active = false;
-
-               intel_crtc_update_dpms(crtc);
-       }
-
-       intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
@@ -387,7 +343,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-       .dpms = intel_dvo_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dvo_detect,
        .destroy = intel_dvo_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
index 6abb83432d4d710bb5e63322fbbb03f891ee4a5e..1f97fb548c2ac6b937e2b5e8f0a7a29c9592a9b2 100644 (file)
@@ -41,9 +41,8 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;
 
        dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev)
        DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int i;
        u32 fbc_ctl;
@@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
                cfb_pitch = fb->pitches[0];
 
        /* FBC_CTL wants 32B or 64B units */
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;
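
For reference, the pitch encoding used just above as a standalone calculation:
GEN2 counts the compressed framebuffer pitch in 32-byte units, later
generations in 64-byte units, each stored minus one.

#include <stdio.h>

static int fbc_ctl_pitch(int pitch_bytes, int is_gen2)
{
	/* FBC_CTL wants 32B or 64B units, minus one */
	return is_gen2 ? pitch_bytes / 32 - 1 : pitch_bytes / 64 - 1;
}

int main(void)
{
	int pitch = 1920 * 4;	/* e.g. a 1920-wide XRGB8888 framebuffer */

	printf("gen2: %d, gen4+: %d\n",
	       fbc_ctl_pitch(pitch, 1), fbc_ctl_pitch(pitch, 0));
	return 0;
}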
@@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);
 
-       if (IS_GEN4(dev)) {
+       if (IS_GEN4(dev_priv)) {
                u32 fbc_ctl2;
 
                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-               fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+               fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-               I915_WRITE(FBC_FENCE_OFF, crtc->y);
+               I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
        }
 
        /* enable it... */
        fbc_ctl = I915_READ(FBC_CONTROL);
        fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-       if (IS_I945GM(dev))
+       if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-                     cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+                     cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
        dev_priv->fbc.enabled = true;
 
-       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+       dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
-       I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+       I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
 
        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
        dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev)
        }
 }
 
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
@@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
        POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
+       int threshold = dev_priv->fbc.threshold;
 
        dev_priv->fbc.enabled = true;
 
-       dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+       dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-               dev_priv->fbc.threshold++;
+               threshold++;
 
-       switch (dev_priv->fbc.threshold) {
+       switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
                break;
        }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
-       if (IS_GEN5(dev))
+       if (IS_GEN5(dev_priv))
                dpfc_ctl |= obj->fence_reg;
 
-       I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+       I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
        }
 
        intel_fbc_nuke(dev_priv);
 
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
        dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev)
        }
 }
 
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->primary->fb;
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_framebuffer *fb = crtc->base.primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
+       int threshold = dev_priv->fbc.threshold;
 
        dev_priv->fbc.enabled = true;
 
-       dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+       dpfc_ctl = 0;
+       if (IS_IVYBRIDGE(dev_priv))
+               dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-               dev_priv->fbc.threshold++;
+               threshold++;
 
-       switch (dev_priv->fbc.threshold) {
+       switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
 
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-       if (IS_IVYBRIDGE(dev)) {
+       if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-               I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
-                          I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+               I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+                          I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
                           HSW_FBCQ_DIS);
        }
 
        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-       I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+       I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 
        intel_fbc_nuke(dev_priv);
 
-       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
  * intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        return dev_priv->fbc.enabled;
 }
 
@@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work)
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
-       struct drm_device *dev = work->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+       struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->fbc.lock);
        if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
-               if (work->crtc->primary->fb == work->fb) {
-                       dev_priv->display.enable_fbc(work->crtc);
+               if (crtc_fb == work->fb) {
+                       dev_priv->fbc.enable_fbc(work->crtc);
 
-                       dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
-                       dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
-                       dev_priv->fbc.y = work->crtc->y;
+                       dev_priv->fbc.crtc = work->crtc;
+                       dev_priv->fbc.fb_id = crtc_fb->base.id;
+                       dev_priv->fbc.y = work->crtc->base.y;
                }
 
                dev_priv->fbc.fbc_work = NULL;
        }
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->fbc.lock);
 
        kfree(work);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 {
+       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
        if (dev_priv->fbc.fbc_work == NULL)
                return;
 
@@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
        dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
 {
        struct intel_fbc_work *work;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-       if (!dev_priv->display.enable_fbc)
-               return;
+       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
        intel_fbc_cancel_work(dev_priv);
 
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
-               dev_priv->display.enable_fbc(crtc);
+               dev_priv->fbc.enable_fbc(crtc);
                return;
        }
 
        work->crtc = crtc;
-       work->fb = crtc->primary->fb;
+       work->fb = crtc->base.primary->fb;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
        dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc)
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
 
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+       intel_fbc_cancel_work(dev_priv);
+
+       dev_priv->fbc.disable_fbc(dev_priv);
+       dev_priv->fbc.crtc = NULL;
+}
+
 /**
  * intel_fbc_disable - disable FBC
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function disables FBC.
  */
-void intel_fbc_disable(struct drm_device *dev)
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!dev_priv->fbc.enable_fbc)
+               return;
 
-       intel_fbc_cancel_work(dev_priv);
+       mutex_lock(&dev_priv->fbc.lock);
+       __intel_fbc_disable(dev_priv);
+       mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/*
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-       if (!dev_priv->display.disable_fbc)
+       if (!dev_priv->fbc.enable_fbc)
                return;
 
-       dev_priv->display.disable_fbc(dev);
-       dev_priv->fbc.crtc = NULL;
+       mutex_lock(&dev_priv->fbc.lock);
+       if (dev_priv->fbc.crtc == crtc)
+               __intel_fbc_disable(dev_priv);
+       mutex_unlock(&dev_priv->fbc.lock);
 }
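
The new fbc.lock follows the common kernel idiom of a locked public entry
point delegating to a double-underscore worker that asserts the lock is held.
A userspace sketch of the same pattern, with pthreads standing in for
struct mutex and hypothetical function names:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t fbc_lock = PTHREAD_MUTEX_INITIALIZER;
static int fbc_lock_held;	/* poor man's mutex_is_locked() */

static void __fbc_disable(void)
{
	assert(fbc_lock_held);	/* mirrors WARN_ON(!mutex_is_locked(...)) */
	/* ... cancel pending work, call the hw disable hook ... */
}

void fbc_disable(void)
{
	pthread_mutex_lock(&fbc_lock);
	fbc_lock_held = 1;
	__fbc_disable();
	fbc_lock_held = 0;
	pthread_mutex_unlock(&fbc_lock);
}

int main(void)
{
	fbc_disable();
	return 0;
}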
 
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+       switch (reason) {
+       case FBC_OK:
+               return "FBC enabled but currently disabled in hardware";
+       case FBC_UNSUPPORTED:
+               return "unsupported by this chipset";
+       case FBC_NO_OUTPUT:
+               return "no output";
+       case FBC_STOLEN_TOO_SMALL:
+               return "not enough stolen memory";
+       case FBC_UNSUPPORTED_MODE:
+               return "mode incompatible with compression";
+       case FBC_MODE_TOO_LARGE:
+               return "mode too large for compression";
+       case FBC_BAD_PLANE:
+               return "FBC unsupported on plane";
+       case FBC_NOT_TILED:
+               return "framebuffer not tiled or fenced";
+       case FBC_MULTIPLE_PIPES:
+               return "more than one pipe active";
+       case FBC_MODULE_PARAM:
+               return "disabled per module param";
+       case FBC_CHIP_DEFAULT:
+               return "disabled per chip default";
+       case FBC_ROTATION:
+               return "rotation unsupported";
+       case FBC_IN_DBG_MASTER:
+               return "Kernel debugger is active";
+       default:
+               MISSING_CASE(reason);
+               return "unknown reason";
+       }
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
                              enum no_fbc_reason reason)
 {
        if (dev_priv->fbc.no_fbc_reason == reason)
-               return false;
+               return;
 
        dev_priv->fbc.no_fbc_reason = reason;
-       return true;
+       DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
 }
 
 static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
 {
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        enum pipe pipe;
-       bool pipe_a_only = false, one_pipe_only = false;
+       bool pipe_a_only = false;
 
        if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
                pipe_a_only = true;
-       else if (INTEL_INFO(dev_priv)->gen <= 4)
-               one_pipe_only = true;
 
        for_each_pipe(dev_priv, pipe) {
                tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
                if (intel_crtc_active(tmp_crtc) &&
-                   to_intel_plane_state(tmp_crtc->primary->state)->visible) {
-                       if (one_pipe_only && crtc) {
-                               if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-                                       DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                               return NULL;
-                       }
+                   to_intel_plane_state(tmp_crtc->primary->state)->visible)
                        crtc = tmp_crtc;
-               }
 
                if (pipe_a_only)
                        break;
        }
 
-       if (!crtc || crtc->primary->fb == NULL) {
-               if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-                       DRM_DEBUG_KMS("no output, disabling\n");
+       if (!crtc || crtc->primary->fb == NULL)
                return NULL;
-       }
 
        return crtc;
 }
 
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+       enum pipe pipe;
+       int n_pipes = 0;
+       struct drm_crtc *crtc;
+
+       if (INTEL_INFO(dev_priv)->gen > 4)
+               return true;
+
+       for_each_pipe(dev_priv, pipe) {
+               crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+               if (intel_crtc_active(crtc) &&
+                   to_intel_plane_state(crtc->primary->state)->visible)
+                       n_pipes++;
+       }
+
+       return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+                                     struct drm_mm_node *node,
+                                     int size,
+                                     int fb_cpp)
+{
+       int compression_threshold = 1;
+       int ret;
+
+       /* HACK: This code depends on what we will do in *_enable_fbc. If that
+        * code changes, this code needs to change as well.
+        *
+        * The enable_fbc code will attempt to use one of our two compression
+        * thresholds, so in that case there is only one fallback left.
+        */
+
+       /* Try to over-allocate to reduce reallocations and fragmentation. */
+       ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+       if (ret == 0)
+               return compression_threshold;
+
+again:
+       /* HW's ability to limit the CFB is 1:4 */
+       if (compression_threshold > 4 ||
+           (fb_cpp == 2 && compression_threshold == 2))
+               return 0;
+
+       ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+       if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+               return 0;
+       } else if (ret) {
+               compression_threshold <<= 1;
+               goto again;
+       } else {
+               return compression_threshold;
+       }
+}
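
A toy model of the search above: the first attempt over-allocates (size << 1),
then the size is halved while the threshold doubles, bounded by the hardware's
1:4 compression limit (1:2 at 16bpp). The gen <= 4 early exit is omitted, and
'avail' is a made-up stand-in for the remaining stolen memory.

#include <stdio.h>

static int model_threshold(long avail, long size, int fb_cpp)
{
	int threshold = 1;

	size <<= 1;			/* over-allocate attempt */
	if (size <= avail)
		return threshold;
again:
	if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
		return 0;		/* beyond what the HW can compress */
	size >>= 1;
	if (size > avail) {
		threshold <<= 1;
		goto again;
	}
	return threshold;
}

int main(void)
{
	/* an 8 MiB framebuffer with only 4 MiB of stolen space left -> 1:2 */
	printf("threshold: %d\n", model_threshold(4L << 20, 8L << 20, 4));
	return 0;
}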
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+                              int fb_cpp)
+{
+       struct drm_mm_node *uninitialized_var(compressed_llb);
+       int ret;
+
+       ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+                                        size, fb_cpp);
+       if (!ret)
+               goto err_llb;
+       else if (ret > 1) {
+               DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+       }
+
+       dev_priv->fbc.threshold = ret;
+
+       if (INTEL_INFO(dev_priv)->gen >= 5)
+               I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+       else if (IS_GM45(dev_priv)) {
+               I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+       } else {
+               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+               if (!compressed_llb)
+                       goto err_fb;
+
+               ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+                                                 4096, 4096);
+               if (ret)
+                       goto err_fb;
+
+               dev_priv->fbc.compressed_llb = compressed_llb;
+
+               I915_WRITE(FBC_CFB_BASE,
+                          dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+               I915_WRITE(FBC_LL_BASE,
+                          dev_priv->mm.stolen_base + compressed_llb->start);
+       }
+
+       dev_priv->fbc.uncompressed_size = size;
+
+       DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+                     size);
+
+       return 0;
+
+err_fb:
+       kfree(compressed_llb);
+       i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+       pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+       return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+       if (dev_priv->fbc.uncompressed_size == 0)
+               return;
+
+       i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+       if (dev_priv->fbc.compressed_llb) {
+               i915_gem_stolen_remove_node(dev_priv,
+                                           dev_priv->fbc.compressed_llb);
+               kfree(dev_priv->fbc.compressed_llb);
+       }
+
+       dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+       if (!dev_priv->fbc.enable_fbc)
+               return;
+
+       mutex_lock(&dev_priv->fbc.lock);
+       __intel_fbc_cleanup_cfb(dev_priv);
+       mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+                              int fb_cpp)
+{
+       if (size <= dev_priv->fbc.uncompressed_size)
+               return 0;
+
+       /* Release any current block */
+       __intel_fbc_cleanup_cfb(dev_priv);
+
+       return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
 /**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
  *
  * We need to enable/disable FBC on a global basis.
  */
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev)
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;
 
-       if (!HAS_FBC(dev))
-               return;
+       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
        /* disable framebuffer compression in vGPU */
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv->dev))
                i915.enable_fbc = 0;
 
        if (i915.enable_fbc < 0) {
-               if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-                       DRM_DEBUG_KMS("disabled per chip default\n");
+               set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
                goto out_disable;
        }
 
        if (!i915.enable_fbc) {
-               if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-                       DRM_DEBUG_KMS("fbc disabled per module param\n");
+               set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
                goto out_disable;
        }
 
@@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev)
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        crtc = intel_fbc_find_crtc(dev_priv);
-       if (!crtc)
+       if (!crtc) {
+               set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
                goto out_disable;
+       }
+
+       if (!multiple_pipes_ok(dev_priv)) {
+               set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+               goto out_disable;
+       }
 
        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->primary->fb;
@@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev)
 
        if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
            (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-               if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-                       DRM_DEBUG_KMS("mode incompatible with compression, "
-                                     "disabling\n");
+               set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
                goto out_disable;
        }
 
-       if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+       if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
                max_width = 4096;
                max_height = 4096;
-       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
@@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev)
        }
        if (intel_crtc->config->pipe_src_w > max_width ||
            intel_crtc->config->pipe_src_h > max_height) {
-               if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
-                       DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+               set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
                goto out_disable;
        }
-       if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+       if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
            intel_crtc->plane != PLANE_A) {
-               if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-                       DRM_DEBUG_KMS("plane not A, disabling compression\n");
+               set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
                goto out_disable;
        }
 
@@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev)
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
-               if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
-                       DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+               set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
                goto out_disable;
        }
-       if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+       if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
            crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-               if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-                       DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+               set_no_fbc_reason(dev_priv, FBC_ROTATION);
                goto out_disable;
        }
 
        /* If the kernel debugger is active, always disable compression */
-       if (in_dbg_master())
+       if (in_dbg_master()) {
+               set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
                goto out_disable;
+       }
 
-       if (i915_gem_stolen_setup_compression(dev, obj->base.size,
-                                             drm_format_plane_cpp(fb->pixel_format, 0))) {
-               if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
-                       DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+       if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+                               drm_format_plane_cpp(fb->pixel_format, 0))) {
+               set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
                goto out_disable;
        }
 
@@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev)
            dev_priv->fbc.y == crtc->y)
                return;
 
-       if (intel_fbc_enabled(dev)) {
+       if (intel_fbc_enabled(dev_priv)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@ void intel_fbc_update(struct drm_device *dev)
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
-               intel_fbc_disable(dev);
+               __intel_fbc_disable(dev_priv);
        }
 
-       intel_fbc_enable(crtc);
+       intel_fbc_enable(intel_crtc);
        dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;
 
 out_disable:
        /* Multiple disables should be harmless */
-       if (intel_fbc_enabled(dev)) {
+       if (intel_fbc_enabled(dev_priv)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-               intel_fbc_disable(dev);
+               __intel_fbc_disable(dev_priv);
        }
-       i915_gem_stolen_cleanup_compression(dev);
+       __intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/*
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+       if (!dev_priv->fbc.enable_fbc)
+               return;
+
+       mutex_lock(&dev_priv->fbc.lock);
+       __intel_fbc_update(dev_priv);
+       mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin)
 {
-       struct drm_device *dev = dev_priv->dev;
        unsigned int fbc_bits;
 
+       if (!dev_priv->fbc.enable_fbc)
+               return;
+
        if (origin == ORIGIN_GTT)
                return;
 
+       mutex_lock(&dev_priv->fbc.lock);
+
        if (dev_priv->fbc.enabled)
                fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
        else if (dev_priv->fbc.fbc_work)
                fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-                       to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+                                       dev_priv->fbc.fbc_work->crtc->pipe);
        else
                fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
        dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
        if (dev_priv->fbc.busy_bits)
-               intel_fbc_disable(dev);
+               __intel_fbc_disable(dev_priv);
+
+       mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-                    unsigned int frontbuffer_bits)
+                    unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
-       struct drm_device *dev = dev_priv->dev;
+       if (!dev_priv->fbc.enable_fbc)
+               return;
 
-       if (!dev_priv->fbc.busy_bits)
+       if (origin == ORIGIN_GTT)
                return;
 
+       mutex_lock(&dev_priv->fbc.lock);
+
        dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-       if (!dev_priv->fbc.busy_bits)
-               intel_fbc_update(dev);
+       if (!dev_priv->fbc.busy_bits) {
+               __intel_fbc_disable(dev_priv);
+               __intel_fbc_update(dev_priv);
+       }
+
+       mutex_unlock(&dev_priv->fbc.lock);
 }
 
 /**
@@ -702,6 +914,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
        enum pipe pipe;
 
+       mutex_init(&dev_priv->fbc.lock);
+
        if (!HAS_FBC(dev_priv)) {
                dev_priv->fbc.enabled = false;
                dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
        }
 
        if (INTEL_INFO(dev_priv)->gen >= 7) {
-               dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-               dev_priv->display.enable_fbc = gen7_fbc_enable;
-               dev_priv->display.disable_fbc = ilk_fbc_disable;
+               dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+               dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+               dev_priv->fbc.disable_fbc = ilk_fbc_disable;
        } else if (INTEL_INFO(dev_priv)->gen >= 5) {
-               dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-               dev_priv->display.enable_fbc = ilk_fbc_enable;
-               dev_priv->display.disable_fbc = ilk_fbc_disable;
+               dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+               dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+               dev_priv->fbc.disable_fbc = ilk_fbc_disable;
        } else if (IS_GM45(dev_priv)) {
-               dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-               dev_priv->display.enable_fbc = g4x_fbc_enable;
-               dev_priv->display.disable_fbc = g4x_fbc_disable;
+               dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+               dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+               dev_priv->fbc.disable_fbc = g4x_fbc_disable;
        } else {
-               dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-               dev_priv->display.enable_fbc = i8xx_fbc_enable;
-               dev_priv->display.disable_fbc = i8xx_fbc_disable;
+               dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+               dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+               dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
 
                /* This value was pulled out of someone's hat */
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
        }
 
-       dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+       dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
 }
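
The init path now hangs the per-platform hooks off dev_priv->fbc instead of
dev_priv->display. A condensed model of that dispatch, with hypothetical
function names: the pointers are selected once at init and every later call
goes through the struct.

#include <stdio.h>

struct fbc_hooks {
	void (*enable)(void);
	void (*disable)(void);
};

static void gen7_enable(void) { printf("gen7 enable\n"); }
static void ilk_disable(void) { printf("ilk disable\n"); }

int main(void)
{
	struct fbc_hooks fbc = { 0 };
	int gen = 7;	/* illustrative */

	if (gen >= 7) {
		fbc.enable = gen7_enable;
		fbc.disable = ilk_disable;
	} /* ... older generations pick the ilk/g4x/i8xx hooks ... */

	fbc.enable();
	fbc.disable();
	return 0;
}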
index 6372cfc7d0532d1eb8575a8bc5f52fcf202d8d35..7eff33ff84f69de52839817ac8d57ac219da5b53 100644 (file)
@@ -63,8 +63,7 @@ static int intel_fbdev_set_par(struct fb_info *info)
                 * now until we solve this for real.
                 */
                mutex_lock(&fb_helper->dev->struct_mutex);
-               ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
-                                                       true);
+               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
                mutex_unlock(&fb_helper->dev->struct_mutex);
        }
 
@@ -89,7 +88,7 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
                 * now until we solve this for real.
                 */
                mutex_lock(&fb_helper->dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
                mutex_unlock(&fb_helper->dev->struct_mutex);
        }
 
@@ -115,7 +114,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
                 * now until we solve this for real.
                 */
                mutex_lock(&fb_helper->dev->struct_mutex);
-               intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
                mutex_unlock(&fb_helper->dev->struct_mutex);
        }
 
@@ -177,7 +176,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        }
 
        /* Flush everything out, we'll be doing GTT only from now on */
-       ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+       ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
        if (ret) {
                DRM_ERROR("failed to pin obj: %d\n", ret);
                goto out_fb;
@@ -484,18 +483,13 @@ retry:
                         * IMPORTANT: We want to use the adjusted mode (i.e.
                         * after the panel fitter upscaling) as the initial
                         * config, not the input mode, which is what crtc->mode
-                        * usually contains. But since our current fastboot
+                        * usually contains. But since our current
                         * code puts a mode derived from the post-pfit timings
-                        * into crtc->mode this works out correctly. We don't
-                        * use hwmode anywhere right now, so use it for this
-                        * since the fb helper layer wants a pointer to
-                        * something we own.
+                        * into crtc->mode this works out correctly.
                         */
                        DRM_DEBUG_KMS("looking for current mode on connector %s\n",
                                      connector->name);
-                       intel_mode_from_pipe_config(&encoder->crtc->hwmode,
-                                                   to_intel_crtc(encoder->crtc)->config);
-                       modes[i] = &encoder->crtc->hwmode;
+                       modes[i] = &encoder->crtc->mode;
                }
                crtcs[i] = new_crtc;
 
@@ -582,7 +576,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
        struct intel_framebuffer *fb = NULL;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
-       struct intel_initial_plane_config *plane_config = NULL;
        unsigned int max_size = 0;
 
        if (!i915.fastboot)
@@ -590,20 +583,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 
        /* Find the largest fb */
        for_each_crtc(dev, crtc) {
+               struct drm_i915_gem_object *obj =
+                       intel_fb_obj(crtc->primary->state->fb);
                intel_crtc = to_intel_crtc(crtc);
 
-               if (!intel_crtc->active || !crtc->primary->fb) {
+               if (!intel_crtc->active || !obj) {
                        DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
                                      pipe_name(intel_crtc->pipe));
                        continue;
                }
 
-               if (intel_crtc->plane_config.size > max_size) {
+               if (obj->base.size > max_size) {
                        DRM_DEBUG_KMS("found possible fb from plane %c\n",
                                      pipe_name(intel_crtc->pipe));
-                       plane_config = &intel_crtc->plane_config;
-                       fb = to_intel_framebuffer(crtc->primary->fb);
-                       max_size = plane_config->size;
+                       fb = to_intel_framebuffer(crtc->primary->state->fb);
+                       max_size = obj->base.size;
                }
        }
 
@@ -638,7 +632,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                        DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
                                      pipe_name(intel_crtc->pipe),
                                      cur_size, fb->base.pitches[0]);
-                       plane_config = NULL;
                        fb = NULL;
                        break;
                }
@@ -659,7 +652,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                        DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
                                      pipe_name(intel_crtc->pipe),
                                      cur_size, max_size);
-                       plane_config = NULL;
                        fb = NULL;
                        break;
                }
@@ -825,11 +817,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
 {
        int ret;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct drm_fb_helper *fb_helper;
 
-       if (!dev_priv->fbdev)
+       if (!ifbdev)
                return;
 
-       ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
-       if (ret)
+       fb_helper = &ifbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret) {
                DRM_DEBUG("failed to restore crtc mode\n");
+       } else {
+               mutex_lock(&fb_helper->dev->struct_mutex);
+               intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+               mutex_unlock(&fb_helper->dev->struct_mutex);
+       }
 }
index 57095f54c1f2e2611d2442bb9fbf12b645196302..ac85357010b4b652ec15811d9c7d4a3b52ad8ebd 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int dpll_reg = DPLL(pipe);
-       int dpll;
-
-       if (!HAS_GMCH_DISPLAY(dev))
-               return;
-
-       if (!dev_priv->lvds_downclock_avail)
-               return;
-
-       dpll = I915_READ(dpll_reg);
-       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-               DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-               assert_panel_unlocked(dev_priv, pipe);
-
-               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-               I915_WRITE(dpll_reg, dpll);
-               intel_wait_for_vblank(dev, pipe);
-
-               dpll = I915_READ(dpll_reg);
-               if (dpll & DISPLAY_RATE_SELECT_FPA1)
-                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-       }
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-                              unsigned frontbuffer_bits,
-                              struct intel_engine_cs *ring)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe) {
-               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-                       continue;
-
-               intel_increase_pllclock(dev, pipe);
-       }
-}
-
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            struct intel_engine_cs *ring,
                             enum fb_op_origin origin)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        if (!obj->frontbuffer_bits)
                return;
 
-       if (ring) {
+       if (origin == ORIGIN_CS) {
                mutex_lock(&dev_priv->fb_tracking.lock);
                dev_priv->fb_tracking.busy_bits
                        |= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
                mutex_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
        intel_psr_invalidate(dev, obj->frontbuffer_bits);
        intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
        intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+                                   unsigned frontbuffer_bits,
+                                   enum fb_op_origin origin)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Delay flushing when rings are still busy.*/
        mutex_lock(&dev_priv->fb_tracking.lock);
        frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
        mutex_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+       if (!frontbuffer_bits)
+               return;
 
        intel_edp_drrs_flush(dev, frontbuffer_bits);
-       intel_psr_flush(dev, frontbuffer_bits);
-       intel_fbc_flush(dev_priv, frontbuffer_bits);
+       intel_psr_flush(dev, frontbuffer_bits, origin);
+       intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_fb_obj_flush - flush frontbuffer object
  * @obj: GEM object to flush
  * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire)
+                       bool retire, enum fb_op_origin origin)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned frontbuffer_bits;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
                mutex_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
+       intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
                                    unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->fb_tracking.lock);
        dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
        mutex_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_psr_single_frame_update(dev);
+       intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
 
 /**
@@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
                                     unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->fb_tracking.lock);
        /* Mask any cancelled flips. */
@@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
        dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
        mutex_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
+       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on @obj. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+                           unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       /* Remove stale busy bits due to the old buffer. */
+       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
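
Taken together, invalidate/flush/flip maintain a per-plane busy mask: rendering
sets busy bits, flushes are delayed for bits that are still busy, and a flip
first drops the stale bits of the outgoing buffer. A much-simplified model of
that bookkeeping (the retire path, which also clears busy bits, is left out):

#include <stdio.h>

static unsigned int busy_bits;

static void fb_invalidate(unsigned int bits)
{
	busy_bits |= bits;		/* rendering in flight */
}

static void fb_flush(unsigned int bits)
{
	bits &= ~busy_bits;		/* delay flushing busy planes */
	if (bits)
		printf("flushing 0x%x\n", bits);
}

static void fb_flip(unsigned int bits)
{
	busy_bits &= ~bits;		/* old buffer's bits are stale now */
	fb_flush(bits);
}

int main(void)
{
	fb_invalidate(0x3);	/* CS rendering on two planes */
	fb_flip(0x1);		/* plane 0 flips: flush goes through */
	fb_flush(0x2);		/* plane 1 still busy: flush is delayed */
	return 0;
}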
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644 (file)
index 0000000..18d7f20
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Copyright Â© 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8            11
+#define GFXCORE_FAMILY_GEN9            12
+#define GFXCORE_FAMILY_FORCE_ULONG     0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL      0
+#define GUC_CTX_PRIORITY_HIGH          1
+#define GUC_CTX_PRIORITY_NORMAL                2
+#define GUC_CTX_PRIORITY_LOW           3
+
+#define GUC_MAX_GPU_CONTEXTS           1024
+#define        GUC_INVALID_CTX_ID              (GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE               1
+#define WQ_STATUS_SUSPENDED            2
+#define WQ_STATUS_CMD_ERROR            3
+#define WQ_STATUS_ENGINE_ID_NOT_USED   4
+#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_TYPE_SHIFT                  0
+#define   WQ_TYPE_BATCH_BUF            (0x1 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_PSEUDO               (0x2 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_INORDER              (0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT                        10
+#define WQ_LEN_SHIFT                   16
+#define WQ_NO_WCFLUSH_WAIT             (1 << 27)
+#define WQ_PRESENT_WORKLOAD            (1 << 28)
+#define WQ_WORKLOAD_SHIFT              29
+#define   WQ_WORKLOAD_GENERAL          (0 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_GPGPU            (1 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_TOUCH            (2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT             20
+#define WQ_RING_TAIL_MASK              (0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED           1
+#define GUC_DOORBELL_DISABLED          0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE       (1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB   (1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL       (1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT      (1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET                (1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED     (1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH          (1 << 6)
+
+/* The GuC control data is 9 DWORDs (GUC_CTL_DEBUG + 1) */
+#define GUC_CTL_CTXINFO                        0
+#define   GUC_CTL_CTXNUM_IN16_SHIFT    0
+#define   GUC_CTL_BASE_ADDR_SHIFT      12
+#define GUC_CTL_ARAT_HIGH              1
+#define GUC_CTL_ARAT_LOW               2
+#define GUC_CTL_DEVICE_INFO            3
+#define   GUC_CTL_GTTYPE_SHIFT         0
+#define   GUC_CTL_COREFAMILY_SHIFT     7
+#define GUC_CTL_LOG_PARAMS             4
+#define   GUC_LOG_VALID                        (1 << 0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL  (1 << 1)
+#define   GUC_LOG_ALLOC_IN_MEGABYTE    (1 << 3)
+#define   GUC_LOG_CRASH_PAGES          1
+#define   GUC_LOG_CRASH_SHIFT          4
+#define   GUC_LOG_DPC_PAGES            3
+#define   GUC_LOG_DPC_SHIFT            6
+#define   GUC_LOG_ISR_PAGES            3
+#define   GUC_LOG_ISR_SHIFT            9
+#define   GUC_LOG_BUF_ADDR_SHIFT       12
+#define GUC_CTL_PAGE_FAULT_CONTROL     5
+#define GUC_CTL_WA                     6
+#define   GUC_CTL_WA_UK_BY_DRIVER      (1 << 3)
+#define GUC_CTL_FEATURE                        7
+#define   GUC_CTL_VCS2_ENABLED         (1 << 0)
+#define   GUC_CTL_KERNEL_SUBMISSIONS   (1 << 1)
+#define   GUC_CTL_FEATURE2             (1 << 2)
+#define   GUC_CTL_POWER_GATING         (1 << 3)
+#define   GUC_CTL_DISABLE_SCHEDULER    (1 << 4)
+#define   GUC_CTL_PREEMPTION_LOG       (1 << 5)
+#define   GUC_CTL_ENABLE_SLPC          (1 << 7)
+#define GUC_CTL_DEBUG                  8
+#define   GUC_LOG_VERBOSITY_SHIFT      0
+#define   GUC_LOG_VERBOSITY_LOW                (0 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_MED                (1 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_HIGH       (2 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_ULTRA      (3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define          GUC_LOG_VERBOSITY_MIN         0
+#define          GUC_LOG_VERBOSITY_MAX         3
+
+#define GUC_CTL_MAX_DWORDS             (GUC_CTL_DEBUG + 1)
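
The GUC_CTL_* constants above are indices into the control-data array the host hands to the GuC at firmware load, and the nested GUC_LOG_*/verbosity values are bit fields within the indexed DWORDs. A minimal sketch of composing the log-parameters DWORD, assuming the definitions above are in scope; guc_ctl_sketch and log_page_offset are hypothetical names for illustration (the real population is done by the GuC loader, which is not part of this patch):

static void guc_ctl_sketch(u32 *params, u32 log_page_offset)
{
	/* Bits 31:12 hold the page-aligned log buffer address, per
	 * GUC_LOG_BUF_ADDR_SHIFT; log_page_offset is assumed to be
	 * page aligned, so its low 12 bits are already clear. */
	params[GUC_CTL_LOG_PARAMS] =
		GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT) |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		log_page_offset;

	params[GUC_CTL_DEBUG] =
		GUC_LOG_VERBOSITY_LOW << GUC_LOG_VERBOSITY_SHIFT;
}
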
+
+struct guc_doorbell_info {
+       u32 db_status;
+       u32 cookie;
+       u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+       struct {
+               u32 db_status;
+               u32 cookie;
+       };
+       u64 value_qw;
+} __packed;
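
The union overlays the two doorbell DWORDs so that status and cookie can be published with a single aligned 64-bit store, which keeps the GuC from observing a half-updated pair. A hedged sketch of ringing a doorbell this way; the cookie-increment policy and the assumption that the doorbell page is 8-byte aligned are part of this example, not mandated by the header:

static void ring_doorbell_sketch(struct guc_doorbell_info *db)
{
	union guc_doorbell_qw qw;

	qw.db_status = GUC_DOORBELL_ENABLED;
	qw.cookie = db->cookie + 1;	/* assumed: cookie changes per ring */

	/* Publish both fields with one 64-bit write. */
	*(volatile u64 *)&db->db_status = qw.value_qw;
}
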
+
+#define GUC_MAX_DOORBELLS              256
+#define GUC_INVALID_DOORBELL_ID                (GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE                    (PAGE_SIZE)
+#define GUC_WQ_SIZE                    (PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+       u32 header;
+       u32 context_desc;
+       u32 ring_tail;
+       u32 fence_id;
+} __packed;
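
Putting the WQ_* fields together, a work item's header DWORD carries the item type, flags, and a length field counting the DWORDs that follow the header. A sketch of filling a batch-buffer item; the len - 1 convention and the flag choice are assumptions of this illustration rather than requirements stated by the header:

static void guc_wq_item_sketch(struct guc_wq_item *wqi,
			       u32 ctx_desc, u32 ring_tail, u32 fence)
{
	/* Number of DWORDs after the header (3 for this fixed-size item). */
	u32 len = sizeof(*wqi) / sizeof(u32) - 1;

	wqi->header = WQ_TYPE_BATCH_BUF |
		      WQ_NO_WCFLUSH_WAIT |
		      (len << WQ_LEN_SHIFT);
	wqi->context_desc = ctx_desc;
	wqi->ring_tail = ring_tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = fence;
}
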
+
+struct guc_process_desc {
+       u32 context_id;
+       u64 db_base_addr;
+       u32 head;
+       u32 tail;
+       u32 error_offset;
+       u64 wq_base_addr;
+       u32 wq_size_bytes;
+       u32 wq_status;
+       u32 engine_presence;
+       u32 priority;
+       u32 reserved[30];
+} __packed;
+
+/* The engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET           0
+#define GUC_ELC_ENGINE_OFFSET          29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+       u32 context_desc;
+       u32 context_id;
+       u32 ring_status;
+       u32 ring_lcra;
+       u32 ring_begin;
+       u32 ring_end;
+       u32 ring_next_free_location;
+       u32 ring_current_tail_pointer_value;
+       u8 engine_state_submit_value;
+       u8 engine_state_wait_value;
+       u16 pagefault_count;
+       u16 engine_submit_queue_count;
+} __packed;
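
Per the two GUC_ELC_* offsets, the software context id occupies the low bits of context_id and the engine id sits in bits 29 and up. A one-line sketch of the packing those offsets imply:

static u32 guc_elc_context_id(u32 engine_id, u32 sw_ctx_id)
{
	return (engine_id << GUC_ELC_ENGINE_OFFSET) |
	       (sw_ctx_id << GUC_ELC_CTXID_OFFSET);
}
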
+
+/* Context descriptor for communicating between the uKernel and the driver */
+struct guc_context_desc {
+       u32 sched_common_area;
+       u32 context_id;
+       u32 pas_id;
+       u8 engines_used;
+       u64 db_trigger_cpu;
+       u32 db_trigger_uk;
+       u64 db_trigger_phy;
+       u16 db_id;
+
+       struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+       u8 attribute;
+
+       u32 priority;
+
+       u32 wq_sampled_tail_offset;
+       u32 wq_total_submit_enqueues;
+
+       u32 process_desc;
+       u32 wq_addr;
+       u32 wq_size;
+
+       u32 engine_presence;
+
+       u32 reserved0[1];
+       u64 reserved1[1];
+
+       u64 desc_private;
+} __packed;
+
+/* This action will be programmed into C180 - SOFT_SCRATCH_0_REG */
+enum host2guc_action {
+       HOST2GUC_ACTION_DEFAULT = 0x0,
+       HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+       HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+       HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+       HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+       HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define        GUC2HOST_RESPONSE_MASK          ((u32)0xF0000000)
+#define        GUC2HOST_IS_RESPONSE(x)         ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define        GUC2HOST_STATUS(x)              (GUC2HOST_RESPONSE_MASK | (x))
+
+/* GUC will return status back to SOFT_SCRATCH_O_REG */
+enum guc2host_status {
+       GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+       GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+       GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+       GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
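
Taken together, the action and status definitions describe a one-register mailbox: the host writes a HOST2GUC_ACTION_* code into the scratch register and polls until the value comes back with the top-nibble response mask set. A sketch of the completion test; read_soft_scratch0() is a hypothetical accessor standing in for the real MMIO read, which is not part of this header:

extern u32 read_soft_scratch0(void);	/* hypothetical MMIO accessor */

static bool host2guc_response_sketch(u32 *status)
{
	u32 val = read_soft_scratch0();

	/* Still our command word: the GuC has not responded yet. */
	if (!GUC2HOST_IS_RESPONSE(val))
		return false;

	*status = val;	/* e.g. GUC2HOST_STATUS_SUCCESS */
	return true;
}
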
+
+#endif
index e97731aab6dcfee2a380d09b95cefdff57e193e1..51cbea8247fe9b2cfc6987e6dec114b06d09cc00 100644 (file)
@@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        u32 val = I915_READ(VIDEO_DIP_CTL);
 
-       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-               return val & VIDEO_DIP_ENABLE;
+       if ((val & VIDEO_DIP_ENABLE) == 0)
+               return false;
 
-       return false;
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+               return false;
+
+       return val & (VIDEO_DIP_ENABLE_AVI |
+                     VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
        int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
-       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-               return val & VIDEO_DIP_ENABLE;
+       if ((val & VIDEO_DIP_ENABLE) == 0)
+               return false;
+
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+               return false;
 
-       return false;
+       return val & (VIDEO_DIP_ENABLE_AVI |
+                     VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                     VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
        int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
-       return val & VIDEO_DIP_ENABLE;
+       if ((val & VIDEO_DIP_ENABLE) == 0)
+               return false;
+
+       return val & (VIDEO_DIP_ENABLE_AVI |
+                     VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                     VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
        int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
-       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-               return val & VIDEO_DIP_ENABLE;
+       if ((val & VIDEO_DIP_ENABLE) == 0)
+               return false;
 
-       return false;
+       if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+               return false;
+
+       return val & (VIDEO_DIP_ENABLE_AVI |
+                     VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                     VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
        u32 val = I915_READ(ctl_reg);
 
-       return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
-                     VIDEO_DIP_ENABLE_VS_HSW);
+       return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+                     VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+                     VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 }
 
 /*
@@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        if (!enable) {
                if (!(val & VIDEO_DIP_ENABLE))
                        return;
-               val &= ~VIDEO_DIP_ENABLE;
+               if (port != (val & VIDEO_DIP_PORT_MASK)) {
+                       DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+                                     (val & VIDEO_DIP_PORT_MASK) >> 29);
+                       return;
+               }
+               val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+                        VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
                I915_WRITE(reg, val);
                POSTING_READ(reg);
                return;
@@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
-                       val &= ~VIDEO_DIP_ENABLE;
-                       I915_WRITE(reg, val);
-                       POSTING_READ(reg);
+                       DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+                                     (val & VIDEO_DIP_PORT_MASK) >> 29);
+                       return;
                }
                val &= ~VIDEO_DIP_PORT_MASK;
                val |= port;
        }
 
        val |= VIDEO_DIP_ENABLE;
-       val &= ~VIDEO_DIP_ENABLE_VENDOR;
+       val &= ~(VIDEO_DIP_ENABLE_AVI |
+                VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
        I915_WRITE(reg, val);
        POSTING_READ(reg);
@@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_connector *connector;
+
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+       /*
+        * HDMI cloning is only supported on g4x, which doesn't
+        * support deep color or GCP infoframes anyway, so there's
+        * no need to worry about multiple HDMI sinks here.
+        */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder == encoder)
+                       return connector->display_info.bpc > 8;
+
+       return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ *   phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+                                      const struct drm_display_mode *mode)
+{
+       unsigned int pixels_per_group;
+
+       switch (pipe_bpp) {
+       case 30:
+               /* 4 pixels in 5 clocks */
+               pixels_per_group = 4;
+               break;
+       case 36:
+               /* 2 pixels in 3 clocks */
+               pixels_per_group = 2;
+               break;
+       case 48:
+               /* 1 pixel in 2 clocks */
+               pixels_per_group = 1;
+               break;
+       default:
+               /* phase information not relevant for 8bpc */
+               return false;
+       }
+
+       return mode->crtc_hdisplay % pixels_per_group == 0 &&
+               mode->crtc_htotal % pixels_per_group == 0 &&
+               mode->crtc_hblank_start % pixels_per_group == 0 &&
+               mode->crtc_hblank_end % pixels_per_group == 0 &&
+               mode->crtc_hsync_start % pixels_per_group == 0 &&
+               mode->crtc_hsync_end % pixels_per_group == 0 &&
+               ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+                mode->crtc_htotal/2 % pixels_per_group == 0);
+}
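
As a worked example under the rules above: the CEA 1280x720@60 horizontal timings (hdisplay 1280, hsync 1390/1430, htotal 1650, hblank 1280/1650) are all even but not all divisible by four, so default_phase can be signalled at 36bpp (2-pixel groups) but not at 30bpp (4-pixel groups, 1390 % 4 != 0). A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* CEA-861 1280x720@60 horizontal timings:
	 * hdisplay, htotal, hblank_start, hblank_end, hsync_start, hsync_end */
	const unsigned int timings[6] = { 1280, 1650, 1280, 1650, 1390, 1430 };
	const struct { int bpp; unsigned int group; } cfg[] = {
		{ 30, 4 }, { 36, 2 }, { 48, 1 },
	};

	for (unsigned int i = 0; i < 3; i++) {
		int ok = 1;

		for (unsigned int j = 0; j < 6; j++)
			ok &= (timings[j] % cfg[i].group) == 0;
		printf("%2dbpp: default_phase %spossible\n",
		       cfg[i].bpp, ok ? "" : "not ");
	}
	return 0;	/* 30bpp: not possible; 36bpp and 48bpp: possible */
}
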
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+       u32 reg, val = 0;
+
+       if (HAS_DDI(dev_priv))
+               reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+       else if (IS_VALLEYVIEW(dev_priv))
+               reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+       else if (HAS_PCH_SPLIT(dev_priv->dev))
+               reg = TVIDEO_DIP_GCP(crtc->pipe);
+       else
+               return false;
+
+       /* Indicate color depth whenever the sink supports deep color */
+       if (hdmi_sink_is_deep_color(encoder))
+               val |= GCP_COLOR_INDICATION;
+
+       /* Enable default_phase whenever the display mode is suitably aligned */
+       if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+                                      &crtc->config->base.adjusted_mode))
+               val |= GCP_DEFAULT_PHASE_ENABLE;
+
+       I915_WRITE(reg, val);
+
+       return val != 0;
+}
+
 static void ibx_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        if (!enable) {
                if (!(val & VIDEO_DIP_ENABLE))
                        return;
-               val &= ~VIDEO_DIP_ENABLE;
+               val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+                        VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                        VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
                I915_WRITE(reg, val);
                POSTING_READ(reg);
                return;
        }
 
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
-               if (val & VIDEO_DIP_ENABLE) {
-                       val &= ~VIDEO_DIP_ENABLE;
-                       I915_WRITE(reg, val);
-                       POSTING_READ(reg);
-               }
+               WARN(val & VIDEO_DIP_ENABLE,
+                    "DIP already enabled on port %c\n",
+                    (val & VIDEO_DIP_PORT_MASK) >> 29);
                val &= ~VIDEO_DIP_PORT_MASK;
                val |= port;
        }
 
        val |= VIDEO_DIP_ENABLE;
-       val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-                VIDEO_DIP_ENABLE_GCP);
+       val &= ~(VIDEO_DIP_ENABLE_AVI |
+                VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+       if (intel_hdmi_set_gcp_infoframe(encoder))
+               val |= VIDEO_DIP_ENABLE_GCP;
 
        I915_WRITE(reg, val);
        POSTING_READ(reg);
@@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
        if (!enable) {
                if (!(val & VIDEO_DIP_ENABLE))
                        return;
-               val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+               val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+                        VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                        VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
                I915_WRITE(reg, val);
                POSTING_READ(reg);
                return;
@@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
        /* Set both together, unset both together: see the spec. */
        val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
        val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-                VIDEO_DIP_ENABLE_GCP);
+                VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+       if (intel_hdmi_set_gcp_infoframe(encoder))
+               val |= VIDEO_DIP_ENABLE_GCP;
 
        I915_WRITE(reg, val);
        POSTING_READ(reg);
@@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
        if (!enable) {
                if (!(val & VIDEO_DIP_ENABLE))
                        return;
-               val &= ~VIDEO_DIP_ENABLE;
+               val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+                        VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                        VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
                I915_WRITE(reg, val);
                POSTING_READ(reg);
                return;
        }
 
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
-               if (val & VIDEO_DIP_ENABLE) {
-                       val &= ~VIDEO_DIP_ENABLE;
-                       I915_WRITE(reg, val);
-                       POSTING_READ(reg);
-               }
+               WARN(val & VIDEO_DIP_ENABLE,
+                    "DIP already enabled on port %c\n",
+                    (val & VIDEO_DIP_PORT_MASK) >> 29);
                val &= ~VIDEO_DIP_PORT_MASK;
                val |= port;
        }
 
        val |= VIDEO_DIP_ENABLE;
-       val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
-                VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+       val &= ~(VIDEO_DIP_ENABLE_AVI |
+                VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+                VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+       if (intel_hdmi_set_gcp_infoframe(encoder))
+               val |= VIDEO_DIP_ENABLE_GCP;
 
        I915_WRITE(reg, val);
        POSTING_READ(reg);
@@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
 
        assert_hdmi_port_disabled(intel_hdmi);
 
+       val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+                VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+                VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
        if (!enable) {
-               I915_WRITE(reg, 0);
+               I915_WRITE(reg, val);
                POSTING_READ(reg);
                return;
        }
 
-       val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
-                VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+       if (intel_hdmi_set_gcp_infoframe(encoder))
+               val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
        I915_WRITE(reg, val);
        POSTING_READ(reg);
@@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        else
                dotclock = pipe_config->port_clock;
 
+       if (pipe_config->pixel_multiplier)
+               dotclock /= pipe_config->pixel_multiplier;
+
        if (HAS_PCH_SPLIT(dev_priv->dev))
                ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
        pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+       WARN_ON(!crtc->config->has_hdmi_sink);
+       DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+                        pipe_name(crtc->pipe));
+       intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
-       u32 enable_bits = SDVO_ENABLE;
 
-       if (intel_crtc->config->has_audio)
-               enable_bits |= SDVO_AUDIO_ENABLE;
+       temp = I915_READ(intel_hdmi->hdmi_reg);
+
+       temp |= SDVO_ENABLE;
+       if (crtc->config->has_audio)
+               temp |= SDVO_AUDIO_ENABLE;
+
+       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+       POSTING_READ(intel_hdmi->hdmi_reg);
+
+       if (crtc->config->has_audio)
+               intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       u32 temp;
 
        temp = I915_READ(intel_hdmi->hdmi_reg);
 
-       /* HW workaround for IBX, we need to move the port to transcoder A
-        * before disabling it, so restore the transcoder select bit here. */
-       if (HAS_PCH_IBX(dev))
-               enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+       temp |= SDVO_ENABLE;
+       if (crtc->config->has_audio)
+               temp |= SDVO_AUDIO_ENABLE;
+
+       /*
+        * HW workaround: need to write this twice, as a HW issue
+        * may result in the first write getting masked.
+        */
+       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+       POSTING_READ(intel_hdmi->hdmi_reg);
+       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+       POSTING_READ(intel_hdmi->hdmi_reg);
 
-       /* HW workaround, need to toggle enable bit off and on for 12bpc, but
-        * we do this anyway which shows more stable in testing.
+       /*
+        * HW workaround, need to toggle enable bit off and on
+        * for 12bpc with pixel repeat.
+        *
+        * FIXME: BSpec says this should be done at the end of
+        * the modeset sequence, so this may be too soon.
         */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (crtc->config->pipe_bpp > 24 &&
+           crtc->config->pixel_multiplier > 1) {
                I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
                POSTING_READ(intel_hdmi->hdmi_reg);
+
+               /*
+                * HW workaround: need to write this twice, as a HW
+                * issue may result in the first write getting masked.
+                */
+               I915_WRITE(intel_hdmi->hdmi_reg, temp);
+               POSTING_READ(intel_hdmi->hdmi_reg);
+               I915_WRITE(intel_hdmi->hdmi_reg, temp);
+               POSTING_READ(intel_hdmi->hdmi_reg);
        }
 
-       temp |= enable_bits;
+       if (crtc->config->has_audio)
+               intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       enum pipe pipe = crtc->pipe;
+       u32 temp;
+
+       temp = I915_READ(intel_hdmi->hdmi_reg);
+
+       temp |= SDVO_ENABLE;
+       if (crtc->config->has_audio)
+               temp |= SDVO_AUDIO_ENABLE;
+
+       /*
+        * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+        *
+        * The procedure for 12bpc is as follows:
+        * 1. disable HDMI clock gating
+        * 2. enable HDMI with 8bpc
+        * 3. enable HDMI with 12bpc
+        * 4. enable HDMI clock gating
+        */
+
+       if (crtc->config->pipe_bpp > 24) {
+               I915_WRITE(TRANS_CHICKEN1(pipe),
+                          I915_READ(TRANS_CHICKEN1(pipe)) |
+                          TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+               temp &= ~SDVO_COLOR_FORMAT_MASK;
+               temp |= SDVO_COLOR_FORMAT_8bpc;
+       }
 
        I915_WRITE(intel_hdmi->hdmi_reg, temp);
        POSTING_READ(intel_hdmi->hdmi_reg);
 
-       /* HW workaround, need to write this twice for issue that may result
-        * in first write getting masked.
-        */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (crtc->config->pipe_bpp > 24) {
+               temp &= ~SDVO_COLOR_FORMAT_MASK;
+               temp |= HDMI_COLOR_FORMAT_12bpc;
+
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
-       }
 
-       if (intel_crtc->config->has_audio) {
-               WARN_ON(!intel_crtc->config->has_hdmi_sink);
-               DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-                                pipe_name(intel_crtc->pipe));
-               intel_audio_codec_enable(encoder);
+               I915_WRITE(TRANS_CHICKEN1(pipe),
+                          I915_READ(TRANS_CHICKEN1(pipe)) &
+                          ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
        }
+
+       if (crtc->config->has_audio)
+               intel_enable_hdmi_audio(encoder);
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
        }
+
+       intel_hdmi->set_infoframes(&encoder->base, false, NULL);
 }
 
 static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
        intel_disable_hdmi(encoder);
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
@@ -938,25 +1163,52 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
                return 225000;
 }
 
+static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+                     int clock, bool respect_dvi_limit)
+{
+       struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+       if (clock < 25000)
+               return MODE_CLOCK_LOW;
+       if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+               return MODE_CLOCK_HIGH;
+
+       /* BXT DPLL can't generate 223-240 MHz */
+       if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+               return MODE_CLOCK_RANGE;
+
+       /* CHV DPLL can't generate 216-240 MHz */
+       if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+               return MODE_CLOCK_RANGE;
+
+       return MODE_OK;
+}
+
 static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
 {
-       int clock = mode->clock;
+       struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+       struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+       enum drm_mode_status status;
+       int clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       clock = mode->clock;
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                clock *= 2;
 
-       if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
-                                        true))
-               return MODE_CLOCK_HIGH;
-       if (clock < 20000)
-               return MODE_CLOCK_LOW;
+       /* check if we can do 8bpc */
+       status = hdmi_port_clock_valid(hdmi, clock, true);
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
+       /* if we can't do 8bpc we may still be able to do 12bpc */
+       if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+               status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
 
-       return MODE_OK;
+       return status;
 }
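
The 3/2 factor reflects the TMDS link: 12bpc pixels carry 50% more bits than 8bpc, so the port clock scales by 1.5. A standalone model of hdmi_port_clock_valid() for a BXT-like platform, assuming the 300 MHz limit and the 223.333-240 MHz DPLL hole taken from the code above:

#include <stdio.h>

static const char *clock_status(int clock)	/* clock in kHz */
{
	if (clock < 25000)
		return "MODE_CLOCK_LOW";
	if (clock > 300000)
		return "MODE_CLOCK_HIGH";
	if (clock > 223333 && clock < 240000)
		return "MODE_CLOCK_RANGE";	/* BXT DPLL hole */
	return "MODE_OK";
}

int main(void)
{
	int clock = 148500;	/* 1920x1080@60 */

	printf("8bpc:  %d kHz -> %s\n", clock, clock_status(clock));
	printf("12bpc: %d kHz -> %s\n", clock * 3 / 2,
	       clock_status(clock * 3 / 2));	/* 222750 kHz, still MODE_OK */
	return 0;
}
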
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
-       int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+       int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+       int clock_12bpc = clock_8bpc * 3 / 2;
        int desired_bpp;
 
        pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
                pipe_config->pixel_multiplier = 2;
+               clock_8bpc *= 2;
+               clock_12bpc *= 2;
        }
 
        if (intel_hdmi->color_range)
@@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
         * within limits.
         */
        if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
-           clock_12bpc <= portclock_limit &&
-           hdmi_12bpc_possible(pipe_config) &&
-           0 /* FIXME 12bpc support totally broken */) {
+           hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+           hdmi_12bpc_possible(pipe_config)) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
 
@@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        } else {
                DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
                desired_bpp = 8*3;
+
+               pipe_config->port_clock = clock_8bpc;
        }
 
        if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                pipe_config->pipe_bpp = desired_bpp;
        }
 
-       if (adjusted_mode->crtc_clock > portclock_limit) {
-               DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+       if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+                                 false) != MODE_OK) {
+               DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
                return false;
        }
 
@@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       intel_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       intel_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1653,7 +1909,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_hdmi_detect,
        .force = intel_hdmi_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1827,7 +2083,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
                intel_encoder->post_disable = vlv_hdmi_post_disable;
        } else {
                intel_encoder->pre_enable = intel_hdmi_pre_enable;
-               intel_encoder->enable = intel_enable_hdmi;
+               if (HAS_PCH_CPT(dev))
+                       intel_encoder->enable = cpt_enable_hdmi;
+               else if (HAS_PCH_IBX(dev))
+                       intel_encoder->enable = ibx_enable_hdmi;
+               else
+                       intel_encoder->enable = g4x_enable_hdmi;
        }
 
        intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644 (file)
index 0000000..032a0bf
--- /dev/null
@@ -0,0 +1,505 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled at many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a display port sink is connected, hence on platforms whose DP
+ * hotplug callback is handled by i915_digport_work_func, re-enabling of hpd is
+ * not performed (it was never expected to be disabled in the first place).
+ * This is specific to DP sinks handled by that routine; any other display,
+ * such as HDMI or DVI, enabled on the same port uses i915_hotplug_work_func,
+ * where this logic is handled.
+ */
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+       switch (pin) {
+       case HPD_PORT_A:
+               *port = PORT_A;
+               return true;
+       case HPD_PORT_B:
+               *port = PORT_B;
+               return true;
+       case HPD_PORT_C:
+               *port = PORT_C;
+               return true;
+       case HPD_PORT_D:
+               *port = PORT_D;
+               return true;
+       default:
+               return false;   /* no hpd */
+       }
+}
+
+#define HPD_STORM_DETECT_PERIOD                1000
+#define HPD_STORM_THRESHOLD            5
+#define HPD_STORM_REENABLE_DELAY       (2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+                                      enum hpd_pin pin)
+{
+       unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+       unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+       bool storm = false;
+
+       if (!time_in_range(jiffies, start, end)) {
+               dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+               dev_priv->hotplug.stats[pin].count = 0;
+               DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+       } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+               dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+               DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+               storm = true;
+       } else {
+               dev_priv->hotplug.stats[pin].count++;
+               DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+                             dev_priv->hotplug.stats[pin].count);
+       }
+
+       return storm;
+}
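
The detector is a simple windowed counter: an interrupt outside the current window restarts the window and zeroes the count, and more than HPD_STORM_THRESHOLD interrupts inside one HPD_STORM_DETECT_PERIOD window trip the storm path. A userspace model of the same logic, with plain milliseconds standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

#define PERIOD_MS	1000	/* stands in for HPD_STORM_DETECT_PERIOD */
#define THRESHOLD	5	/* stands in for HPD_STORM_THRESHOLD */

struct pin_stats { unsigned long window_start; int count; };

/* Model of intel_hpd_irq_storm_detect(): returns true on storm. */
static bool storm_detect(struct pin_stats *s, unsigned long now)
{
	if (now < s->window_start || now > s->window_start + PERIOD_MS) {
		s->window_start = now;	/* outside the window: restart it */
		s->count = 0;
	} else if (s->count > THRESHOLD) {
		return true;		/* would set HPD_MARK_DISABLED */
	} else {
		s->count++;
	}
	return false;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };

	/* Interrupts every 50 ms: the count reaches THRESHOLD + 1 on the
	 * sixth interrupt, so the seventh trips the detector at t = 300 ms. */
	for (unsigned long t = 0; t <= 300; t += 50)
		if (storm_detect(&s, t))
			printf("storm detected at %lu ms\n", t);
	return 0;
}
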
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_connector *intel_connector;
+       struct intel_encoder *intel_encoder;
+       struct drm_connector *connector;
+       enum hpd_pin pin;
+       bool hpd_disabled = false;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+                       continue;
+
+               intel_connector = to_intel_connector(connector);
+               intel_encoder = intel_connector->encoder;
+               if (!intel_encoder)
+                       continue;
+
+               pin = intel_encoder->hpd_pin;
+               if (pin == HPD_NONE ||
+                   dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+                       continue;
+
+               DRM_INFO("HPD interrupt storm detected on connector %s: "
+                        "switching from hotplug detection to polling\n",
+                        connector->name);
+
+               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT
+                       | DRM_CONNECTOR_POLL_DISCONNECT;
+               hpd_disabled = true;
+       }
+
+       /* Enable polling and queue hotplug re-enabling. */
+       if (hpd_disabled) {
+               drm_kms_helper_poll_enable(dev);
+               mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+                                msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+       }
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv),
+                            hotplug.reenable_work.work);
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       int i;
+
+       intel_runtime_pm_get(dev_priv);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       for_each_hpd_pin(i) {
+               struct drm_connector *connector;
+
+               if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+                       continue;
+
+               dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+               list_for_each_entry(connector, &mode_config->connector_list, head) {
+                       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+                       if (intel_connector->encoder->hpd_pin == i) {
+                               if (connector->polled != intel_connector->polled)
+                                       DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+                                                        connector->name);
+                               connector->polled = intel_connector->polled;
+                               if (!connector->polled)
+                                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+                       }
+               }
+       }
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+                               struct drm_connector *connector)
+{
+       enum drm_connector_status old_status;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+       old_status = connector->status;
+
+       connector->status = connector->funcs->detect(connector, false);
+       if (old_status == connector->status)
+               return false;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+                     connector->base.id,
+                     connector->name,
+                     drm_get_connector_status_name(old_status),
+                     drm_get_connector_status_name(connector->status));
+
+       return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+       u32 long_port_mask, short_port_mask;
+       struct intel_digital_port *intel_dig_port;
+       int i;
+       u32 old_bits = 0;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       long_port_mask = dev_priv->hotplug.long_port_mask;
+       dev_priv->hotplug.long_port_mask = 0;
+       short_port_mask = dev_priv->hotplug.short_port_mask;
+       dev_priv->hotplug.short_port_mask = 0;
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       for (i = 0; i < I915_MAX_PORTS; i++) {
+               bool valid = false;
+               bool long_hpd = false;
+               intel_dig_port = dev_priv->hotplug.irq_port[i];
+               if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+                       continue;
+
+               if (long_port_mask & (1 << i))  {
+                       valid = true;
+                       long_hpd = true;
+               } else if (short_port_mask & (1 << i))
+                       valid = true;
+
+               if (valid) {
+                       enum irqreturn ret;
+
+                       ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+                       if (ret == IRQ_NONE) {
+                               /* fall back to old school hpd */
+                               old_bits |= (1 << intel_dig_port->base.hpd_pin);
+                       }
+               }
+       }
+
+       if (old_bits) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               dev_priv->hotplug.event_bits |= old_bits;
+               spin_unlock_irq(&dev_priv->irq_lock);
+               schedule_work(&dev_priv->hotplug.hotplug_work);
+       }
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_connector *intel_connector;
+       struct intel_encoder *intel_encoder;
+       struct drm_connector *connector;
+       bool changed = false;
+       u32 hpd_event_bits;
+
+       mutex_lock(&mode_config->mutex);
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       hpd_event_bits = dev_priv->hotplug.event_bits;
+       dev_priv->hotplug.event_bits = 0;
+
+       /* Disable hotplug on connectors that hit an irq storm. */
+       intel_hpd_irq_storm_disable(dev_priv);
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               intel_connector = to_intel_connector(connector);
+               if (!intel_connector->encoder)
+                       continue;
+               intel_encoder = intel_connector->encoder;
+               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+                       DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+                                     connector->name, intel_encoder->hpd_pin);
+                       if (intel_encoder->hot_plug)
+                               intel_encoder->hot_plug(intel_encoder);
+                       if (intel_hpd_irq_event(dev, connector))
+                               changed = true;
+               }
+       }
+       mutex_unlock(&mode_config->mutex);
+
+       if (changed)
+               drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+                          u32 pin_mask, u32 long_mask)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+       enum port port;
+       bool storm_detected = false;
+       bool queue_dig = false, queue_hp = false;
+       bool is_dig_port;
+
+       if (!pin_mask)
+               return;
+
+       spin_lock(&dev_priv->irq_lock);
+       for_each_hpd_pin(i) {
+               if (!(BIT(i) & pin_mask))
+                       continue;
+
+               is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+                             dev_priv->hotplug.irq_port[port];
+
+               if (is_dig_port) {
+                       bool long_hpd = long_mask & BIT(i);
+
+                       DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+                                        long_hpd ? "long" : "short");
+                       /*
+                        * For long HPD pulses we want the digital-port work to run,
+                        * but we still want HPD storm detection to function.
+                        */
+                       queue_dig = true;
+                       if (long_hpd) {
+                               dev_priv->hotplug.long_port_mask |= (1 << port);
+                       } else {
+                               /* for short HPD just trigger the digital queue */
+                               dev_priv->hotplug.short_port_mask |= (1 << port);
+                               continue;
+                       }
+               }
+
+               if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+                       /*
+                        * On GMCH platforms the interrupt mask bits only
+                        * prevent irq generation, not the setting of the
+                        * hotplug bits itself. So only WARN about unexpected
+                        * interrupts on saner platforms.
+                        */
+                       WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+                                 "Received HPD interrupt on pin %d although disabled\n", i);
+                       continue;
+               }
+
+               if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+                       continue;
+
+               if (!is_dig_port) {
+                       dev_priv->hotplug.event_bits |= BIT(i);
+                       queue_hp = true;
+               }
+
+               if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+                       dev_priv->hotplug.event_bits &= ~BIT(i);
+                       storm_detected = true;
+               }
+       }
+
+       if (storm_detected)
+               dev_priv->display.hpd_irq_setup(dev);
+       spin_unlock(&dev_priv->irq_lock);
+
+       /*
+        * Our hotplug handler can grab modeset locks (by calling down into the
+        * fb helpers). Hence it must not be run on our own dev-priv->wq work
+        * queue for otherwise the flush_work in the pageflip code will
+        * deadlock.
+        */
+       if (queue_dig)
+               queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+       if (queue_hp)
+               schedule_work(&dev_priv->hotplug.hotplug_work);
+}
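
Callers of intel_hpd_irq_handler() are the per-platform irq handlers, which decode their hotplug status registers into the two pin-indexed masks. A hedged sketch of that caller side; hotplug_status, the PORTB_* bits, and the function name are illustrative stand-ins rather than the exact definitions from i915_reg.h:

static void platform_hpd_irq_sketch(struct drm_device *dev, u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	/* Illustrative decode: one status bit per port, plus a long/short bit. */
	if (hotplug_status & PORTB_HOTPLUG_INT_STATUS) {
		pin_mask |= BIT(HPD_PORT_B);
		if (hotplug_status & PORTB_HOTPLUG_LONG_DETECT)
			long_mask |= BIT(HPD_PORT_B);
	}

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
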
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+       int i;
+
+       for_each_hpd_pin(i) {
+               dev_priv->hotplug.stats[i].count = 0;
+               dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+       }
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               struct intel_connector *intel_connector = to_intel_connector(connector);
+               connector->polled = intel_connector->polled;
+               if (connector->encoder && !connector->polled &&
+                   I915_HAS_HOTPLUG(dev) &&
+                   intel_connector->encoder->hpd_pin > HPD_NONE)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+               if (intel_connector->mst_port)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+       }
+
+       /*
+        * Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked checks happy.
+        */
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+       INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+       INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+       INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+                         intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       dev_priv->hotplug.long_port_mask = 0;
+       dev_priv->hotplug.short_port_mask = 0;
+       dev_priv->hotplug.event_bits = 0;
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+       cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+       cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
index 9b74ffae5f5a7bab8ef545525d361c29fd4c3bf6..5bc0ce1347cef5d7dd0e1c0397f2eefb4ba4e151 100644 (file)
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "intel_mocs.h"
 
 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
 #define GEN8_CTX_PRIVILEGE (1<<8)
 
 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
-       const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
-               ppgtt->pdp.page_directory[n]->daddr : \
-               ppgtt->scratch_pd->daddr; \
+       const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
        reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
        reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
@@ -211,9 +210,9 @@ enum {
        FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-               struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
        return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-                                        struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
+       struct intel_engine_cs *ring = rq->ring;
        struct drm_device *dev = ring->dev;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
        uint64_t desc;
        uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
@@ -294,55 +294,59 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
        return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
-                                struct drm_i915_gem_object *ctx_obj0,
-                                struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+                                struct drm_i915_gem_request *rq1)
 {
+
+       struct intel_engine_cs *ring = rq0->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint64_t temp = 0;
-       uint32_t desc[4];
+       uint64_t desc[2];
 
-       /* XXX: You must always write both descriptors in the order below. */
-       if (ctx_obj1)
-               temp = execlists_ctx_descriptor(ring, ctx_obj1);
-       else
-               temp = 0;
-       desc[1] = (u32)(temp >> 32);
-       desc[0] = (u32)temp;
+       if (rq1) {
+               desc[1] = execlists_ctx_descriptor(rq1);
+               rq1->elsp_submitted++;
+       } else {
+               desc[1] = 0;
+       }
 
-       temp = execlists_ctx_descriptor(ring, ctx_obj0);
-       desc[3] = (u32)(temp >> 32);
-       desc[2] = (u32)temp;
+       desc[0] = execlists_ctx_descriptor(rq0);
+       rq0->elsp_submitted++;
 
+       /* You must always write both descriptors in the order below. */
        spin_lock(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-       I915_WRITE_FW(RING_ELSP(ring), desc[1]);
-       I915_WRITE_FW(RING_ELSP(ring), desc[0]);
-       I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
 
+       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
        /* The context is automatically loaded after the following */
-       I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
-       /* ELSP is a wo register, so use another nearby reg for posting instead */
+       /* ELSP is a write-only register, use another nearby reg for posting */
        POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock(&dev_priv->uncore.lock);
 }
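
The ELSP sequence above works because each context descriptor is 64 bits
wide but the submit port only takes 32-bit writes: element 1 goes in first
(upper half, then lower), element 0 last, and the final lower-half write of
element 0 is what triggers the submission. A standalone sketch of that
split, with a printf standing in for the driver's I915_WRITE_FW():

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

/* Stand-in for an MMIO write to the (write-only) ELSP port. */
static void elsp_write(uint32_t val)
{
        printf("ELSP <= 0x%08x\n", (unsigned)val);
}

int main(void)
{
        uint64_t desc[2] = { 0x1234000000045678ull, 0 }; /* rq0 only */

        elsp_write(upper_32_bits(desc[1]));     /* element 1 first */
        elsp_write(lower_32_bits(desc[1]));
        elsp_write(upper_32_bits(desc[0]));
        elsp_write(lower_32_bits(desc[0]));     /* loads the contexts */
        return 0;
}
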
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-                                   struct drm_i915_gem_object *ring_obj,
-                                   struct i915_hw_ppgtt *ppgtt,
-                                   u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
+       struct intel_engine_cs *ring = rq->ring;
+       struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
        struct page *page;
        uint32_t *reg_state;
 
+       BUG_ON(!ctx_obj);
+       WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+       WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
        page = i915_gem_object_get_page(ctx_obj, 1);
        reg_state = kmap_atomic(page);
 
-       reg_state[CTX_RING_TAIL+1] = tail;
-       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+       reg_state[CTX_RING_TAIL+1] = rq->tail;
+       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
        /* True PPGTT with dynamic page allocation: update PDP registers and
         * point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
        return 0;
 }
 
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
-                                     struct intel_context *to0, u32 tail0,
-                                     struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+                                     struct drm_i915_gem_request *rq1)
 {
-       struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
-       struct drm_i915_gem_object *ctx_obj1 = NULL;
-       struct intel_ringbuffer *ringbuf1 = NULL;
-
-       BUG_ON(!ctx_obj0);
-       WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-       WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
-
-       execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
+       execlists_update_context(rq0);
 
-       if (to1) {
-               ringbuf1 = to1->engine[ring->id].ringbuf;
-               ctx_obj1 = to1->engine[ring->id].state;
-               BUG_ON(!ctx_obj1);
-               WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-               WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
+       if (rq1)
+               execlists_update_context(rq1);
 
-               execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
-       }
-
-       execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+       execlists_elsp_write(rq0, rq1);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
        WARN_ON(req1 && req1->elsp_submitted);
 
-       execlists_submit_contexts(ring, req0->ctx, req0->tail,
-                                 req1 ? req1->ctx : NULL,
-                                 req1 ? req1->tail : 0);
-
-       req0->elsp_submitted++;
-       if (req1)
-               req1->elsp_submitted++;
+       execlists_submit_requests(req0, req1);
 }
 
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
                status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
                                (read_pointer % 6) * 8 + 4);
 
+               if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+                       continue;
+
                if (status & GEN8_CTX_STATUS_PREEMPTED) {
                        if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
                                if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        ring->next_context_status_buffer = write_pointer % 6;
 
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  ((u32)ring->next_context_status_buffer & 0x07) << 8);
+                  _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
 }
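
The switch to _MASKED_FIELD() matters because the context status pointer is
a masked-write register: the top 16 bits select which of the low 16 bits
the write may modify, so a plain write with a zero mask is silently
ignored. A simplified model of that convention (the macro body here is an
assumption mirroring the i915 idiom, not a copy of the driver's
definition):

#include <assert.h>
#include <stdint.h>

/* Masked-write register model: bits [31:16] enable writes to the
 * corresponding bits [15:0]; unmasked bits keep their old value. */
#define MASKED_FIELD(mask, value) ((uint32_t)(((mask) << 16) | (value)))

static uint32_t masked_reg_write(uint32_t old, uint32_t wr)
{
        uint32_t mask = wr >> 16;

        return (old & ~mask) | (wr & mask & 0xffff);
}

int main(void)
{
        /* Update only the read-pointer field at bits [10:8]. */
        uint32_t wr = MASKED_FIELD(0x07 << 8, (5u & 0x07) << 8);

        assert(masked_reg_write(0xffff, wr) == (0xf8ffu | (5u << 8)));
        return 0;
}
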
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-                                  struct intel_context *to,
-                                  u32 tail,
-                                  struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+       struct intel_engine_cs *ring = request->ring;
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
-       if (to != ring->default_context)
-               intel_lr_context_pin(ring, to);
+       if (request->ctx != ring->default_context)
+               intel_lr_context_pin(request);
 
-       if (!request) {
-               /*
-                * If there isn't a request associated with this submission,
-                * create one as a temporary holder.
-                */
-               request = kzalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return -ENOMEM;
-               request->ring = ring;
-               request->ctx = to;
-               kref_init(&request->ref);
-               i915_gem_context_reference(request->ctx);
-       } else {
-               i915_gem_request_reference(request);
-               WARN_ON(to != request->ctx);
-       }
-       request->tail = tail;
+       i915_gem_request_reference(request);
+
+       request->tail = request->ringbuf->tail;
 
        spin_lock_irq(&ring->execlist_lock);
 
@@ -585,7 +553,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                                           struct drm_i915_gem_request,
                                           execlist_link);
 
-               if (to == tail_req->ctx) {
+               if (request->ctx == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
        return 0;
 }
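
A detail worth calling out in the queueing path above: the hardware can
lite-restore the context currently in the ELSP, so keeping more than two
pending requests for the same context in the software queue is pointless.
Once the queue is deeper than two, a not-yet-submitted tail request for the
same context is replaced by the newer one. A toy model of that rule (the
names and the array queue are illustrative only):

#include <stddef.h>

struct fake_req { int ctx_id; int elsp_submitted; };

/* Append `next`, dropping a stale same-context tail when the queue is
 * already more than two deep (the first two may be on the hardware). */
static size_t queue_request(struct fake_req *q, size_t len,
                            struct fake_req next)
{
        if (len > 2 && q[len - 1].ctx_id == next.ctx_id &&
            q[len - 1].elsp_submitted == 0)
                len--;
        q[len++] = next;
        return len;
}
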
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
-                                             struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
+       struct intel_engine_cs *ring = req->ring;
        uint32_t flush_domains;
        int ret;
 
@@ -614,8 +581,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->emit_flush(ringbuf, ctx,
-                              I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
@@ -623,12 +589,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
        return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-                                struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       const unsigned other_rings = ~intel_ring_flag(ring);
+       const unsigned other_rings = ~intel_ring_flag(req->ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -638,7 +602,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, ring);
+                       ret = i915_gem_object_sync(obj, req->ring, &req);
                        if (ret)
                                return ret;
                }
@@ -655,59 +619,59 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return logical_ring_invalidate_all_caches(ringbuf, ctx);
+       return logical_ring_invalidate_all_caches(req);
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-                                           struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
        int ret;
 
-       if (ctx != request->ring->default_context) {
-               ret = intel_lr_context_pin(request->ring, ctx);
+       request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+       if (request->ctx != request->ring->default_context) {
+               ret = intel_lr_context_pin(request);
                if (ret)
                        return ret;
        }
 
-       request->ringbuf = ctx->engine[request->ring->id].ringbuf;
-       request->ctx     = ctx;
-       i915_gem_context_reference(request->ctx);
-
        return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-                                      struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
                                       int bytes)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_i915_gem_request *request;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_engine_cs *ring = req->ring;
+       struct drm_i915_gem_request *target;
        unsigned space;
        int ret;
 
        if (intel_ring_space(ringbuf) >= bytes)
                return 0;
 
-       list_for_each_entry(request, &ring->request_list, list) {
+       /* The whole point of reserving space is to not wait! */
+       WARN_ON(ringbuf->reserved_in_use);
+
+       list_for_each_entry(target, &ring->request_list, list) {
                /*
                 * The request queue is per-engine, so can contain requests
                 * from multiple ringbuffers. Here, we must ignore any that
                 * aren't from the ringbuffer we're considering.
                 */
-               if (request->ringbuf != ringbuf)
+               if (target->ringbuf != ringbuf)
                        continue;
 
                /* Would completion of this request free enough space? */
-               space = __intel_ring_space(request->postfix, ringbuf->tail,
+               space = __intel_ring_space(target->postfix, ringbuf->tail,
                                           ringbuf->size);
                if (space >= bytes)
                        break;
        }
 
-       if (WARN_ON(&request->list == &ring->request_list))
+       if (WARN_ON(&target->list == &ring->request_list))
                return -ENOSPC;
 
-       ret = i915_wait_request(request);
+       ret = i915_wait_request(target);
        if (ret)
                return ret;
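
The search above leans on the usual circular-buffer arithmetic: once a
request's postfix becomes the new head, the free space is the distance from
the tail back around to that head, minus a small reserve so a full ring is
distinguishable from an empty one. A self-contained version of that
computation (the reserve parameter is an assumption, not the driver's
constant):

/* Free bytes in a ring of `size` once `head` retires, keeping
 * `reserve` bytes back so head == tail always means "empty". */
static int ring_space(int head, int tail, int size, int reserve)
{
        int space = head - tail - reserve;

        if (space < 0)
                space += size;
        return space;
}
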
 
@@ -717,7 +681,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: the request whose ringbuffer tail is to be advanced and submitted.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-                                     struct intel_context *ctx,
-                                     struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
+       struct intel_engine_cs *ring = request->ring;
 
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_advance(request->ringbuf);
 
        if (intel_ring_stopped(ring))
                return;
 
-       execlists_context_queue(ring, ctx, ringbuf->tail, request);
+       execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-                                   struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
        uint32_t __iomem *virt;
        int rem = ringbuf->size - ringbuf->tail;
 
-       if (ringbuf->space < rem) {
-               int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
-               if (ret)
-                       return ret;
-       }
-
        virt = ringbuf->virtual_start + ringbuf->tail;
        rem /= 4;
        while (rem--)
@@ -759,25 +713,50 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 
        ringbuf->tail = 0;
        intel_ring_update_space(ringbuf);
-
-       return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-                               struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-       int ret;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       int remain_actual = ringbuf->size - ringbuf->tail;
+       int ret, total_bytes, wait_bytes = 0;
+       bool need_wrap = false;
+
+       if (ringbuf->reserved_in_use)
+               total_bytes = bytes;
+       else
+               total_bytes = bytes + ringbuf->reserved_size;
 
-       if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-               ret = logical_ring_wrap_buffer(ringbuf, ctx);
-               if (unlikely(ret))
-                       return ret;
+       if (unlikely(bytes > remain_usable)) {
+               /*
+                * Not enough space for the basic request. So need to flush
+                * out the remainder and then wait for base + reserved.
+                */
+               wait_bytes = remain_actual + total_bytes;
+               need_wrap = true;
+       } else {
+               if (unlikely(total_bytes > remain_usable)) {
+                       /*
+                        * The base request will fit but the reserved space
+                        * falls off the end. So we only need to wait for the
+                        * reserved size after flushing out the remainder.
+                        */
+                       wait_bytes = remain_actual + ringbuf->reserved_size;
+                       need_wrap = true;
+               } else if (total_bytes > ringbuf->space) {
+                       /* No wrapping required, just waiting. */
+                       wait_bytes = total_bytes;
+               }
        }
 
-       if (unlikely(ringbuf->space < bytes)) {
-               ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+       if (wait_bytes) {
+               ret = logical_ring_wait_for_space(req, wait_bytes);
                if (unlikely(ret))
                        return ret;
+
+               if (need_wrap)
+                       __wrap_ring_buffer(ringbuf);
        }
 
        return 0;
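
The rewritten prepare step above distinguishes three cases: the request
itself no longer fits before the effective end (wrap, then wait for the old
tail-to-end bytes plus everything), only the reserved tail falls off the
end (wrap, then wait for the remainder plus the reservation), or everything
fits but the ring is simply too full (wait only). A compact model of that
decision, under the same field names:

#include <stdbool.h>

struct rb_state {
        int size, effective_size, tail, space;
        int reserved_size;
        bool reserved_in_use;
};

/* How many bytes must drain before `bytes` fits, and whether the tail
 * has to wrap first.  A model of the logic, not the driver function. */
static int bytes_to_wait_for(const struct rb_state *rb, int bytes,
                             bool *need_wrap)
{
        int remain_usable = rb->effective_size - rb->tail;
        int remain_actual = rb->size - rb->tail;
        int total = rb->reserved_in_use ? bytes
                                        : bytes + rb->reserved_size;

        *need_wrap = false;
        if (bytes > remain_usable) {            /* request won't fit */
                *need_wrap = true;
                return remain_actual + total;
        }
        if (total > remain_usable) {            /* reservation falls off */
                *need_wrap = true;
                return remain_actual + rb->reserved_size;
        }
        if (total > rb->space)                  /* fits, but ring is full */
                return total;
        return 0;                               /* no waiting needed */
}
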
@@ -786,7 +765,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @ringbuf: Logical ringbuffer.
+ * @req: The request to start some new work for.
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
-                                   struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv;
        int ret;
 
+       WARN_ON(req == NULL);
+       dev_priv = req->ring->dev->dev_private;
+
        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                   dev_priv->mm.interruptible);
        if (ret)
                return ret;
 
-       ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
-       if (ret)
-               return ret;
-
-       /* Preallocate the olr before touching the ring */
-       ret = i915_gem_request_alloc(ring, ctx);
+       ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;
 
-       ringbuf->space -= num_dwords * sizeof(uint32_t);
+       req->ringbuf->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+       /*
+        * The first call merely notes the reserve request and is common for
+        * all back ends. The subsequent localised _begin() call actually
+        * ensures that the reservation is available. Without the begin, if
+        * the request creator immediately submitted the request without
+        * adding any commands to it then there might not actually be
+        * sufficient room for the submission commands.
+        */
+       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+       return intel_logical_ring_begin(request, 0);
+}
+
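
The comment above describes a two-phase pattern: phase one only records the
worst-case size of the request-emission commands, phase two issues a
zero-dword begin so any waiting or wrapping happens before the caller
writes anything. A minimal standalone illustration of the same shape (all
names hypothetical, and the "wait" reduced to a failure return):

#include <stdio.h>

struct toy_ring { int space, reserved_size; };

static int toy_begin(struct toy_ring *r, int bytes)
{
        if (bytes + r->reserved_size > r->space)
                return -1;      /* the real driver would wait here */
        r->space -= bytes;
        return 0;
}

static int toy_reserve_space(struct toy_ring *r, int reserve)
{
        r->reserved_size = reserve;     /* phase 1: bookkeeping only */
        return toy_begin(r, 0);         /* phase 2: force the wait now */
}

int main(void)
{
        struct toy_ring r = { .space = 64, .reserved_size = 0 };

        printf("%d\n", toy_reserve_space(&r, 32));      /* 0: fits */
        printf("%d\n", toy_begin(&r, 40));              /* -1: 40+32 > 64 */
        return 0;
}
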
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
@@ -839,16 +829,15 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
  *
  * Return: non-zero if the submission fails.
  */
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-                              struct intel_engine_cs *ring,
-                              struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas,
-                              struct drm_i915_gem_object *batch_obj,
-                              u64 exec_start, u32 dispatch_flags)
+                              struct list_head *vmas)
 {
+       struct drm_device       *dev = params->dev;
+       struct intel_engine_cs  *ring = params->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+       u64 exec_start;
        int instp_mode;
        u32 instp_mask;
        int ret;
@@ -899,13 +888,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                return -EINVAL;
        }
 
-       ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+       ret = execlists_move_to_gpu(params->request, vmas);
        if (ret)
                return ret;
 
        if (ring == &dev_priv->ring[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+               ret = intel_logical_ring_begin(params->request, 4);
                if (ret)
                        return ret;
 
@@ -918,14 +907,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                dev_priv->relative_constants_mode = instp_mode;
        }
 
-       ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+       exec_start = params->batch_obj_vm_offset +
+                    args->batch_start_offset;
+
+       ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+       trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-       i915_gem_execbuffer_move_to_active(vmas, ring);
-       i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+       i915_gem_execbuffer_move_to_active(vmas, params->request);
+       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -950,7 +942,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                                ctx->engine[ring->id].state;
 
                if (ctx_obj && (ctx != ring->default_context))
-                       intel_lr_context_unpin(ring, ctx);
+                       intel_lr_context_unpin(req);
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
@@ -978,16 +970,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
        I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (!ring->gpu_caches_dirty)
                return 0;
 
-       ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+       ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -995,15 +986,15 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
        return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-               struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct intel_engine_cs *ring = rq->ring;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = rq->ringbuf;
        int ret = 0;
 
        WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-       if (ctx->engine[ring->id].pin_count++ == 0) {
+       if (rq->ctx->engine[ring->id].pin_count++ == 0) {
                ret = i915_gem_obj_ggtt_pin(ctx_obj,
                                GEN8_LR_CONTEXT_ALIGN, 0);
                if (ret)
@@ -1019,31 +1010,31 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 unpin_ctx_obj:
        i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-       ctx->engine[ring->id].pin_count = 0;
+       rq->ctx->engine[ring->id].pin_count = 0;
 
        return ret;
 }
 
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-               struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct intel_engine_cs *ring = rq->ring;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
        if (ctx_obj) {
                WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-               if (--ctx->engine[ring->id].pin_count == 0) {
+               if (--rq->ctx->engine[ring->id].pin_count == 0) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                }
        }
 }
 
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
-                                              struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1052,11 +1043,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
                return 0;
 
        ring->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(ringbuf, ctx);
+       ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
-       ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+       ret = intel_logical_ring_begin(req, w->count * 2 + 2);
        if (ret)
                return ret;
 
@@ -1070,13 +1061,361 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
        intel_logical_ring_advance(ringbuf);
 
        ring->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(ringbuf, ctx);
+       ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
        return 0;
 }
 
+#define wa_ctx_emit(batch, index, cmd)                                 \
+       do {                                                            \
+               int __index = (index)++;                                \
+               if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+                       return -ENOSPC;                                 \
+               }                                                       \
+               batch[__index] = (cmd);                                 \
+       } while (0)
+
+
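
Because wa_ctx_emit() expands to a bare `return -ENOSPC` on overflow, it
can only appear inside functions returning int, which is why the
*_init_*_bb() helpers below are all shaped that way. A stripped-down,
compilable rendition of the pattern (PAGE_SIZE and the WARN dropped):

#include <errno.h>
#include <stdint.h>

#define BATCH_DWORDS 1024       /* stand-in for PAGE_SIZE / sizeof(u32) */

#define wa_ctx_emit(batch, index, cmd)                  \
        do {                                            \
                int __index = (index)++;                \
                if (__index >= BATCH_DWORDS)            \
                        return -ENOSPC;                 \
                (batch)[__index] = (cmd);               \
        } while (0)

/* Returns the next free dword on success, -ENOSPC past the page end. */
static int emit_noops(uint32_t *batch, uint32_t index, int n)
{
        while (n--)
                wa_ctx_emit(batch, index, 0);   /* MI_NOOP encodes as 0 */
        return (int)index;
}
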
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: the WA is applied in a
+ * batch whose values are only initialized once, so we cannot read the
+ * register value at the beginning and reuse it later. Hence we save its
+ * value to memory, upload a constant with bit 21 set, and then restore
+ * the saved value. To simplify the WA, the constant is formed from the
+ * default value of this register. This shouldn't be a problem because we
+ * only modify it for a short period and this batch is non-preemptible.
+ * We could of course use additional instructions that read the actual
+ * register value at that time and set only the bit of interest, but that
+ * would complicate the WA.
+ *
+ * This WA is also required for Gen9, so extracting it as a function
+ * avoids code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+                                               uint32_t *const batch,
+                                               uint32_t index)
+{
+       uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+       /*
+        * WaDisableLSQCROPERFforOCL:skl
+        * This WA is implemented in skl_init_clock_gating() but since
+        * this batch updates GEN8_L3SQCREG4 with default value we need to
+        * set this bit here to retain the WA during flush.
+        */
+       if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+               l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+       wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+                                  MI_SRM_LRM_GLOBAL_GTT));
+       wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, 0);
+
+       wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+       wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+       wa_ctx_emit(batch, index, l3sqc4_flush);
+
+       wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+       wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+                                  PIPE_CONTROL_DC_FLUSH_ENABLE));
+       wa_ctx_emit(batch, index, 0);
+       wa_ctx_emit(batch, index, 0);
+       wa_ctx_emit(batch, index, 0);
+       wa_ctx_emit(batch, index, 0);
+
+       wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+                                  MI_SRM_LRM_GLOBAL_GTT));
+       wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, 0);
+
+       return index;
+}
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+                                   uint32_t offset,
+                                   uint32_t start_alignment)
+{
+       return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+                            uint32_t offset,
+                            uint32_t size_alignment)
+{
+       wa_ctx->size = offset - wa_ctx->offset;
+
+       WARN(wa_ctx->size % size_alignment,
+            "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+            wa_ctx->size, size_alignment);
+       return 0;
+}
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned. This
+ *    is updated with the offset value received as input.
+ *  size: size of the batch in DWORDS but HW expects it in terms of cachelines
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of the batch, it should be
+ *  cache-aligned otherwise it is adjusted accordingly.
+ *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
+ *  initialized at the beginning and shared across all contexts, but this
+ *  field helps us to have multiple batches at different offsets and select
+ *  them based on some criteria. At the moment this batch always starts at
+ *  the beginning of the page and we don't have multiple wa_ctx batch buffers.
+ *
+ *  The number of WA applied is not known at the beginning; we use this field
+ *  to return the number of DWORDS written.
+ *
+ *  Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ *  added as padding to make it cacheline aligned. MI_BATCH_BUFFER_END will
+ *  be added to the perctx batch and the two together make a complete batch
+ *  buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+                                   struct i915_wa_ctx_bb *wa_ctx,
+                                   uint32_t *const batch,
+                                   uint32_t *offset)
+{
+       uint32_t scratch_addr;
+       uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+       /* WaDisableCtxRestoreArbitration:bdw,chv */
+       wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+       /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+       if (IS_BROADWELL(ring->dev)) {
+               int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+
+               if (rc < 0)
+                       return rc;
+               index = rc;
+       }
+
+       /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+       /* The actual scratch location is at a 128-byte offset */
+       scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+       wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+       wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+                                  PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                  PIPE_CONTROL_CS_STALL |
+                                  PIPE_CONTROL_QW_WRITE));
+       wa_ctx_emit(batch, index, scratch_addr);
+       wa_ctx_emit(batch, index, 0);
+       wa_ctx_emit(batch, index, 0);
+       wa_ctx_emit(batch, index, 0);
+
+       /* Pad to end of cacheline */
+       while (index % CACHELINE_DWORDS)
+               wa_ctx_emit(batch, index, MI_NOOP);
+
+       /*
+        * MI_BATCH_BUFFER_END is not required in the indirect ctx BB because
+        * execution depends on the length specified in terms of cache lines
+        * in the CTX_RCS_INDIRECT_CTX register.
+        */
+
+       return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies start of the batch, should be cache-aligned.
+ *  size: size of the batch in DWORDS but HW expects it in terms of cachelines
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of this batch.
+ *   This batch is started immediately after the indirect_ctx batch. Since we
+ *   ensure that indirect_ctx ends on a cacheline, this batch is aligned
+ *   automatically.
+ *
+ *   The number of DWORDS written is returned using this field.
+ *
+ *  This batch is terminated with MI_BATCH_BUFFER_END, so we need not pad it
+ *  to a cacheline: padding after MI_BATCH_BUFFER_END is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+                              struct i915_wa_ctx_bb *wa_ctx,
+                              uint32_t *const batch,
+                              uint32_t *offset)
+{
+       uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+       /* WaDisableCtxRestoreArbitration:bdw,chv */
+       wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+       wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+       return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+                                   struct i915_wa_ctx_bb *wa_ctx,
+                                   uint32_t *const batch,
+                                   uint32_t *offset)
+{
+       int ret;
+       struct drm_device *dev = ring->dev;
+       uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+       /* WaDisableCtxRestoreArbitration:skl,bxt */
+       if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+           (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+               wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+       /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+       ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+       if (ret < 0)
+               return ret;
+       index = ret;
+
+       /* Pad to end of cacheline */
+       while (index % CACHELINE_DWORDS)
+               wa_ctx_emit(batch, index, MI_NOOP);
+
+       return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+                              struct i915_wa_ctx_bb *wa_ctx,
+                              uint32_t *const batch,
+                              uint32_t *offset)
+{
+       struct drm_device *dev = ring->dev;
+       uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+       /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+       if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+           (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+               wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+               wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+               wa_ctx_emit(batch, index,
+                           _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+               wa_ctx_emit(batch, index, MI_NOOP);
+       }
+
+       /* WaDisableCtxRestoreArbitration:skl,bxt */
+       if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+           (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+               wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+       wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+       return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+       int ret;
+
+       ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+       if (!ring->wa_ctx.obj) {
+               DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+               return -ENOMEM;
+       }
+
+       ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+       if (ret) {
+               DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+                                ret);
+               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+       if (ring->wa_ctx.obj) {
+               i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+               ring->wa_ctx.obj = NULL;
+       }
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+       int ret;
+       uint32_t *batch;
+       uint32_t offset;
+       struct page *page;
+       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+       WARN_ON(ring->id != RCS);
+
+       /* update this when WA for higher Gen are added */
+       if (INTEL_INFO(ring->dev)->gen > 9) {
+               DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+                         INTEL_INFO(ring->dev)->gen);
+               return 0;
+       }
+
+       /* some WA perform writes to scratch page, ensure it is valid */
+       if (ring->scratch.obj == NULL) {
+               DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+               return -EINVAL;
+       }
+
+       ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+               return ret;
+       }
+
+       page = i915_gem_object_get_page(wa_ctx->obj, 0);
+       batch = kmap_atomic(page);
+       offset = 0;
+
+       if (INTEL_INFO(ring->dev)->gen == 8) {
+               ret = gen8_init_indirectctx_bb(ring,
+                                              &wa_ctx->indirect_ctx,
+                                              batch,
+                                              &offset);
+               if (ret)
+                       goto out;
+
+               ret = gen8_init_perctx_bb(ring,
+                                         &wa_ctx->per_ctx,
+                                         batch,
+                                         &offset);
+               if (ret)
+                       goto out;
+       } else if (INTEL_INFO(ring->dev)->gen == 9) {
+               ret = gen9_init_indirectctx_bb(ring,
+                                              &wa_ctx->indirect_ctx,
+                                              batch,
+                                              &offset);
+               if (ret)
+                       goto out;
+
+               ret = gen9_init_perctx_bb(ring,
+                                         &wa_ctx->per_ctx,
+                                         batch,
+                                         &offset);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       kunmap_atomic(batch);
+       if (ret)
+               lrc_destroy_wa_ctx_obj(ring);
+
+       return ret;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
@@ -1137,19 +1476,64 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
        return init_workarounds_ring(ring);
 }
 
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
-                             struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+       struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+       int i, ret;
+
+       ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+       if (ret)
+               return ret;
+
+       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+       for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+               intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+               intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+               intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+               intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+       }
+
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_advance(ringbuf);
+
+       return 0;
+}
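
The `num_lri_cmds * 2 + 2` sizing above follows directly from the
MI_LOAD_REGISTER_IMM layout: one header dword, then a (register offset,
value) pair per load, plus a trailing MI_NOOP to keep the total even. With
GEN8_LEGACY_PDPES == 4 and two 32-bit registers per PDP entry, that is 8
loads, hence 18 dwords. A quick check of the arithmetic:

/* Dwords for an MI_LOAD_REGISTER_IMM block writing n registers:
 * 1 header + 2 per (reg, value) pair + 1 NOOP of padding. */
static int lri_block_dwords(int n)
{
        return 1 + 2 * n + 1;
}
/* lri_block_dwords(4 * 2) == 18 == num_lri_cmds * 2 + 2 above. */
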
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                              u64 offset, unsigned dispatch_flags)
 {
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
-       ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+       /* Don't rely on hw updating PDPs, especially in lite-restore.
+        * Ideally, we should set Force PD Restore in ctx descriptor,
+        * but we can't. Force Restore would be a second option, but
+        * it is unsafe in case of lite-restore (because the ctx is
+        * not idle). */
+       if (req->ctx->ppgtt &&
+           (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+               ret = intel_logical_ring_emit_pdps(req);
+               if (ret)
+                       return ret;
+
+               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+       }
+
+       ret = intel_logical_ring_begin(req, 4);
        if (ret)
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+       intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+                               (ppgtt<<8) |
+                               (dispatch_flags & I915_DISPATCH_RS ?
+                                MI_BATCH_RESOURCE_STREAMER : 0));
        intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
        intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
        intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1191,18 +1575,18 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
-                          struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
                           u32 invalidate_domains,
                           u32 unused)
 {
+       struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
 
-       ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+       ret = intel_logical_ring_begin(request, 4);
        if (ret)
                return ret;
 
@@ -1232,11 +1616,11 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
        return 0;
 }
 
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                                  u32 invalidate_domains,
                                  u32 flush_domains)
 {
+       struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        bool vf_flush_wa;
@@ -1268,7 +1652,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
        vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
                      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
-       ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+       ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
        if (ret)
                return ret;
 
@@ -1302,9 +1686,9 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
-                            struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
+       struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 cmd;
        int ret;
@@ -1314,7 +1698,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
         * used as a workaround for not being allowed to do lite
         * restore with HEAD==TAIL (WaIdleLiteRestore).
         */
-       ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+       ret = intel_logical_ring_begin(request, 8);
        if (ret)
                return ret;
 
@@ -1326,11 +1710,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
                                (ring->status_page.gfx_addr +
                                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf,
-               i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+       intel_logical_ring_advance_and_submit(request);
 
        /*
         * Here we add two extra NOOPs as padding to avoid
@@ -1343,49 +1726,53 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
        return 0;
 }
 
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-                                             struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 {
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
        struct render_state so;
-       struct drm_i915_file_private *file_priv = ctx->file_priv;
-       struct drm_file *file = file_priv ? file_priv->file : NULL;
        int ret;
 
-       ret = i915_gem_render_state_prepare(ring, &so);
+       ret = i915_gem_render_state_prepare(req->ring, &so);
        if (ret)
                return ret;
 
        if (so.rodata == NULL)
                return 0;
 
-       ret = ring->emit_bb_start(ringbuf,
-                       ctx,
-                       so.ggtt_offset,
-                       I915_DISPATCH_SECURE);
+       ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+                                      I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+       ret = req->ring->emit_bb_start(req,
+                                      (so.ggtt_offset + so.aux_batch_offset),
+                                      I915_DISPATCH_SECURE);
+       if (ret)
+               goto out;
+
+       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
-       ret = __i915_add_request(ring, file, so.obj);
-       /* intel_logical_ring_add_request moves object to inactive if it
-        * fails */
 out:
        i915_gem_render_state_fini(&so);
        return ret;
 }
 
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
-                      struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
        int ret;
 
-       ret = intel_logical_ring_workarounds_emit(ring, ctx);
+       ret = intel_logical_ring_workarounds_emit(req);
        if (ret)
                return ret;
 
-       return intel_lr_context_render_state_init(ring, ctx);
+       ret = intel_rcs_context_init_mocs(req);
+       /*
+        * Failing to program the MOCS is non-fatal. The system will not
+        * run at peak performance. So generate an error and carry on.
+        */
+       if (ret)
+               DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+       return intel_lr_context_render_state_init(req);
 }
 
 /**
@@ -1405,7 +1792,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
        intel_logical_ring_stop(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
        if (ring->cleanup)
                ring->cleanup(ring);
@@ -1417,6 +1803,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
                kunmap(sg_page(ring->status_page.obj->pages->sgl));
                ring->status_page.obj = NULL;
        }
+
+       lrc_destroy_wa_ctx_obj(ring);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1476,11 +1864,28 @@ static int logical_render_ring_init(struct drm_device *dev)
        ring->emit_bb_start = gen8_emit_bb_start;
 
        ring->dev = dev;
-       ret = logical_ring_init(dev, ring);
+
+       ret = intel_init_pipe_control(ring);
        if (ret)
                return ret;
 
-       return intel_init_pipe_control(ring);
+       ret = intel_init_workaround_bb(ring);
+       if (ret) {
+                * We continue even if we fail to initialize the WA batch
+                * because we only expect rare glitches, and nothing
+                * critical enough to prevent us from using the GPU.
+                * critical to prevent us from using GPU
+                */
+               DRM_ERROR("WA batch buffer initialization failed: %d\n",
+                         ret);
+       }
+
+       ret = logical_ring_init(dev, ring);
+       ret = logical_ring_init(dev, ring);
+       if (ret)
+               lrc_destroy_wa_ctx_obj(ring);
+
+       return ret;
 }
 
 static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1735,7 +2140,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
        reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
        reg_state[CTX_CONTEXT_CONTROL+1] =
                _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-                               CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+                                  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+                                  CTX_CTRL_RS_CTX_ENABLE);
        reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
        reg_state[CTX_RING_HEAD+1] = 0;
        reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1760,15 +2166,27 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
        reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
        reg_state[CTX_SECOND_BB_STATE+1] = 0;
        if (ring->id == RCS) {
-               /* TODO: according to BSpec, the register state context
-                * for CHV does not have these. OTOH, these registers do
-                * exist in CHV. I'm waiting for a clarification */
                reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
                reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
                reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
                reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+               if (ring->wa_ctx.obj) {
+                       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+                       uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+                       reg_state[CTX_RCS_INDIRECT_CTX+1] =
+                               (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+                               (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+                       reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+                               CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+                       reg_state[CTX_BB_PER_CTX_PTR+1] =
+                               (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+                               0x01;
+               }
        }
        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
        reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
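
The wa_ctx register values written above are packed dwords: because the
batch offset is cacheline aligned, its low six bits are free to carry the
indirect-ctx size in cachelines, and the per-ctx pointer uses bit 0 as a
valid flag. A small sketch of that packing (field widths are inferred from
the code above, so treat them as assumptions):

#include <stdint.h>

#define CACHELINE_BYTES         64
#define CACHELINE_DWORDS        (CACHELINE_BYTES / sizeof(uint32_t))

/* ggtt_addr must be cacheline aligned: the size shares the dword. */
static uint32_t pack_indirect_ctx(uint32_t ggtt_addr, uint32_t size_dw)
{
        return ggtt_addr | (uint32_t)(size_dw / CACHELINE_DWORDS);
}

/* Per-context batch pointer: bit 0 marks the entry as valid. */
static uint32_t pack_per_ctx_ptr(uint32_t ggtt_addr)
{
        return ggtt_addr | 0x01;
}
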
@@ -1973,13 +2391,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
                lrc_setup_hardware_status_page(ring, ctx_obj);
        else if (ring->id == RCS && !ctx->rcs_initialized) {
                if (ring->init_context) {
-                       ret = ring->init_context(ring, ctx);
+                       struct drm_i915_gem_request *req;
+
+                       ret = i915_gem_request_alloc(ring, ctx, &req);
+                       if (ret)
+                               return ret;
+
+                       ret = ring->init_context(req);
                        if (ret) {
                                DRM_ERROR("ring init context: %d\n", ret);
+                               i915_gem_request_cancel(req);
                                ctx->engine[ring->id].ringbuf = NULL;
                                ctx->engine[ring->id].state = NULL;
                                goto error;
                        }
+
+                       i915_add_request_no_flush(req);
                }
 
                ctx->rcs_initialized = true;
index 04d3a6d8b207e2ff2031df23a486300503c18a98..64f89f9982a20f745cb41ef5950cd637437000ca 100644 (file)
 #define RING_CONTEXT_CONTROL(ring)     ((ring)->mmio_base+0x244)
 #define          CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       (1 << 3)
 #define          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   (1 << 0)
+#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
 #define RING_CONTEXT_STATUS_BUF(ring)  ((ring)->mmio_base+0x370)
 #define RING_CONTEXT_STATUS_PTR(ring)  ((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-                                           struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-               struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-                              struct intel_engine_cs *ring,
-                              struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas,
-                              struct drm_i915_gem_object *batch_obj,
-                              u64 exec_start, u32 dispatch_flags);
+                              struct list_head *vmas);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
index 161ab26f81fb8b2a8b56a87f5b0605875f924fc6..881b5d13592ef8075e64786c68e31050f6741d94 100644 (file)
@@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct intel_connector *intel_connector =
-               &lvds_encoder->attached_connector->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ctl_reg, stat_reg;
 
@@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
                stat_reg = PP_STATUS;
        }
 
-       intel_panel_disable_backlight(intel_connector);
-
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
        if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
                DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        POSTING_READ(lvds_encoder->reg);
 }
 
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+       struct intel_connector *intel_connector =
+               &lvds_encoder->attached_connector->base;
+
+       intel_panel_disable_backlight(intel_connector);
+
+       intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+       struct intel_connector *intel_connector =
+               &lvds_encoder->attached_connector->base;
+
+       intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+       intel_disable_lvds(encoder);
+}
+
 static enum drm_mode_status
 intel_lvds_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
@@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         */
        if (!HAS_PCH_SPLIT(dev)) {
                drm_modeset_lock_all(dev);
-               intel_modeset_setup_hw_state(dev, true);
+               intel_display_resume(dev);
                drm_modeset_unlock_all(dev);
        }
 
@@ -528,7 +549,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_lvds_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev)
        if (dmi_check_system(intel_no_lvds))
                return;
 
-       pin = GMBUS_PIN_PANEL;
-       if (!lvds_is_present_in_vbt(dev, &pin)) {
-               DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-               return;
-       }
-
        if (HAS_PCH_SPLIT(dev)) {
                if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
                        return;
@@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev)
                }
        }
 
+       pin = GMBUS_PIN_PANEL;
+       if (!lvds_is_present_in_vbt(dev, &pin)) {
+               u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+               if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+                       DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+                       return;
+               }
+               DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+       }
+
        lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
        if (!lvds_encoder)
                return;
@@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev)
        intel_encoder->enable = intel_enable_lvds;
        intel_encoder->pre_enable = intel_pre_enable_lvds;
        intel_encoder->compute_config = intel_lvds_compute_config;
-       intel_encoder->disable = intel_disable_lvds;
+       if (HAS_PCH_SPLIT(dev_priv)) {
+               intel_encoder->disable = pch_disable_lvds;
+               intel_encoder->post_disable = pch_post_disable_lvds;
+       } else {
+               intel_encoder->disable = gmch_disable_lvds;
+       }
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_encoder->get_config = intel_lvds_get_config;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev)
                        drm_mode_debug_printmodeline(scan);
 
                        fixed_mode = drm_mode_duplicate(dev, scan);
-                       if (fixed_mode) {
-                               downclock_mode =
-                                       intel_find_panel_downclock(dev,
-                                       fixed_mode, connector);
-                               if (downclock_mode != NULL &&
-                                       i915.lvds_downclock) {
-                                       /* We found the downclock for LVDS. */
-                                       dev_priv->lvds_downclock_avail = true;
-                                       dev_priv->lvds_downclock =
-                                               downclock_mode->clock;
-                                       DRM_DEBUG_KMS("LVDS downclock is found"
-                                       " in EDID. Normal clock %dKhz, "
-                                       "downclock %dKhz\n",
-                                       fixed_mode->clock,
-                                       dev_priv->lvds_downclock);
-                               }
+                       if (fixed_mode)
                                goto out;
-                       }
                }
        }
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644 (file)
index 0000000..6d3c6c0
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+       u32 control_value;
+       u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+       u32 size;
+       const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value) ((value) << 0)
+#define LE_TGT_CACHE(value)    ((value) << 2)
+#define LE_LRUM(value)         ((value) << 4)
+#define LE_AOM(value)          ((value) << 6)
+#define LE_RSC(value)          ((value) << 7)
+#define LE_SCC(value)          ((value) << 8)
+#define LE_PFM(value)          ((value) << 11)
+#define LE_SCF(value)          ((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value)          ((value) << 0)
+#define L3_SCC(value)          ((value) << 1)
+#define L3_CACHEABILITY(value) ((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES  62  /* 62 out of 64 - the last two entries are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE           0
+#define LE_UC                  1
+#define LE_WT                  2
+#define LE_WB                  3
+
+/* L3 caching options */
+#define L3_DIRECT              0
+#define L3_UC                  1
+#define L3_RESERVED            2
+#define L3_WB                  3
+
+/* Target cache */
+#define ELLC                   0
+#define LLC                    1
+#define LLC_ELLC               2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS31 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However, some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon.  For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with an uncached entry and the length
+ *       MUST be less than 63 as the last two registers are reserved
+ *       by the hardware.  These tables are part of the kernel ABI and
+ *       may only be updated incrementally by adding entries at the
+ *       end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+       /* { 0x00000009, 0x0010 } */
+       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+       /* { 0x00000038, 0x0030 } */
+       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+       /* { 0x0000003b, 0x0030 } */
+       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
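
Editor's note: the hex pairs in the /* { ... } */ comments above are just the ORed results of the shift macros. A standalone sanity check (C, not part of the patch; macro definitions copied from above):

#include <stdio.h>

#define LE_CACHEABILITY(v)	((v) << 0)
#define LE_TGT_CACHE(v)		((v) << 2)
#define LE_LRUM(v)		((v) << 4)
#define L3_CACHEABILITY(v)	((v) << 4)

int main(void)
{
	/* third Skylake entry: LE_WB = 3, LLC_ELLC = 2, L3_WB = 3 */
	unsigned int control = LE_CACHEABILITY(3) |	/* 0x03 */
			       LE_TGT_CACHE(2) |	/* 0x08 */
			       LE_LRUM(3);		/* 0x30 */
	unsigned int l3cc = L3_CACHEABILITY(3);		/* 0x30 */

	/* prints control=0x0000003b l3cc=0x0030, matching { 0x0000003b, 0x0030 } */
	printf("control=0x%08x l3cc=0x%04x\n", control, l3cc);
	return 0;
}
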
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+       /* { 0x00000009, 0x0010 } */
+       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+       /* { 0x00000038, 0x0030 } */
+       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+       /* { 0x0000003b, 0x0030 } */
+       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/**
+ * get_mocs_settings() - get the MOCS table for a device
+ * @dev:        DRM device.
+ * @table:      Output table that will be made to point at appropriate
+ *              MOCS values for the device.
+ *
+ * This function looks up the MOCS table appropriate for the platform,
+ * points @table at it, and reports whether a table needs to be
+ * programmed at all.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+                             struct drm_i915_mocs_table *table)
+{
+       bool result = false;
+
+       if (IS_SKYLAKE(dev)) {
+               table->size  = ARRAY_SIZE(skylake_mocs_table);
+               table->table = skylake_mocs_table;
+               result = true;
+       } else if (IS_BROXTON(dev)) {
+               table->size  = ARRAY_SIZE(broxton_mocs_table);
+               table->table = broxton_mocs_table;
+               result = true;
+       } else {
+               WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+                         "Platform that should have a MOCS table does not.\n");
+       }
+
+       return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req:       Request to set up the MOCS table for.
+ * @table:     The values to program into the control regs.
+ * @reg_base:  The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+                                  const struct drm_i915_mocs_table *table,
+                                  u32 reg_base)
+{
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       unsigned int index;
+       int ret;
+
+       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+               return -ENODEV;
+
+       ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+       if (ret) {
+               DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+               return ret;
+       }
+
+       intel_logical_ring_emit(ringbuf,
+                               MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+       for (index = 0; index < table->size; index++) {
+               intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+               intel_logical_ring_emit(ringbuf,
+                                       table->table[index].control_value);
+       }
+
+       /*
+        * Ok, now set the unused entries to uncached. These entries
+        * are officially undefined and no contract is given for their
+        * contents or settings.
+        *
+        * Entry 0 in the table is uncached - so we are just writing
+        * that value to all the unused entries.
+        */
+       for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+               intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+               intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+       }
+
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_advance(ringbuf);
+
+       return 0;
+}
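
Editor's note: the ring-space reservation above follows directly from the LRI layout. A sketch of the arithmetic (hypothetical helper, not in the patch):

/*
 * One MI_LOAD_REGISTER_IMM header, one (offset, value) dword pair per
 * MOCS entry, and a trailing MI_NOOP.
 */
static inline int mocs_control_table_dwords(void)
{
	return 1		/* MI_LOAD_REGISTER_IMM(62) header */
	     + 2 * 62		/* GEN9_NUM_MOCS_ENTRIES (reg, value) pairs */
	     + 1;		/* MI_NOOP */
}
/* = 126 = 2 + 2 * GEN9_NUM_MOCS_ENTRIES, matching intel_logical_ring_begin() above */
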
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req:       Request to set up the MOCS table for.
+ * @table:     The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table, starting at GEN9_LNCFCMOCS0. This register set is
+ * programmed in pairs, two 16-bit entries per 32-bit register.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+                               const struct drm_i915_mocs_table *table)
+{
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       unsigned int count;
+       unsigned int i;
+       u32 value;
+       u32 filler = (table->table[0].l3cc_value & 0xffff) |
+                       ((table->table[0].l3cc_value & 0xffff) << 16);
+       int ret;
+
+       if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+               return -ENODEV;
+
+       ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+       if (ret) {
+               DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+               return ret;
+       }
+
+       intel_logical_ring_emit(ringbuf,
+                       MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+       for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+               value = (table->table[count].l3cc_value & 0xffff) |
+                       ((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+               intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+               intel_logical_ring_emit(ringbuf, value);
+       }
+
+       if (table->size & 0x01) {
+               /* Odd table size - 1 left over */
+               value = (table->table[count].l3cc_value & 0xffff) |
+                       ((table->table[0].l3cc_value & 0xffff) << 16);
+       } else {
+               value = filler;
+       }
+
+       /*
+        * Now set the rest of the table to uncached - use entry 0 as
+        * this will be uncached. Leave the last pair uninitialised as
+        * those entries are reserved by the hardware.
+        */
+       for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+               intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+               intel_logical_ring_emit(ringbuf, value);
+
+               value = filler;
+       }
+
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_advance(ringbuf);
+
+       return 0;
+}
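
Editor's note: a minimal sketch of the 16-bit pairing used in the loop above, written with standard C types rather than kernel ones (not part of the patch). Entry 2k lands in bits 15:0 of LNCFCMOCS[k], entry 2k+1 in bits 31:16:

#include <stdint.h>

static inline uint32_t l3cc_pack(uint16_t entry_lo, uint16_t entry_hi)
{
	return (uint32_t)entry_lo | ((uint32_t)entry_hi << 16);
}
/* e.g. table entries 0 and 1 above: l3cc_pack(0x0010, 0x0030) == 0x00300010 */
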
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS registers.
+ * @req:       Request to set up the MOCS tables for.
+ *
+ * This function will emit the commands required for programming the
+ * MOCS register values for all the currently supported rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted at the same time as the context is created, ensuring that a
+ * newly created context starts with them set up. They have to be emitted
+ * at the start of the context, as setting the ELSP will re-initialise
+ * some of these registers back to the hardware values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+       struct drm_i915_mocs_table t;
+       int ret;
+
+       if (get_mocs_settings(req->ring->dev, &t)) {
+               /* Program the control registers */
+               ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+               if (ret)
+                       return ret;
+
+               ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+               if (ret)
+                       return ret;
+
+               ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+               if (ret)
+                       return ret;
+
+               ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+               if (ret)
+                       return ret;
+
+               ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+               if (ret)
+                       return ret;
+
+               /* Now program the l3cc registers */
+               ret = emit_mocs_l3cc_table(req, &t);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644 (file)
index 0000000..76e45b1
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous generations the MOCS setting was a value supplied by
+ * userspace as part of the batch. In Gen9 this has changed to a single
+ * table (per ring) that all batches now reference by index instead of
+ * programming the MOCS directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in the context (the GFX_MOCS_0 - GFX_MOCS_63 and the LNCFCMOCS0 -
+ * LNCFCMOCS31 registers). The rest (the settings for the other rings) are not.
+ *
+ * This table needs to be set at system start-up because of the way the
+ * table interacts with the contexts and the GmmLib interface.
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
index 481337436f7215eae46d7c407f1596d8e31fe031..cb1c65739425e6e943a083621b58926a301bc3d8 100644 (file)
@@ -25,8 +25,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/acpi.h>
 #include <acpi/video.h>
 
@@ -53,6 +51,7 @@
 #define MBOX_ACPI      (1<<0)
 #define MBOX_SWSCI     (1<<1)
 #define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
 
 struct opregion_header {
        u8 signature[16];
@@ -62,7 +61,10 @@ struct opregion_header {
        u8 vbios_ver[16];
        u8 driver_ver[16];
        u32 mboxes;
-       u8 reserved[164];
+       u32 driver_model;
+       u32 pcon;
+       u8 dver[32];
+       u8 rsvd[124];
 } __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@ struct opregion_acpi {
        u32 evts;       /* ASL supported events */
        u32 cnot;       /* current OS notification */
        u32 nrdy;       /* driver status */
-       u8 rsvd2[60];
+       u32 did2[7];    /* extended supported display devices ID list */
+       u32 cpd2[7];    /* extended attached display devices list */
+       u8 rsvd2[4];
 } __packed;
 
 /* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@ struct opregion_asle {
        u32 pcft;       /* power conservation features */
        u32 srot;       /* supported rotation angles */
        u32 iuer;       /* IUER events */
-       u8 rsvd[86];
+       u64 fdss;
+       u32 fdsp;
+       u32 stat;
+       u8 rsvd[70];
 } __packed;
 
 /* Driver readiness indicator */
@@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = {
  * (version 3)
  */
 
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+       u32 did;
+
+       if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+               did = ioread32(&opregion->acpi->didl[i]);
+       } else {
+               i -= ARRAY_SIZE(opregion->acpi->didl);
+
+               if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+                       return 0;
+
+               did = ioread32(&opregion->acpi->did2[i]);
+       }
+
+       return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+       if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+               iowrite32(val, &opregion->acpi->didl[i]);
+       } else {
+               i -= ARRAY_SIZE(opregion->acpi->didl);
+
+               if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+                       return;
+
+               iowrite32(val, &opregion->acpi->did2[i]);
+       }
+}
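
Editor's note: get_did()/set_did() give the two arrays one flat index space. Assuming the 8-entry didl[] from the original layout plus the 7-entry did2[] added above:

/*
 *   i = 0..7   ->  acpi->didl[i]        (original slots)
 *   i = 8..14  ->  acpi->did2[i - 8]    (extended slots, opregion 3.0)
 *
 * hence max_outputs = 8 + 7 = 15 in intel_didl_outputs() below.
 */
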
+
 static void intel_didl_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev)
        struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
        unsigned long long device_id;
        acpi_status status;
-       u32 temp;
+       u32 temp, max_outputs;
        int i = 0;
 
        handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev)
        }
 
        if (!acpi_video_bus) {
-               pr_warn("No ACPI video bus found\n");
+               DRM_ERROR("No ACPI video bus found\n");
                return;
        }
 
+       /*
+        * In theory, did2, the extended didl, gets added at opregion version
+        * 3.0. In practice, however, we're supposed to set it for earlier
+        * versions as well, since a BIOS that doesn't understand did2 should
+        * not look at it anyway. Use a variable so we can tweak this if a need
+        * arises later.
+        */
+       max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+               ARRAY_SIZE(opregion->acpi->did2);
+
        list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-               if (i >= 8) {
-                       dev_dbg(&dev->pdev->dev,
-                               "More than 8 outputs detected via ACPI\n");
+               if (i >= max_outputs) {
+                       DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+                                     max_outputs);
                        return;
                }
-               status =
-                       acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-                                               NULL, &device_id);
+               status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+                                              NULL, &device_id);
                if (ACPI_SUCCESS(status)) {
                        if (!device_id)
                                goto blind_set;
-                       iowrite32((u32)(device_id & 0x0f0f),
-                                 &opregion->acpi->didl[i]);
-                       i++;
+                       set_did(opregion, i++, (u32)(device_id & 0x0f0f));
                }
        }
 
 end:
-       /* If fewer than 8 outputs, the list must be null terminated */
-       if (i < 8)
-               iowrite32(0, &opregion->acpi->didl[i]);
+       DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+       /* If fewer than max outputs, the list must be null terminated */
+       if (i < max_outputs)
+               set_did(opregion, i, 0);
        return;
 
 blind_set:
        i = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                int output_type = ACPI_OTHER_OUTPUT;
-               if (i >= 8) {
-                       dev_dbg(&dev->pdev->dev,
-                               "More than 8 outputs in connector list\n");
+               if (i >= max_outputs) {
+                       DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+                                     max_outputs);
                        return;
                }
                switch (connector->connector_type) {
@@ -698,9 +746,8 @@ blind_set:
                        output_type = ACPI_LVDS_OUTPUT;
                        break;
                }
-               temp = ioread32(&opregion->acpi->didl[i]);
-               iowrite32(temp | (1<<31) | output_type | i,
-                         &opregion->acpi->didl[i]);
+               temp = get_did(opregion, i);
+               set_did(opregion, i, temp | (1 << 31) | output_type | i);
                i++;
        }
        goto end;
@@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev)
         * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
         * there are less than eight devices. */
        do {
-               disp_id = ioread32(&opregion->acpi->didl[i]);
+               disp_id = get_did(opregion, i);
                iowrite32(disp_id, &opregion->acpi->cadl[i]);
        } while (++i < 8 && disp_id != 0);
 }
@@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev)
        char buf[sizeof(OPREGION_SIGNATURE)];
        int err = 0;
 
+       BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
        pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
        DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
        if (asls == 0) {
index 25c8ec697da1fa5364164be9163bc24241172854..444542696a2c19a18b6ffbe48d1cf2933b3d2ab6 100644 (file)
@@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+                                        struct drm_i915_gem_request *req,
                                         void (*tail)(struct intel_overlay *))
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        WARN_ON(overlay->last_flip_req);
-       i915_gem_request_assign(&overlay->last_flip_req,
-                                            ring->outstanding_lazy_request);
-       ret = i915_add_request(ring);
-       if (ret)
-               return ret;
+       i915_gem_request_assign(&overlay->last_flip_req, req);
+       i915_add_request(req);
 
        overlay->flip_tail = tail;
        ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       struct drm_i915_gem_request *req;
        int ret;
 
        WARN_ON(overlay->active);
        WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-       ret = intel_ring_begin(ring, 4);
+       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
        if (ret)
                return ret;
 
+       ret = intel_ring_begin(req, 4);
+       if (ret) {
+               i915_gem_request_cancel(req);
+               return ret;
+       }
+
        overlay->active = true;
 
        intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
-       return intel_overlay_do_wait_request(overlay, NULL);
+       return intel_overlay_do_wait_request(overlay, req, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
        int ret;
@@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        if (tmp & (1 << 17))
                DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-       ret = intel_ring_begin(ring, 2);
+       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
        if (ret)
                return ret;
 
+       ret = intel_ring_begin(req, 2);
+       if (ret) {
+               i915_gem_request_cancel(req);
+               return ret;
+       }
+
        intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
        intel_ring_emit(ring, flip_addr);
        intel_ring_advance(ring);
 
        WARN_ON(overlay->last_flip_req);
-       i915_gem_request_assign(&overlay->last_flip_req,
-                                            ring->outstanding_lazy_request);
-       return i915_add_request(ring);
+       i915_gem_request_assign(&overlay->last_flip_req, req);
+       i915_add_request(req);
+
+       return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
        int ret;
 
@@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
        if (ret)
                return ret;
 
+       ret = intel_ring_begin(req, 6);
+       if (ret) {
+               i915_gem_request_cancel(req);
+               return ret;
+       }
+
        /* wait for overlay to go idle */
        intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
        intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        }
        intel_ring_advance(ring);
 
-       return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+       return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
        if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
                /* synchronous slowpath */
-               ret = intel_ring_begin(ring, 2);
+               struct drm_i915_gem_request *req;
+
+               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
                if (ret)
                        return ret;
 
+               ret = intel_ring_begin(req, 2);
+               if (ret) {
+                       i915_gem_request_cancel(req);
+                       return ret;
+               }
+
                intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_advance(ring);
 
-               ret = intel_overlay_do_wait_request(overlay,
+               ret = intel_overlay_do_wait_request(overlay, req,
                                                    intel_overlay_release_old_vid_tail);
                if (ret)
                        return ret;
@@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret != 0)
                return ret;
 
-       ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+       ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
                                                   &i915_ggtt_view_normal);
        if (ret != 0)
                return ret;
index 55aad2322e10ec8e7ea168aa84e0b7d98dcf9b91..e2ab3f6ed0222b9728f2a463f241a700cdc62e41 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
+#include <linux/pwm.h>
 #include "intel_drv.h"
 
+#define CRC_PMIC_PWM_PERIOD_NS 21333
+
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
        return I915_READ(BXT_BLC_PWM_DUTY1);
 }
 
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+       struct intel_panel *panel = &connector->panel;
+       int duty_ns;
+
+       duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+       return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
+
 static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
        I915_WRITE(BXT_BLC_PWM_DUTY1, level);
 }
 
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+       struct intel_panel *panel = &connector->panel;
+       int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+       pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
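
Editor's note: a standalone check of the two duty-cycle conversions above (C, not part of the patch). Both directions use DIV_ROUND_UP, so a get immediately after a set can read back one step high:

#include <stdio.h>

#define PERIOD_NS 21333		/* CRC_PMIC_PWM_PERIOD_NS */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int level = 50;						/* percent */
	int duty_ns = DIV_ROUND_UP(level * PERIOD_NS, 100);	/* 10667 ns */
	int readback = DIV_ROUND_UP(duty_ns * 100, PERIOD_NS);	/* 51, not 50 */

	printf("%d%% -> %d ns -> %d%%\n", level, duty_ns, readback);
	return 0;
}
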
+
 static void
 intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
 {
@@ -769,6 +789,16 @@ static void bxt_disable_backlight(struct intel_connector *connector)
        I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+       struct intel_panel *panel = &connector->panel;
+
+       /* Disable the backlight */
+       pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+       usleep_range(2000, 3000);
+       pwm_disable(panel->backlight.pwm);
+}
+
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@ static void bxt_enable_backlight(struct intel_connector *connector)
        I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+       struct intel_panel *panel = &connector->panel;
+
+       pwm_enable(panel->backlight.pwm);
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 void intel_panel_enable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
        return 0;
 }
 
+static int pwm_setup_backlight(struct intel_connector *connector,
+                              enum pipe pipe)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct intel_panel *panel = &connector->panel;
+       int retval;
+
+       /* Get the PWM chip for backlight control */
+       panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+       if (IS_ERR(panel->backlight.pwm)) {
+               DRM_ERROR("Failed to own the pwm chip\n");
+               panel->backlight.pwm = NULL;
+               return -ENODEV;
+       }
+
+       retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+                           CRC_PMIC_PWM_PERIOD_NS);
+       if (retval < 0) {
+               DRM_ERROR("Failed to configure the pwm chip\n");
+               pwm_put(panel->backlight.pwm);
+               panel->backlight.pwm = NULL;
+               return retval;
+       }
+
+       panel->backlight.min = 0; /* 0% */
+       panel->backlight.max = 100; /* 100% */
+       panel->backlight.level = DIV_ROUND_UP(
+                                pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+                                CRC_PMIC_PWM_PERIOD_NS);
+       panel->backlight.enabled = panel->backlight.level != 0;
+
+       return 0;
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
        struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_panel *panel = &intel_connector->panel;
 
+       /* dispose of the pwm */
+       if (panel->backlight.pwm)
+               pwm_put(panel->backlight.pwm);
+
        panel->backlight.present = false;
 }
 
@@ -1456,11 +1532,19 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
                dev_priv->display.set_backlight = pch_set_backlight;
                dev_priv->display.get_backlight = pch_get_backlight;
        } else if (IS_VALLEYVIEW(dev)) {
-               dev_priv->display.setup_backlight = vlv_setup_backlight;
-               dev_priv->display.enable_backlight = vlv_enable_backlight;
-               dev_priv->display.disable_backlight = vlv_disable_backlight;
-               dev_priv->display.set_backlight = vlv_set_backlight;
-               dev_priv->display.get_backlight = vlv_get_backlight;
+               if (dev_priv->vbt.has_mipi) {
+                       dev_priv->display.setup_backlight = pwm_setup_backlight;
+                       dev_priv->display.enable_backlight = pwm_enable_backlight;
+                       dev_priv->display.disable_backlight = pwm_disable_backlight;
+                       dev_priv->display.set_backlight = pwm_set_backlight;
+                       dev_priv->display.get_backlight = pwm_get_backlight;
+               } else {
+                       dev_priv->display.setup_backlight = vlv_setup_backlight;
+                       dev_priv->display.enable_backlight = vlv_enable_backlight;
+                       dev_priv->display.disable_backlight = vlv_disable_backlight;
+                       dev_priv->display.set_backlight = vlv_set_backlight;
+                       dev_priv->display.get_backlight = vlv_get_backlight;
+               }
        } else if (IS_GEN4(dev)) {
                dev_priv->display.setup_backlight = i965_setup_backlight;
                dev_priv->display.enable_backlight = i965_enable_backlight;
index eadc15cddbeb45d00dcc9cd49a0df6c7d379d895..fff0c22682ee32f947907da7bb27f4fda0463073 100644 (file)
@@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev)
        /* WaEnableLbsSlaRetryTimerDecrement:skl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+       /* WaDisableKillLogic:bxt,skl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  ECOCHK_DIS_TLB);
 }
 
 static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@ static void skl_init_clock_gating(struct drm_device *dev)
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
        }
 
+       /* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+        * involving this register should also be added to the WA batch as required.
+        */
        if (INTEL_REVID(dev) <= SKL_REVID_E0)
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
+
+       /* WaEnableGapsTsvCreditFix:skl */
+       if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+               I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                          GEN9_GAPS_TSV_CREDIT_DISABLE));
+       }
 }
 
 static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 
        if (IS_VALLEYVIEW(dev)) {
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-               if (IS_CHERRYVIEW(dev))
-                       chv_set_memory_pm5(dev_priv, enable);
+               POSTING_READ(FW_BLC_SELF_VLV);
+               dev_priv->wm.vlv.cxsr = enable;
        } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+               POSTING_READ(FW_BLC_SELF);
        } else if (IS_PINEVIEW(dev)) {
                val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
                val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
                I915_WRITE(DSPFW3, val);
+               POSTING_READ(DSPFW3);
        } else if (IS_I945G(dev) || IS_I945GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
+               POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
+               POSTING_READ(INSTPM);
        } else {
                return;
        }
@@ -923,223 +940,484 @@ static void vlv_write_wm_values(struct intel_crtc *crtc,
                           FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
        }
 
-       POSTING_READ(DSPFW1);
+       /* zero (unused) WM1 watermarks */
+       I915_WRITE(DSPFW4, 0);
+       I915_WRITE(DSPFW5, 0);
+       I915_WRITE(DSPFW6, 0);
+       I915_WRITE(DSPHOWM1, 0);
 
-       dev_priv->wm.vlv = *wm;
+       POSTING_READ(DSPFW1);
 }
 
 #undef FW_WM_VLV
 
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
-                                        struct drm_plane *plane)
-{
-       struct drm_device *dev = crtc->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int entries, prec_mult, drain_latency, pixel_size;
-       int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
-       const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
-
-       /*
-        * FIXME the plane might have an fb
-        * but be invisible (eg. due to clipping)
-        */
-       if (!intel_crtc->active || !plane->state->fb)
-               return 0;
+enum vlv_wm_level {
+       VLV_WM_LEVEL_PM2,
+       VLV_WM_LEVEL_PM5,
+       VLV_WM_LEVEL_DDR_DVFS,
+       CHV_WM_NUM_LEVELS,
+       VLV_WM_NUM_LEVELS = 1,
+};
 
-       if (WARN(clock == 0, "Pixel clock is zero!\n"))
-               return 0;
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+                                  unsigned int pipe_htotal,
+                                  unsigned int horiz_pixels,
+                                  unsigned int bytes_per_pixel,
+                                  unsigned int latency)
+{
+       unsigned int ret;
 
-       pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
+       ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+       ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+       ret = DIV_ROUND_UP(ret, 64);
 
-       if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
-               return 0;
+       return ret;
+}
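
Editor's note: a worked example of vlv_wm_method2() with hypothetical 1080p-like numbers (pixel_rate = 148500 kHz, htotal = 2200, width = 1920, 4 bytes/pixel, latency = 30, i.e. 3 us in 0.1 us units):

/*
 *   lines  = (30 * 148500) / (2200 * 10000) = 0   (3 us is less than one line)
 *   bytes  = (0 + 1) * 1920 * 4             = 7680
 *   result = DIV_ROUND_UP(7680, 64)         = 120 cachelines
 */
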
 
-       entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+static void vlv_setup_wm_latency(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       prec_mult = high_precision;
-       drain_latency = 64 * prec_mult * 4 / entries;
+       /* all latencies in usec */
+       dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
-       if (drain_latency > DRAIN_LATENCY_MASK) {
-               prec_mult /= 2;
-               drain_latency = 64 * prec_mult * 4 / entries;
+       if (IS_CHERRYVIEW(dev_priv)) {
+               dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+               dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
        }
-
-       if (drain_latency > DRAIN_LATENCY_MASK)
-               drain_latency = DRAIN_LATENCY_MASK;
-
-       return drain_latency | (prec_mult == high_precision ?
-                               DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
 }
 
-static int vlv_compute_wm(struct intel_crtc *crtc,
-                         struct intel_plane *plane,
-                         int fifo_size)
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+                                    struct intel_crtc *crtc,
+                                    const struct intel_plane_state *state,
+                                    int level)
 {
-       int clock, entries, pixel_size;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       int clock, htotal, pixel_size, width, wm;
 
-       /*
-        * FIXME the plane might have an fb
-        * but be invisible (eg. due to clipping)
-        */
-       if (!crtc->active || !plane->base.state->fb)
+       if (dev_priv->wm.pri_latency[level] == 0)
+               return USHRT_MAX;
+
+       if (!state->visible)
                return 0;
 
-       pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+       pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
        clock = crtc->config->base.adjusted_mode.crtc_clock;
+       htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+       width = crtc->config->pipe_src_w;
+       if (WARN_ON(htotal == 0))
+               htotal = 1;
 
-       entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+       if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+               /*
+                * FIXME the formula gives values that are
+                * too big for the cursor FIFO, and hence we
+                * would never be able to use cursors. For
+                * now just hardcode the watermark.
+                */
+               wm = 63;
+       } else {
+               wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+                                   dev_priv->wm.pri_latency[level] * 10);
+       }
 
-       /*
-        * Set up the watermark such that we don't start issuing memory
-        * requests until we are within PND's max deadline value (256us).
-        * Idea being to be idle as long as possible while still taking
-        * advatange of PND's deadline scheduling. The limit of 8
-        * cachelines (used when the FIFO will anyway drain in less time
-        * than 256us) should match what we would be done if trickle
-        * feed were enabled.
-        */
-       return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+       return min_t(int, wm, USHRT_MAX);
 }
 
-static bool vlv_compute_sr_wm(struct drm_device *dev,
-                             struct vlv_wm_values *wm)
+static void vlv_compute_fifo(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *crtc;
-       enum pipe pipe = INVALID_PIPE;
-       int num_planes = 0;
-       int fifo_size = 0;
+       struct drm_device *dev = crtc->base.dev;
+       struct vlv_wm_state *wm_state = &crtc->wm_state;
        struct intel_plane *plane;
+       unsigned int total_rate = 0;
+       const int fifo_size = 512 - 1;
+       int fifo_extra, fifo_left = fifo_size;
 
-       wm->sr.cursor = wm->sr.plane = 0;
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               struct intel_plane_state *state =
+                       to_intel_plane_state(plane->base.state);
 
-       crtc = single_enabled_crtc(dev);
-       /* maxfifo not supported on pipe C */
-       if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
-               pipe = to_intel_crtc(crtc)->pipe;
-               num_planes = !!wm->pipe[pipe].primary +
-                       !!wm->pipe[pipe].sprite[0] +
-                       !!wm->pipe[pipe].sprite[1];
-               fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+               if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+                       continue;
+
+               if (state->visible) {
+                       wm_state->num_active_planes++;
+                       total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+               }
        }
 
-       if (fifo_size == 0 || num_planes > 1)
-               return false;
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               struct intel_plane_state *state =
+                       to_intel_plane_state(plane->base.state);
+               unsigned int rate;
+
+               if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+                       plane->wm.fifo_size = 63;
+                       continue;
+               }
+
+               if (!state->visible) {
+                       plane->wm.fifo_size = 0;
+                       continue;
+               }
+
+               rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+               plane->wm.fifo_size = fifo_size * rate / total_rate;
+               fifo_left -= plane->wm.fifo_size;
+       }
+
+       fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
 
-       wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
-                                      to_intel_plane(crtc->cursor), 0x3f);
+       /* spread the remainder evenly */
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               int plane_extra;
+
+               if (fifo_left == 0)
+                       break;
 
-       list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
-               if (plane->pipe != pipe)
+               /* give it all to the first plane if none are active */
+               if (plane->wm.fifo_size == 0 &&
+                   wm_state->num_active_planes)
                        continue;
 
-               wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
-                                             plane, fifo_size);
-               if (wm->sr.plane != 0)
+               plane_extra = min(fifo_extra, fifo_left);
+               plane->wm.fifo_size += plane_extra;
+               fifo_left -= plane_extra;
+       }
+
+       WARN_ON(fifo_left != 0);
+}
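
Editor's note: a concrete split for the FIFO division above (hypothetical values): two visible non-cursor planes, both 4 bytes per pixel, sharing the 511-entry FIFO:

/*
 *   total_rate = 4 + 4 = 8
 *   per plane  = 511 * 4 / 8 = 255      -> fifo_left = 511 - 510 = 1
 *   fifo_extra = DIV_ROUND_UP(1, 2) = 1
 *   the first plane iterated takes the spare entry: 256 + 255 = 511
 */
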
+
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+       struct vlv_wm_state *wm_state = &crtc->wm_state;
+       int level;
+
+       for (level = 0; level < wm_state->num_levels; level++) {
+               struct drm_device *dev = crtc->base.dev;
+               const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+               struct intel_plane *plane;
+
+               wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+               wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+               for_each_intel_plane_on_crtc(dev, crtc, plane) {
+                       switch (plane->base.type) {
+                               int sprite;
+                       case DRM_PLANE_TYPE_CURSOR:
+                               wm_state->wm[level].cursor = plane->wm.fifo_size -
+                                       wm_state->wm[level].cursor;
+                               break;
+                       case DRM_PLANE_TYPE_PRIMARY:
+                               wm_state->wm[level].primary = plane->wm.fifo_size -
+                                       wm_state->wm[level].primary;
+                               break;
+                       case DRM_PLANE_TYPE_OVERLAY:
+                               sprite = plane->plane;
+                               wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+                                       wm_state->wm[level].sprite[sprite];
+                               break;
+                       }
+               }
+       }
+}
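
Editor's note (a reading of the code above, not from the patch): the levels computed so far count the FIFO entries needed to hide the latency, and the inversion stores the complement. For example, with plane->wm.fifo_size = 256 and a computed watermark of 120 entries, the programmed value becomes 256 - 120 = 136; the precise hardware comparison semantics are outside this diff.
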
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct vlv_wm_state *wm_state = &crtc->wm_state;
+       struct intel_plane *plane;
+       int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+       int level;
+
+       memset(wm_state, 0, sizeof(*wm_state));
+
+       wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+       if (IS_CHERRYVIEW(dev))
+               wm_state->num_levels = CHV_WM_NUM_LEVELS;
+       else
+               wm_state->num_levels = VLV_WM_NUM_LEVELS;
+
+       wm_state->num_active_planes = 0;
+
+       vlv_compute_fifo(crtc);
+
+       if (wm_state->num_active_planes != 1)
+               wm_state->cxsr = false;
+
+       if (wm_state->cxsr) {
+               for (level = 0; level < wm_state->num_levels; level++) {
+                       wm_state->sr[level].plane = sr_fifo_size;
+                       wm_state->sr[level].cursor = 63;
+               }
+       }
+
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               struct intel_plane_state *state =
+                       to_intel_plane_state(plane->base.state);
+
+               if (!state->visible)
+                       continue;
+
+               /* normal watermarks */
+               for (level = 0; level < wm_state->num_levels; level++) {
+                       int wm = vlv_compute_wm_level(plane, crtc, state, level);
+                       int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+                       /* hack: level 0 must always fit, so clamp it to the register max */
+                       if (WARN_ON(level == 0 && wm > max_wm))
+                               wm = max_wm;
+
+                       if (wm > plane->wm.fifo_size)
+                               break;
+
+                       switch (plane->base.type) {
+                               int sprite;
+                       case DRM_PLANE_TYPE_CURSOR:
+                               wm_state->wm[level].cursor = wm;
+                               break;
+                       case DRM_PLANE_TYPE_PRIMARY:
+                               wm_state->wm[level].primary = wm;
+                               break;
+                       case DRM_PLANE_TYPE_OVERLAY:
+                               sprite = plane->plane;
+                               wm_state->wm[level].sprite[sprite] = wm;
+                               break;
+                       }
+               }
+
+               wm_state->num_levels = level;
+
+               if (!wm_state->cxsr)
+                       continue;
+
+               /* maxfifo watermarks */
+               switch (plane->base.type) {
+                       int sprite, level;
+               case DRM_PLANE_TYPE_CURSOR:
+                       for (level = 0; level < wm_state->num_levels; level++)
+                               wm_state->sr[level].cursor =
+                                       wm_state->wm[level].cursor;
+                       break;
+               case DRM_PLANE_TYPE_PRIMARY:
+                       for (level = 0; level < wm_state->num_levels; level++)
+                               wm_state->sr[level].plane =
+                                       min(wm_state->sr[level].plane,
+                                           wm_state->wm[level].primary);
+                       break;
+               case DRM_PLANE_TYPE_OVERLAY:
+                       sprite = plane->plane;
+                       for (level = 0; level < wm_state->num_levels; level++)
+                               wm_state->sr[level].plane =
+                                       min(wm_state->sr[level].plane,
+                                           wm_state->wm[level].sprite[sprite]);
                        break;
+               }
        }
 
-       return true;
+       /* clear any (partially) filled invalid levels */
+       for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+               memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+               memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+       }
+
+       vlv_invert_wms(crtc);
 }
 
-static void valleyview_update_wm(struct drm_crtc *crtc)
+#define VLV_FIFO(plane, value) \
+       (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
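The VLV_FIFO() macro token-pastes the plane name into the matching shift/mask macro names so a single expression serves every sprite field. A compilable sketch of the expansion below; the shift/mask values are illustrative stand-ins, not the real register layout from i915_reg.h:

#include <stdio.h>

#define DSPARB_SPRITEA_SHIFT_VLV 0
#define DSPARB_SPRITEA_MASK_VLV  (0xffu << DSPARB_SPRITEA_SHIFT_VLV)

#define VLV_FIFO(plane, value) \
        (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

int main(void)
{
        unsigned int sprite0_start = 0x1ab;

        /* only the low 8 bits land here; bit 8 goes into the *_HI field
         * via the ">> 8" uses in the function below */
        printf("0x%02x\n", VLV_FIFO(SPRITEA, sprite0_start));  /* 0xab */
        return 0;
}
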
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       bool cxsr_enabled;
-       struct vlv_wm_values wm = dev_priv->wm.vlv;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_plane *plane;
+       int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
 
-       wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
-       wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
-                                              to_intel_plane(crtc->primary),
-                                              vlv_get_fifo_size(dev, pipe, 0));
+       for_each_intel_plane_on_crtc(dev, crtc, plane) {
+               if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+                       WARN_ON(plane->wm.fifo_size != 63);
+                       continue;
+               }
 
-       wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
-       wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
-                                             to_intel_plane(crtc->cursor),
-                                             0x3f);
+               if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+                       sprite0_start = plane->wm.fifo_size;
+               else if (plane->plane == 0)
+                       sprite1_start = sprite0_start + plane->wm.fifo_size;
+               else
+                       fifo_size = sprite1_start + plane->wm.fifo_size;
+       }
 
-       cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+       WARN_ON(fifo_size != 512 - 1);
 
-       if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-               return;
+       DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+                     pipe_name(crtc->pipe), sprite0_start,
+                     sprite1_start, fifo_size);
 
-       DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
-                     "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-                     wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
-                     wm.sr.plane, wm.sr.cursor);
+       switch (crtc->pipe) {
+               uint32_t dsparb, dsparb2, dsparb3;
+       case PIPE_A:
+               dsparb = I915_READ(DSPARB);
+               dsparb2 = I915_READ(DSPARB2);
 
-       /*
-        * FIXME DDR DVFS introduces massive memory latencies which
-        * are not known to system agent so any deadline specified
-        * by the display may not be respected. To support DDR DVFS
-        * the watermark code needs to be rewritten to essentially
-        * bypass deadline mechanism and rely solely on the
-        * watermarks. For now disable DDR DVFS.
-        */
-       if (IS_CHERRYVIEW(dev_priv))
-               chv_set_memory_dvfs(dev_priv, false);
+               dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+                           VLV_FIFO(SPRITEB, 0xff));
+               dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+                          VLV_FIFO(SPRITEB, sprite1_start));
 
-       if (!cxsr_enabled)
-               intel_set_memory_cxsr(dev_priv, false);
+               dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+                            VLV_FIFO(SPRITEB_HI, 0x1));
+               dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+                          VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
 
-       vlv_write_wm_values(intel_crtc, &wm);
+               I915_WRITE(DSPARB, dsparb);
+               I915_WRITE(DSPARB2, dsparb2);
+               break;
+       case PIPE_B:
+               dsparb = I915_READ(DSPARB);
+               dsparb2 = I915_READ(DSPARB2);
 
-       if (cxsr_enabled)
-               intel_set_memory_cxsr(dev_priv, true);
+               dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+                           VLV_FIFO(SPRITED, 0xff));
+               dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+                          VLV_FIFO(SPRITED, sprite1_start));
+
+               dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+                            VLV_FIFO(SPRITED_HI, 0xff));
+               dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+                          VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+               I915_WRITE(DSPARB, dsparb);
+               I915_WRITE(DSPARB2, dsparb2);
+               break;
+       case PIPE_C:
+               dsparb3 = I915_READ(DSPARB3);
+               dsparb2 = I915_READ(DSPARB2);
+
+               dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+                            VLV_FIFO(SPRITEF, 0xff));
+               dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+                           VLV_FIFO(SPRITEF, sprite1_start));
+
+               dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+                            VLV_FIFO(SPRITEF_HI, 0xff));
+               dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+                          VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+               I915_WRITE(DSPARB3, dsparb3);
+               I915_WRITE(DSPARB2, dsparb2);
+               break;
+       default:
+               break;
+       }
 }
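
The split points programmed above are cumulative: the primary plane owns [0, sprite0_start), sprite0 owns [sprite0_start, sprite1_start), and sprite1 takes the rest up to entry 511. A small sketch of that bookkeeping with hypothetical shares, mirroring the WARN_ON in the function:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int primary = 256, sprite0 = 128, sprite1 = 127;  /* hypothetical */
        int sprite0_start = primary;
        int sprite1_start = sprite0_start + sprite0;
        int fifo_size = sprite1_start + sprite1;

        assert(fifo_size == 512 - 1);   /* mirrors WARN_ON(fifo_size != 512 - 1) */
        printf("split: %d / %d / %d\n",
               sprite0_start, sprite1_start, fifo_size);
        return 0;
}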
 
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
-                                       struct drm_crtc *crtc,
-                                       uint32_t sprite_width,
-                                       uint32_t sprite_height,
-                                       int pixel_size,
-                                       bool enabled, bool scaled)
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+                        struct vlv_wm_values *wm)
+{
+       struct intel_crtc *crtc;
+       int num_active_crtcs = 0;
+
+       if (IS_CHERRYVIEW(dev))
+               wm->level = VLV_WM_LEVEL_DDR_DVFS;
+       else
+               wm->level = VLV_WM_LEVEL_PM2;
+       wm->cxsr = true;
+
+       for_each_intel_crtc(dev, crtc) {
+               const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+               if (!crtc->active)
+                       continue;
+
+               if (!wm_state->cxsr)
+                       wm->cxsr = false;
+
+               num_active_crtcs++;
+               wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+       }
+
+       if (num_active_crtcs != 1)
+               wm->cxsr = false;
+
+       if (num_active_crtcs > 1)
+               wm->level = VLV_WM_LEVEL_PM2;
+
+       for_each_intel_crtc(dev, crtc) {
+               struct vlv_wm_state *wm_state = &crtc->wm_state;
+               enum pipe pipe = crtc->pipe;
+
+               if (!crtc->active)
+                       continue;
+
+               wm->pipe[pipe] = wm_state->wm[wm->level];
+               if (wm->cxsr)
+                       wm->sr = wm_state->sr[wm->level];
+
+               wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+               wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+               wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+               wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+       }
+}
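
vlv_merge_wm() reduces the per-pipe results to one device-wide state: the chosen level is the deepest level every active pipe supports, and cxsr survives only with exactly one active pipe. A minimal model of that reduction; all values here are hypothetical:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int num_levels[2] = { 3, 2 };        /* hypothetical per-pipe results */
        bool pipe_cxsr[2] = { true, false };
        int level = 99;                      /* stand-in for the platform max */
        bool cxsr = true;
        int active = 0;

        for (int i = 0; i < 2; i++) {
                active++;
                if (!pipe_cxsr[i])
                        cxsr = false;
                if (num_levels[i] - 1 < level)
                        level = num_levels[i] - 1;
        }
        if (active != 1)
                cxsr = false;
        printf("level=%d cxsr=%d\n", level, cxsr);  /* level=1 cxsr=0 */
        return 0;
}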
+
+static void vlv_update_wm(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
-       int sprite = to_intel_plane(plane)->plane;
-       bool cxsr_enabled;
-       struct vlv_wm_values wm = dev_priv->wm.vlv;
+       struct vlv_wm_values wm = {};
 
-       if (enabled) {
-               wm.ddl[pipe].sprite[sprite] =
-                       vlv_compute_drain_latency(crtc, plane);
+       vlv_compute_wm(intel_crtc);
+       vlv_merge_wm(dev, &wm);
 
-               wm.pipe[pipe].sprite[sprite] =
-                       vlv_compute_wm(intel_crtc,
-                                      to_intel_plane(plane),
-                                      vlv_get_fifo_size(dev, pipe, sprite+1));
-       } else {
-               wm.ddl[pipe].sprite[sprite] = 0;
-               wm.pipe[pipe].sprite[sprite] = 0;
+       if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+               /* FIXME should be part of crtc atomic commit */
+               vlv_pipe_set_fifo_size(intel_crtc);
+               return;
        }
 
-       cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-       if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-               return;
+       if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+           dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+               chv_set_memory_dvfs(dev_priv, false);
 
-       DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
-                     "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-                     sprite_name(pipe, sprite),
-                     wm.pipe[pipe].sprite[sprite],
-                     wm.sr.plane, wm.sr.cursor);
+       if (wm.level < VLV_WM_LEVEL_PM5 &&
+           dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+               chv_set_memory_pm5(dev_priv, false);
 
-       if (!cxsr_enabled)
+       if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, false);
 
+       /* FIXME should be part of crtc atomic commit */
+       vlv_pipe_set_fifo_size(intel_crtc);
+
        vlv_write_wm_values(intel_crtc, &wm);
 
-       if (cxsr_enabled)
+       DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+                     "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+                     pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+                     wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+                     wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+       if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, true);
+
+       if (wm.level >= VLV_WM_LEVEL_PM5 &&
+           dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+               chv_set_memory_pm5(dev_priv, true);
+
+       if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+           dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+               chv_set_memory_dvfs(dev_priv, true);
+
+       dev_priv->wm.vlv = wm;
 }
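
vlv_update_wm() is deliberate about ordering: anything that raises memory latency (DDR DVFS, PM5, cxsr) is switched off before the new watermarks land and switched on only afterwards, so the hardware never runs a deep power state against watermarks that are too small for it. A toy sketch of that invariant, with a single made-up threshold standing in for the level checks:

#include <stdio.h>

static void apply(int old_level, int new_level, int threshold)
{
        if (new_level < threshold && old_level >= threshold)
                printf("pm5 off\n");        /* downgrade before the write */
        printf("write watermarks\n");
        if (new_level >= threshold && old_level < threshold)
                printf("pm5 on\n");         /* upgrade only afterwards */
}

int main(void)
{
        apply(0, 2, 1);   /* hypothetical levels: enable path */
        apply(2, 0, 1);   /* disable path */
        return 0;
}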
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1434,23 +1712,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
        I915_WRITE(FW_BLC, fwater_lo);
 }
 
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
-                                   struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pixel_rate;
 
-       pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+       pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */
 
-       if (intel_crtc->config->pch_pfit.enabled) {
+       if (pipe_config->pch_pfit.enabled) {
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
-               uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+               uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+               pipe_w = pipe_config->pipe_src_w;
+               pipe_h = pipe_config->pipe_src_h;
 
-               pipe_w = intel_crtc->config->pipe_src_w;
-               pipe_h = intel_crtc->config->pipe_src_h;
                pfit_w = (pfit_size >> 16) & 0xFFFF;
                pfit_h = pfit_size & 0xFFFF;
                if (pipe_w < pfit_w)
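
The hunk is truncated just before the final adjustment; based on the surrounding code, when the PCH panel fitter downscales, the pixel rate is scaled up by the pipe-to-fitter area ratio. A worked example under that assumption, with purely illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pixel_rate = 148500;           /* kHz, roughly a 1080p clock */
        uint64_t pipe_w = 1920, pipe_h = 1080;  /* pipe source size */
        uint64_t pfit_w = 1600, pfit_h = 900;   /* fitted output size */

        /* downscaling raises the effective rate by the area ratio */
        pixel_rate = pixel_rate * pipe_w * pipe_h / (pfit_w * pfit_h);
        printf("adjusted pixel rate: %llu kHz\n",
               (unsigned long long)pixel_rate);  /* 213840 */
        return 0;
}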
@@ -1815,7 +2092,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
        linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
                                     mode->crtc_clock);
        ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
-                                        dev_priv->display.get_display_clock_speed(dev_priv->dev));
+                                        dev_priv->cdclk_freq);
 
        return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
               PIPE_WM_LINETIME_TIME(linetime);
@@ -2066,7 +2343,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
 
        p->active = true;
        p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-       p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+       p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
 
        if (crtc->primary->state->fb)
                p->pri.bytes_per_pixel =
@@ -2085,7 +2362,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
        p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
        p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
-       drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+       drm_for_each_legacy_plane(plane, dev) {
                struct intel_plane *intel_plane = to_intel_plane(plane);
 
                if (intel_plane->pipe == pipe) {
@@ -2215,6 +2492,7 @@ static void ilk_wm_merge(struct drm_device *dev,
                         const struct ilk_wm_maximums *max,
                         struct intel_pipe_wm *merged)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int level, max_level = ilk_wm_max_level(dev);
        int last_enabled_level = max_level;
 
@@ -2255,7 +2533,8 @@ static void ilk_wm_merge(struct drm_device *dev,
         * What we should check here is whether FBC can be
         * enabled sometime later.
         */
-       if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+       if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+           intel_fbc_enabled(dev_priv)) {
                for (level = 2; level <= max_level; level++) {
                        struct intel_wm_level *wm = &merged->wm[level];
 
@@ -3043,8 +3322,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
        if (!to_intel_crtc(crtc)->active)
                return 0;
 
-       return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+       if (WARN_ON(p->pixel_rate == 0))
+               return 0;
 
+       return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 }
 
 static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3966,139 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
        }
 }
 
+#define _FW_WM(value, plane) \
+       (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+       (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+                              struct vlv_wm_values *wm)
+{
+       enum pipe pipe;
+       uint32_t tmp;
+
+       for_each_pipe(dev_priv, pipe) {
+               tmp = I915_READ(VLV_DDL(pipe));
+
+               wm->ddl[pipe].primary =
+                       (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+               wm->ddl[pipe].cursor =
+                       (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+               wm->ddl[pipe].sprite[0] =
+                       (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+               wm->ddl[pipe].sprite[1] =
+                       (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+       }
+
+       tmp = I915_READ(DSPFW1);
+       wm->sr.plane = _FW_WM(tmp, SR);
+       wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+       wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+       wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+       tmp = I915_READ(DSPFW2);
+       wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+       wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+       wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+       tmp = I915_READ(DSPFW3);
+       wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+       if (IS_CHERRYVIEW(dev_priv)) {
+               tmp = I915_READ(DSPFW7_CHV);
+               wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+               wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+               tmp = I915_READ(DSPFW8_CHV);
+               wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+               wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+               tmp = I915_READ(DSPFW9_CHV);
+               wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+               wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+               tmp = I915_READ(DSPHOWM);
+               wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+               wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+               wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+               wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+               wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+               wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+               wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+               wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+               wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+               wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+       } else {
+               tmp = I915_READ(DSPFW7);
+               wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+               wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+               tmp = I915_READ(DSPHOWM);
+               wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+               wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+               wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+               wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+               wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+               wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+               wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+       }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
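
_FW_WM()/_FW_WM_VLV() are plain mask-and-shift extractors; the ninth watermark bit is then folded in from DSPHOWM, as the |= lines in the function show. A compilable sketch with stand-in register values and an invented field layout (the real masks live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

#define DSPFW_PLANEA_MASK  (0xff << 0)   /* illustrative only */
#define DSPFW_PLANEA_SHIFT 0
#define _FW_WM_VLV(value, plane) \
        (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)

int main(void)
{
        uint32_t dspfw1 = 0x7f;    /* pretend register readout */
        uint32_t dsphowm = 0x1;    /* pretend PLANEA_HI is bit 0 */

        uint32_t wm = _FW_WM_VLV(dspfw1, PLANEA);
        wm |= (dsphowm & 1) << 8;  /* fold in the ninth bit, as above */
        printf("plane A wm = %u\n", wm);  /* 0x17f = 383 */
        return 0;
}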
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+       struct intel_plane *plane;
+       enum pipe pipe;
+       u32 val;
+
+       vlv_read_wm_values(dev_priv, wm);
+
+       for_each_intel_plane(dev, plane) {
+               switch (plane->base.type) {
+                       int sprite;
+               case DRM_PLANE_TYPE_CURSOR:
+                       plane->wm.fifo_size = 63;
+                       break;
+               case DRM_PLANE_TYPE_PRIMARY:
+                       plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+                       break;
+               case DRM_PLANE_TYPE_OVERLAY:
+                       sprite = plane->plane;
+                       plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+                       break;
+               }
+       }
+
+       wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+       wm->level = VLV_WM_LEVEL_PM2;
+
+       if (IS_CHERRYVIEW(dev_priv)) {
+               mutex_lock(&dev_priv->rps.hw_lock);
+
+               val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+               if (val & DSP_MAXFIFO_PM5_ENABLE)
+                       wm->level = VLV_WM_LEVEL_PM5;
+
+               val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+               if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+                       wm->level = VLV_WM_LEVEL_DDR_DVFS;
+
+               mutex_unlock(&dev_priv->rps.hw_lock);
+       }
+
+       for_each_pipe(dev_priv, pipe)
+               DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+                             pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+                             wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+       DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+                     wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4497,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
                      "Odd GPU freq value\n"))
                val &= ~1;
 
+       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
        if (val != dev_priv->rps.cur_freq) {
                vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
                if (!IS_CHERRYVIEW(dev_priv))
                        gen6_set_rps_thresholds(dev_priv, val);
        }
 
-       I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
        dev_priv->rps.cur_freq = val;
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
@@ -4250,12 +4664,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
-       /* No RC6 before Ironlake */
-       if (INTEL_INFO(dev)->gen < 5)
-               return 0;
-
-       /* RC6 is only on Ironlake mobile not on desktop */
-       if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+       /* No RC6 before Ironlake, and the Ironlake RC6 code is gone. */
+       if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
        /* Respect the kernel parameter if it is set */
@@ -4275,10 +4685,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
                return enable_rc6 & mask;
        }
 
-       /* Disable RC6 on Ironlake */
-       if (INTEL_INFO(dev)->gen == 5)
-               return 0;
-
        if (IS_IVYBRIDGE(dev))
                return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
@@ -4297,25 +4703,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
        u32 ddcc_status = 0;
        int ret;
 
-       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        /* All of these values are in units of 50MHz */
        dev_priv->rps.cur_freq          = 0;
        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
-       dev_priv->rps.rp0_freq          = (rp_state_cap >>  0) & 0xff;
-       dev_priv->rps.rp1_freq          = (rp_state_cap >>  8) & 0xff;
-       dev_priv->rps.min_freq          = (rp_state_cap >> 16) & 0xff;
-       if (IS_SKYLAKE(dev)) {
-               /* Store the frequency values in 16.66 MHZ units, which is
-                  the natural hardware unit for SKL */
-               dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+       if (IS_BROXTON(dev)) {
+               rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+               dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+               dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+       } else {
+               rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+               dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+               dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
        }
+
        /* hw_max = RP0 until we check for overclocking */
        dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
 
        dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
                ret = sandybridge_pcode_read(dev_priv,
                                        HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                        &ddcc_status);
@@ -4327,6 +4734,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                                        dev_priv->rps.max_freq);
        }
 
+       if (IS_SKYLAKE(dev)) {
+               /* Store the frequency values in 16.66 MHz units, which is
+                  the natural hardware unit for SKL */
+               dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+               dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+               dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+               dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+               dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+       }
+
        dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
        /* Preserve min/max settings in case of re-init */
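
The SKL branch above converts between the two frequency units in play: RP values are read from the fuse register in 50 MHz units, while SKL's RPS interface works in 16.66 MHz units, hence the multiply by GEN9_FREQ_SCALER (3 upstream). A one-line sanity check of the conversion; the example frequency is made up:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3   /* 50 / 16.66 */

int main(void)
{
        unsigned int rp0 = 23;   /* hypothetical: 23 * 50 MHz = 1150 MHz */

        printf("rp0: %u (50 MHz units) -> %u (16.66 MHz units)\n",
               rp0, rp0 * GEN9_FREQ_SCALER);
        return 0;
}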
@@ -4619,6 +5036,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
        int min_freq = 15;
        unsigned int gpu_freq;
        unsigned int max_ia_freq, min_ring_freq;
+       unsigned int max_gpu_freq, min_gpu_freq;
        int scaling_factor = 180;
        struct cpufreq_policy *policy;
 
@@ -4643,17 +5061,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
        /* convert DDR frequency from units of 266.6MHz to bandwidth */
        min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
+       if (IS_SKYLAKE(dev)) {
+               /* Convert GT frequency to 50 MHz units */
+               min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+               max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+       } else {
+               min_gpu_freq = dev_priv->rps.min_freq;
+               max_gpu_freq = dev_priv->rps.max_freq;
+       }
+
        /*
         * For each potential GPU frequency, load a ring frequency we'd like
         * to use for memory access.  We do this by specifying the IA frequency
         * the PCU should use as a reference to determine the ring frequency.
         */
-       for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
-            gpu_freq--) {
-               int diff = dev_priv->rps.max_freq - gpu_freq;
+       for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+               int diff = max_gpu_freq - gpu_freq;
                unsigned int ia_freq = 0, ring_freq = 0;
 
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (IS_SKYLAKE(dev)) {
+                       /*
+                        * ring_freq = 2 * GT. ring_freq is in 100MHz units
+                        * No floor required for ring frequency on SKL.
+                        */
+                       ring_freq = gpu_freq;
+               } else if (INTEL_INFO(dev)->gen >= 8) {
                        /* max(2 * GT, DDR). NB: GT is 50MHz units */
                        ring_freq = max(min_ring_freq, gpu_freq);
                } else if (IS_HASWELL(dev)) {
@@ -4687,7 +5119,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+       if (!HAS_CORE_RING_FREQ(dev))
                return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6234,8 @@ static void intel_gen6_powersave_work(struct work_struct *work)
        } else if (INTEL_INFO(dev)->gen >= 9) {
                gen9_enable_rc6(dev);
                gen9_enable_rps(dev);
-               __gen6_update_ring_freq(dev);
+               if (IS_SKYLAKE(dev))
+                       __gen6_update_ring_freq(dev);
        } else if (IS_BROADWELL(dev)) {
                gen8_enable_rps(dev);
                __gen6_update_ring_freq(dev);
@@ -6686,13 +7119,15 @@ void intel_init_pm(struct drm_device *dev)
                else if (INTEL_INFO(dev)->gen == 8)
                        dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
        } else if (IS_CHERRYVIEW(dev)) {
-               dev_priv->display.update_wm = valleyview_update_wm;
-               dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+               vlv_setup_wm_latency(dev);
+
+               dev_priv->display.update_wm = vlv_update_wm;
                dev_priv->display.init_clock_gating =
                        cherryview_init_clock_gating;
        } else if (IS_VALLEYVIEW(dev)) {
-               dev_priv->display.update_wm = valleyview_update_wm;
-               dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+               vlv_setup_wm_latency(dev);
+
+               dev_priv->display.update_wm = vlv_update_wm;
                dev_priv->display.init_clock_gating =
                        valleyview_init_clock_gating;
        } else if (IS_PINEVIEW(dev)) {
index 5ee0fa57ed1999b4a8432c89265d82bc2f90513e..a04b4dc5ed9b459ce42de39c12de74539a620066 100644 (file)
@@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        uint32_t max_sleep_time = 0x1f;
        /* Lately it was identified that, depending on the panel, the idle
         * frame count calculated at HW can be off by 1. So let's use what came
-        * from VBT + 1 and at minimum 2 to be on the safe side.
+        * from VBT + 1.
+        * There are also cases where the panel demands at least 4 idle
+        * frames while the VBT leaves the field unset. To cover both cases,
+        * let's use at least 5 when VBT isn't set, to be on the safe side.
         */
        uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-                              dev_priv->vbt.psr.idle_frames + 1 : 2;
+                              dev_priv->vbt.psr.idle_frames + 1 : 5;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
@@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 
                /* Avoid continuous PSR exit by masking memup and hpd */
                I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-                          EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+                          EDP_PSR_DEBUG_MASK_HPD);
 
                /* Enable PSR on the panel */
                hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev)
 /**
  * intel_psr_single_frame_update - Single Frame Update
  * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
  * send and update only one frame on Remote Frame Buffer.
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+                                  unsigned frontbuffer_bits)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
@@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev)
 
        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
-       val = I915_READ(VLV_PSRCTL(pipe));
 
-       /*
-        * We need to set this bit before writing registers for a flip.
-        * This bit will be self-clear when it gets to the PSR active state.
-        */
-       I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+       if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+               val = I915_READ(VLV_PSRCTL(pipe));
 
+               /*
+                * We need to set this bit before writing registers for a flip.
+                * This bit will self-clear when it gets to the PSR active state.
+                */
+               I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+       }
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev)
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_device *dev,
-                             unsigned frontbuffer_bits)
+                         unsigned frontbuffer_bits)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
@@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev,
        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
 
-       intel_psr_exit(dev);
-
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+       if (frontbuffer_bits)
+               intel_psr_exit(dev);
+
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev,
  * intel_psr_flush - Flush PSR
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@ void intel_psr_invalidate(struct drm_device *dev,
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
 void intel_psr_flush(struct drm_device *dev,
-                        unsigned frontbuffer_bits)
+                    unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;
+       int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@ void intel_psr_flush(struct drm_device *dev,
 
        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
-       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-       /*
-        * On Haswell sprite plane updates don't result in a psr invalidating
-        * signal in the hardware. Which means we need to manually fake this in
-        * software for all flushes, not just when we've seen a preceding
-        * invalidation through frontbuffer rendering.
-        */
-       if (IS_HASWELL(dev) &&
-           (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-               intel_psr_exit(dev);
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-       /*
-        * On Valleyview and Cherryview we don't use hardware tracking so
-        * any plane updates or cursor moves don't result in a PSR
-        * invalidating. Which means we need to manually fake this in
-        * software for all flushes, not just when we've seen a preceding
-        * invalidation through frontbuffer rendering. */
-       if (!HAS_DDI(dev))
-               intel_psr_exit(dev);
+       if (HAS_DDI(dev)) {
+               /*
+                * By definition every flush should mean invalidate + flush,
+                * however on core platforms let's minimize the
+                * disable/re-enable so we can avoid the invalidate when a
+                * flip originated the flush.
+                */
+               if (frontbuffer_bits && origin != ORIGIN_FLIP)
+                       intel_psr_exit(dev);
+       } else {
+               /*
+                * On Valleyview and Cherryview we don't use hardware tracking
+                * so any plane updates or cursor moves don't result in a PSR
+                * invalidate, which means we need to manually fake this in
+                * software for all flushes.
+                */
+               if (frontbuffer_bits)
+                       intel_psr_exit(dev);
+       }
 
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->psr.work,
-                                     msecs_to_jiffies(100));
+                                     msecs_to_jiffies(delay_ms));
        mutex_unlock(&dev_priv->psr.lock);
 }
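
The reworked flush first clips frontbuffer_bits to the PSR pipe and then exits PSR only when something on that pipe is actually dirty; on DDI platforms a flip-originated flush additionally skips the exit. A toy model of that decision, with an invented bit layout and made-up values:

#include <stdio.h>

#define PIPE_MASK(pipe) (0xfu << ((pipe) * 4))   /* invented layout */

int main(void)
{
        unsigned int bits = 0x013;  /* hypothetical dirty-plane bits */
        int pipe = 0, has_ddi = 1, origin_is_flip = 0;

        bits &= PIPE_MASK(pipe);    /* only this pipe's planes matter */
        if (bits && !(has_ddi && origin_is_flip))
                printf("exit PSR\n");
        else
                printf("leave PSR armed\n");
        return 0;
}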
 
index 3817a6f00d9ef1fe4455282acfedf90c436e7300..6e6b8db996ef2450c615a71ef10b7ffcbbc62479 100644 (file)
@@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring)
        return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
 }
 
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
 {
        struct intel_ringbuffer *ringbuf = ring->buffer;
        ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring)
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 cmd;
        int ret;
 
@@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;
@@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
-
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
-                         u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+                      u32 invalidate_domains, u32 flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
-       ret = intel_emit_post_sync_nonzero_flush(ring);
+       ret = intel_emit_post_sync_nonzero_flush(req);
        if (ret)
                return ret;
 
@@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
@@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
-               gen7_render_ring_cs_stall_wa(ring);
+               gen7_render_ring_cs_stall_wa(req);
        }
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
                       u32 flags, u32 scratch_addr)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
                /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-               ret = gen8_emit_pipe_control(ring,
+               ret = gen8_emit_pipe_control(req,
                                             PIPE_CONTROL_CS_STALL |
                                             PIPE_CONTROL_STALL_AT_SCOREBOARD,
                                             0);
@@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
                        return ret;
        }
 
-       return gen8_emit_pipe_control(ring, flags, scratch_addr);
+       return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@ err:
        return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
-                                      struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
                return 0;
 
        ring->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(ring);
+       ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
-       ret = intel_ring_begin(ring, (w->count * 2 + 2));
+       ret = intel_ring_begin(req, (w->count * 2 + 2));
        if (ret)
                return ret;
 
@@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
        intel_ring_advance(ring);
 
        ring->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(ring);
+       ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
        return 0;
 }
 
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
-                             struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 {
        int ret;
 
-       ret = intel_ring_workarounds_emit(ring, ctx);
+       ret = intel_ring_workarounds_emit(req);
        if (ret != 0)
                return ret;
 
-       ret = i915_gem_render_state_init(ring);
+       ret = i915_gem_render_state_init(req);
        if (ret)
                DRM_ERROR("init render state: %d\n", ret);
 
@@ -775,11 +780,11 @@ static int wa_add(struct drm_i915_private *dev_priv,
        return 0;
 }
 
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
-       }
+       } while (0)
 
 #define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
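
The WA_REG change above is classic macro hygiene: a bare braced block plus the caller's trailing semicolon breaks if/else nesting, while do { } while (0) makes the macro behave as a single statement. A minimal demonstration with a stand-in macro:

#include <stdio.h>

/* LOG_BAD mimics the old WA_REG shape; written as "LOG_BAD(x);" under an
 * if, the ';' after the block ends the if and orphans a following else. */
#define LOG_BAD(x) { printf("%d\n", (x)); }
#define LOG_GOOD(x) do { printf("%d\n", (x)); } while (0)

int main(void)
{
        int cond = 1;

        if (cond)
                LOG_GOOD(1);   /* expands to one statement, ';' and all */
        else
                LOG_GOOD(2);
        return 0;
}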
@@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+       /* WaDisableAsyncFlipPerfMode:bdw */
+       WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
        /* WaDisablePartialInstShootdown:bdw */
        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+       /* WaDisableAsyncFlipPerfMode:chv */
+       WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
        /* WaDisablePartialInstShootdown:chv */
        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
                /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
-               WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
-                                 DISABLE_PIXEL_MASK_CAMMING);
+               /*
+                * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14]
+                * to be set, but we do that in the per-ctx batchbuffer as this
+                * register does not get restored on context restore.
+                */
        }
 
        if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-           INTEL_REVID(dev) == SKL_REVID_D0)
-               /* WaBarrierPerformanceFixDisable:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FENCE_DEST_SLM_DISABLE |
-                                 HDC_BARRIER_PERFORMANCE_DISABLE);
-
        if (INTEL_REVID(dev) <= SKL_REVID_D0) {
                /*
                 * Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                                  HDC_FORCE_NON_COHERENT);
        }
 
+       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+           INTEL_REVID(dev) == SKL_REVID_D0)
+               /* WaBarrierPerformanceFixDisable:skl */
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE |
+                                 HDC_BARRIER_PERFORMANCE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:skl */
+       if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+               WA_SET_BIT_MASKED(
+                       GEN7_HALF_SLICE_CHICKEN1,
+                       GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+       }
+
        return skl_tune_iz_hashing(ring);
 }
 
@@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring)
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
-        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
        if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
        intel_fini_pipe_control(ring);
 }
 
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
+       struct intel_engine_cs *signaller = signaller_req->ring;
        struct drm_device *dev = signaller->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-       ret = intel_ring_begin(signaller, num_dwords);
+       ret = intel_ring_begin(signaller_req, num_dwords);
        if (ret)
                return ret;
 
@@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(
-                                          signaller->outstanding_lazy_request);
+               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
        return 0;
 }
 
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
+       struct intel_engine_cs *signaller = signaller_req->ring;
        struct drm_device *dev = signaller->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-       ret = intel_ring_begin(signaller, num_dwords);
+       ret = intel_ring_begin(signaller_req, num_dwords);
        if (ret)
                return ret;
 
@@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(
-                                          signaller->outstanding_lazy_request);
+               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
        return 0;
 }
 
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
+       struct intel_engine_cs *signaller = signaller_req->ring;
        struct drm_device *dev = signaller->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller,
        num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
-       ret = intel_ring_begin(signaller, num_dwords);
+       ret = intel_ring_begin(signaller_req, num_dwords);
        if (ret)
                return ret;
 
        for_each_ring(useless, dev_priv, i) {
                u32 mbox_reg = signaller->semaphore.mbox.signal[i];
                if (mbox_reg != GEN6_NOSYNC) {
-                       u32 seqno = i915_gem_request_get_seqno(
-                                          signaller->outstanding_lazy_request);
+                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit(signaller, mbox_reg);
                        intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
- * 
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (ring->semaphore.signal)
-               ret = ring->semaphore.signal(ring, 4);
+               ret = ring->semaphore.signal(req, 4);
        else
-               ret = intel_ring_begin(ring, 4);
+               ret = intel_ring_begin(req, 4);
 
        if (ret)
                return ret;
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
@@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  */
 
 static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
+       struct intel_engine_cs *waiter = waiter_req->ring;
        struct drm_i915_private *dev_priv = waiter->dev->dev_private;
        int ret;
 
-       ret = intel_ring_begin(waiter, 4);
+       ret = intel_ring_begin(waiter_req, 4);
        if (ret)
                return ret;
 
@@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter,
 }
 
 static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
+       struct intel_engine_cs *waiter = waiter_req->ring;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter,
 
        WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-       ret = intel_ring_begin(waiter, 4);
+       ret = intel_ring_begin(waiter_req, 4);
        if (ret)
                return ret;
 
@@ -1392,8 +1418,9 @@ do {                                                                      \
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
@@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
-       ret = intel_ring_begin(ring, 32);
+       ret = intel_ring_begin(req, 32);
        if (ret)
                return ret;
 
@@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);
 
@@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
               u32     invalidate_domains,
               u32     flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
@@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 length,
                         unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 cs_offset = ring->scratch.gtt_offset;
        int ret;
 
-       ret = intel_ring_begin(ring, 6);
+       ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
@@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
-               ret = intel_ring_begin(ring, 6 + 2);
+               ret = intel_ring_begin(req, 6 + 2);
                if (ret)
                        return ret;
 
@@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                offset = cs_offset;
        }
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
        intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
-       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
        if (ring->cleanup)
                ring->cleanup(ring);
@@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
        if (intel_ring_space(ringbuf) >= n)
                return 0;
 
+       /* The whole point of reserving space is to not wait! */
+       WARN_ON(ringbuf->reserved_in_use);
+
        list_for_each_entry(request, &ring->request_list, list) {
                space = __intel_ring_space(request->postfix, ringbuf->tail,
                                           ringbuf->size);
@@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
        return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
        uint32_t __iomem *virt;
-       struct intel_ringbuffer *ringbuf = ring->buffer;
        int rem = ringbuf->size - ringbuf->tail;
 
-       if (ringbuf->space < rem) {
-               int ret = ring_wait_for_space(ring, rem);
-               if (ret)
-                       return ret;
-       }
-
        virt = ringbuf->virtual_start + ringbuf->tail;
        rem /= 4;
        while (rem--)
@@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 
        ringbuf->tail = 0;
        intel_ring_update_space(ringbuf);
-
-       return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *req;
-       int ret;
-
-       /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_request) {
-               ret = i915_add_request(ring);
-               if (ret)
-                       return ret;
-       }
 
        /* Wait upon the last request to be completed */
        if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
        return 0;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
-                               int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+       /*
+        * The first call merely notes the reserve request and is common for
+        * all back ends. The subsequent localised _begin() call actually
+        * ensures that the reservation is available. Without the begin, if
+        * the request creator immediately submitted the request without
+        * adding any commands to it, then there might not actually be
+        * sufficient room for the submission commands.
+        */
+       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+       return intel_ring_begin(request, 0);
+}
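The two-phase contract described in the comment above is easier to see end to end. A minimal sketch, assuming a hypothetical caller on the request-creation path (the helper name below is invented; only the intel_ring_reserve_space() call comes from this patch):

	static int create_request_sketch(struct drm_i915_gem_request *req)
	{
		int ret;

		/* Phase 1: note the reservation and prove, via the embedded
		 * intel_ring_begin(req, 0), that the space exists right now. */
		ret = intel_ring_reserve_space(req);
		if (ret)
			return ret;

		/* The caller may now emit commands, or none at all; either
		 * way the later i915_add_request() is guaranteed room. */
		return 0;
	}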
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+       WARN_ON(ringbuf->reserved_size);
+       WARN_ON(ringbuf->reserved_in_use);
+
+       ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+       WARN_ON(ringbuf->reserved_in_use);
+
+       ringbuf->reserved_size   = 0;
+       ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+       WARN_ON(ringbuf->reserved_in_use);
+
+       ringbuf->reserved_in_use = true;
+       ringbuf->reserved_tail   = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+       WARN_ON(!ringbuf->reserved_in_use);
+       if (ringbuf->tail > ringbuf->reserved_tail) {
+               WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+                    "request reserved size too small: %d vs %d!\n",
+                    ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+       } else {
+               /*
+                * The ring was wrapped while the reserved space was in use.
+                * That means that some unknown amount of the ring tail was
+                * no-op filled and skipped. Thus simply adding the ring size
+                * to the tail and doing the above space check will not work.
+                * Rather than attempt to track how much of the tail was
+                * skipped, it is simpler to accept that the sanity check
+                * is occasionally skipped as well.
+                */
+       }
+
+       ringbuf->reserved_size   = 0;
+       ringbuf->reserved_in_use = false;
+}
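A worked pass through the overrun check, with invented numbers (this mirrors the WARN above; it is not new behaviour):

	/* reserved_tail = 1000, reserved_size = 160, final tail = 1100:
	 * tail > reserved_tail and 1100 - 1000 = 100 <= 160, so no warning.
	 * After a wrap, tail (say 40) < reserved_tail: per the comment
	 * above, the check is simply skipped rather than compensated for. */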
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 {
        struct intel_ringbuffer *ringbuf = ring->buffer;
-       int ret;
+       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       int remain_actual = ringbuf->size - ringbuf->tail;
+       int ret, total_bytes, wait_bytes = 0;
+       bool need_wrap = false;
 
-       if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-               ret = intel_wrap_ring_buffer(ring);
-               if (unlikely(ret))
-                       return ret;
+       if (ringbuf->reserved_in_use)
+               total_bytes = bytes;
+       else
+               total_bytes = bytes + ringbuf->reserved_size;
+
+       if (unlikely(bytes > remain_usable)) {
+               /*
+                * Not enough space for the basic request. So need to flush
+                * out the remainder and then wait for base + reserved.
+                */
+               wait_bytes = remain_actual + total_bytes;
+               need_wrap = true;
+       } else {
+               if (unlikely(total_bytes > remain_usable)) {
+                       /*
+                        * The base request will fit but the reserved space
+                        * falls off the end. So only need to wait for the
+                        * reserved size after flushing out the remainder.
+                        */
+                       wait_bytes = remain_actual + ringbuf->reserved_size;
+                       need_wrap = true;
+               } else if (total_bytes > ringbuf->space) {
+                       /* No wrapping required, just waiting. */
+                       wait_bytes = total_bytes;
+               }
        }
 
-       if (unlikely(ringbuf->space < bytes)) {
-               ret = ring_wait_for_space(ring, bytes);
+       if (wait_bytes) {
+               ret = ring_wait_for_space(ring, wait_bytes);
                if (unlikely(ret))
                        return ret;
+
+               if (need_wrap)
+                       __wrap_ring_buffer(ringbuf);
        }
 
        return 0;
 }
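The three branches above are easiest to follow with concrete numbers (invented purely for illustration):

	/* Assume: size = 4096, effective_size = 4088, tail = 4000,
	 * reserved_size = 160, reserved_in_use = false, bytes = 64.
	 *
	 * remain_usable = 4088 - 4000 = 88, remain_actual = 4096 - 4000 = 96,
	 * total_bytes = 64 + 160 = 224.
	 *
	 * bytes (64) <= remain_usable (88), but total_bytes (224) > 88: the
	 * request itself fits before the wrap point but the reservation does
	 * not, so we wait for remain_actual + reserved_size = 96 + 160 = 256
	 * bytes and then wrap, leaving the reserved space contiguous at the
	 * start of the ring. */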
 
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
                     int num_dwords)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct intel_engine_cs *ring;
+       struct drm_i915_private *dev_priv;
        int ret;
 
+       WARN_ON(req == NULL);
+       ring = req->ring;
+       dev_priv = ring->dev->dev_private;
+
        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                   dev_priv->mm.interruptible);
        if (ret)
@@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
        if (ret)
                return ret;
 
-       /* Preallocate the olr before touching the ring */
-       ret = i915_gem_request_alloc(ring, ring->default_context);
-       if (ret)
-               return ret;
-
        ring->buffer->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
@@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
                return 0;
 
        num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-       ret = intel_ring_begin(ring, num_dwords);
+       ret = intel_ring_begin(req, num_dwords);
        if (ret)
                return ret;
 
@@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       BUG_ON(ring->outstanding_lazy_request);
-
        if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                               u32 invalidate, u32 flush)
 {
+       struct intel_engine_cs *ring = req->ring;
        uint32_t cmd;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        bool ppgtt = USES_PPGTT(ring->dev) &&
                        !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+                       (dispatch_flags & I915_DISPATCH_RS ?
+                        MI_BATCH_RESOURCE_STREAMER : 0));
        intel_ring_emit(ring, lower_32_bits(offset));
        intel_ring_emit(ring, upper_32_bits(offset));
        intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                             u64 offset, u32 len,
                             unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
-                        0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+                        0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+                       (dispatch_flags & I915_DISPATCH_RS ?
+                        MI_BATCH_RESOURCE_STREAMER : 0));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
-       ret = intel_ring_begin(ring, 2);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
@@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        uint32_t cmd;
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
@@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (!ring->gpu_caches_dirty)
                return 0;
 
-       ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+       trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
        ring->gpu_caches_dirty = false;
        return 0;
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        uint32_t flush_domains;
        int ret;
 
@@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+       trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
        ring->gpu_caches_dirty = false;
        return 0;
index 4be66f60504d13661f07886cb891fd9417da754f..2e85fda949638079d2c7c4c9f8ac9daf39f97f5f 100644 (file)
@@ -12,6 +12,7 @@
  * workarounds!
  */
 #define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@ struct intel_ringbuffer {
        int space;
        int size;
        int effective_size;
+       int reserved_size;
+       int reserved_tail;
+       bool reserved_in_use;
 
        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@ struct intel_ringbuffer {
 struct intel_context;
 struct drm_i915_reg_descriptor;
 
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in terms of dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies the batch starting position, also helpful in case
+ *    we want to have multiple batches at different offsets based on
+ *    some criteria. It is not a requirement at the moment but provides
+ *    an option for future use.
+ *  size: size of the batch in DWORDS
+ */
+struct  i915_ctx_workarounds {
+       struct i915_wa_ctx_bb {
+               u32 offset;
+               u32 size;
+       } indirect_ctx, per_ctx;
+       struct drm_i915_gem_object *obj;
+};
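Since the comment above insists on dword units, a hedged illustration (the values are invented): a workaround batch placed 256 bytes into the page and containing 35 instructions would be recorded as

	wa_ctx.indirect_ctx.offset = 256 / sizeof(u32);	/* = 64 dwords */
	wa_ctx.indirect_ctx.size = 35;			/* dwords, not bytes */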
+
 struct  intel_engine_cs {
        const char      *name;
        enum intel_ring_id {
@@ -143,6 +166,7 @@ struct  intel_engine_cs {
        struct i915_gem_batch_pool batch_pool;
 
        struct intel_hw_status_page status_page;
+       struct i915_ctx_workarounds wa_ctx;
 
        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@ struct  intel_engine_cs {
 
        int             (*init_hw)(struct intel_engine_cs *ring);
 
-       int             (*init_context)(struct intel_engine_cs *ring,
-                                       struct intel_context *ctx);
+       int             (*init_context)(struct drm_i915_gem_request *req);
 
        void            (*write_tail)(struct intel_engine_cs *ring,
                                      u32 value);
-       int __must_check (*flush)(struct intel_engine_cs *ring,
+       int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32   invalidate_domains,
                                  u32   flush_domains);
-       int             (*add_request)(struct intel_engine_cs *ring);
+       int             (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@ struct  intel_engine_cs {
                                     bool lazy_coherency);
        void            (*set_seqno)(struct intel_engine_cs *ring,
                                     u32 seqno);
-       int             (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+       int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                               u64 offset, u32 length,
                                               unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
        void            (*cleanup)(struct intel_engine_cs *ring);
 
        /* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@ struct  intel_engine_cs {
                };
 
                /* AKA wait() */
-               int     (*sync_to)(struct intel_engine_cs *ring,
-                                  struct intel_engine_cs *to,
+               int     (*sync_to)(struct drm_i915_gem_request *to_req,
+                                  struct intel_engine_cs *from,
                                   u32 seqno);
-               int     (*signal)(struct intel_engine_cs *signaller,
+               int     (*signal)(struct drm_i915_gem_request *signaller_req,
                                  /* num_dwords needed by caller */
                                  unsigned int num_dwords);
        } semaphore;
@@ -243,14 +267,11 @@ struct  intel_engine_cs {
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-       int             (*emit_request)(struct intel_ringbuffer *ringbuf,
-                                       struct drm_i915_gem_request *request);
-       int             (*emit_flush)(struct intel_ringbuffer *ringbuf,
-                                     struct intel_context *ctx,
+       int             (*emit_request)(struct drm_i915_gem_request *request);
+       int             (*emit_flush)(struct drm_i915_gem_request *request,
                                      u32 invalidate_domains,
                                      u32 flush_domains);
-       int             (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-                                        struct intel_context *ctx,
+       int             (*emit_bb_start)(struct drm_i915_gem_request *req,
                                         u64 offset, unsigned dispatch_flags);
 
        /**
@@ -271,10 +292,6 @@ struct  intel_engine_cs {
         */
        struct list_head request_list;
 
-       /**
-        * Do we have some not yet emitted requests outstanding?
-        */
-       struct drm_i915_gem_request *outstanding_lazy_request;
        /**
         * Seqno of request most recently submitted to request_list.
         * Used exclusively by hang checker to avoid grabbing lock while
@@ -408,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
 {
@@ -426,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);
 int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -451,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
        return ringbuf->tail;
 }
 
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
-       BUG_ON(ring->outstanding_lazy_request == NULL);
-       return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST      160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
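Pieced together from the comments above, the intended lifecycle looks roughly like this (a sketch of the call order, not a verbatim call chain):

	ret = intel_ring_reserve_space(req);		/* at request creation */

	intel_ring_reserved_space_cancel(ringbuf);	/* if req is discarded */

	/* otherwise, inside i915_add_request(): */
	intel_ring_reserved_space_use(ringbuf);
	/* ... emit the add-request commands ... */
	intel_ring_reserved_space_end(ringbuf);		/* warns on overrun */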
 
 #endif /* _INTEL_RINGBUFFER_H_ */
index 1a45385f4d66947087c8ae072e2e2b2328ad6651..821644d1b544eb618e57db8196014768ebe9d7b8 100644 (file)
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                    int power_well_id);
 
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well)
+{
+       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+       power_well->ops->enable(dev_priv, power_well);
+       power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+                                    struct i915_power_well *power_well)
+{
+       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+       power_well->hw_enabled = false;
+       power_well->ops->disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -835,12 +851,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
        return enabled;
 }
 
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-       vlv_set_power_well(dev_priv, power_well, true);
 
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +870,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
        i915_redisable_vga_power_on(dev_priv->dev);
 }
 
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_disable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       vlv_display_power_well_init(dev_priv);
+}
+
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_disable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       vlv_display_power_well_deinit(dev_priv);
 
        vlv_set_power_well(dev_priv, power_well, false);
-
-       vlv_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +909,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
         * display and the reference clock for VGA
         * hotplug / manual detection.
         */
-       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+                  DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
        vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +960,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
         */
        if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV);
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+                          DPLL_REF_CLK_ENABLE_VLV);
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+                          DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        } else {
                phy = DPIO_PHY1;
-               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+                          DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        }
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1069,29 @@ out:
 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
 {
+       WARN_ON_ONCE(power_well->data != PIPE_A);
+
        chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
+       WARN_ON_ONCE(power_well->data != PIPE_A);
 
        chv_set_pipe_power_well(dev_priv, power_well, true);
 
-       if (power_well->data == PIPE_A) {
-               spin_lock_irq(&dev_priv->irq_lock);
-               valleyview_enable_display_irqs(dev_priv);
-               spin_unlock_irq(&dev_priv->irq_lock);
-
-               /*
-                * During driver initialization/resume we can avoid restoring the
-                * part of the HW/SW state that will be inited anyway explicitly.
-                */
-               if (dev_priv->power_domains.initializing)
-                       return;
-
-               intel_hpd_init(dev_priv);
-
-               i915_redisable_vga_power_on(dev_priv->dev);
-       }
+       vlv_display_power_well_init(dev_priv);
 }
 
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
 {
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
+       WARN_ON_ONCE(power_well->data != PIPE_A);
 
-       if (power_well->data == PIPE_A) {
-               spin_lock_irq(&dev_priv->irq_lock);
-               valleyview_disable_display_irqs(dev_priv);
-               spin_unlock_irq(&dev_priv->irq_lock);
-       }
+       vlv_display_power_well_deinit(dev_priv);
 
        chv_set_pipe_power_well(dev_priv, power_well, false);
-
-       if (power_well->data == PIPE_A)
-               vlv_power_sequencer_reset(dev_priv);
 }
 
 /**
@@ -1117,11 +1120,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
        mutex_lock(&power_domains->lock);
 
        for_each_power_well(i, power_well, BIT(domain), power_domains) {
-               if (!power_well->count++) {
-                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-                       power_well->ops->enable(dev_priv, power_well);
-                       power_well->hw_enabled = true;
-               }
+               if (!power_well->count++)
+                       intel_power_well_enable(dev_priv, power_well);
        }
 
        power_domains->domain_use_count[domain]++;
@@ -1155,11 +1155,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                WARN_ON(!power_well->count);
 
-               if (!--power_well->count && i915.disable_power_well) {
-                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-                       power_well->hw_enabled = false;
-                       power_well->ops->disable(dev_priv, power_well);
-               }
+               if (!--power_well->count && i915.disable_power_well)
+                       intel_power_well_disable(dev_priv, power_well);
        }
 
        mutex_unlock(&power_domains->lock);
index aa2fd751609cf2be92826d4f8e6ba85df971892e..c98098e884ccef64e7d722a70063bd97b404a7cc 100644 (file)
@@ -1508,51 +1508,6 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
        intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
-       struct drm_crtc *crtc;
-       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
-       /* dvo supports only 2 dpms states. */
-       if (mode != DRM_MODE_DPMS_ON)
-               mode = DRM_MODE_DPMS_OFF;
-
-       if (mode == connector->dpms)
-               return;
-
-       connector->dpms = mode;
-
-       /* Only need to change hw state when actually enabled */
-       crtc = intel_sdvo->base.base.crtc;
-       if (!crtc) {
-               intel_sdvo->base.connectors_active = false;
-               return;
-       }
-
-       /* We set active outputs manually below in case pipe dpms doesn't change
-        * due to cloning. */
-       if (mode != DRM_MODE_DPMS_ON) {
-               intel_sdvo_set_active_outputs(intel_sdvo, 0);
-               if (0)
-                       intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
-               intel_sdvo->base.connectors_active = false;
-
-               intel_crtc_update_dpms(crtc);
-       } else {
-               intel_sdvo->base.connectors_active = true;
-
-               intel_crtc_update_dpms(crtc);
-
-               if (0)
-                       intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-               intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
-       }
-
-       intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@ done:
 }
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-       .dpms = intel_sdvo_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_sdvo_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_sdvo_set_property,
index 8193a35388d7a0fbed6a947650f4a6bc8895c807..9d8af2f8a87596caf23ddc65ae7e13025d9c5f9a 100644 (file)
@@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
  * until a subsequent call to intel_pipe_update_end(). That is done to
  * avoid random delays. The value written to @start_vbl_count should be
  * supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
  */
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 {
        struct drm_device *dev = crtc->base.dev;
        const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
        min = vblank_start - usecs_to_scanlines(mode, 100);
        max = vblank_start - 1;
 
+       local_irq_disable();
+       *start_vbl_count = 0;
+
        if (min <= 0 || max <= 0)
-               return false;
+               return;
 
        if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-               return false;
-
-       local_irq_disable();
+               return;
 
        trace_i915_pipe_update_start(crtc, min, max);
 
@@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
        *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
        trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
-       return true;
 }
 
 /**
@@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
 
        local_irq_enable();
 
-       if (start_vbl_count != end_vbl_count)
+       if (start_vbl_count && start_vbl_count != end_vbl_count)
                DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
                          pipe_name(pipe), start_vbl_count, end_vbl_count);
 }
@@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
        const int plane = intel_plane->plane + 1;
        u32 plane_ctl, stride_div, stride;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+       const struct drm_intel_sprite_colorkey *key =
+               &to_intel_plane_state(drm_plane->state)->ckey;
        unsigned long surf_addr;
        u32 tile_height, plane_offset, plane_size;
        unsigned int rotation;
@@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 }
 
 static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        u32 sprctl;
        unsigned long sprsurf_offset, linear_offset;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+       const struct drm_intel_sprite_colorkey *key =
+               &to_intel_plane_state(dplane->state)->ckey;
 
        sprctl = SP_ENABLE;
 
@@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        if (obj->tiling_mode != I915_TILING_NONE)
                sprctl |= SP_TILED;
 
-       intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
-                                      pixel_size, true,
-                                      src_w != crtc_w || src_h != crtc_h);
-
        /* Sizes are 0 based */
        src_w--;
        src_h--;
@@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        crtc_h--;
 
        linear_offset = y * fb->pitches[0] + x * pixel_size;
-       sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+       sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+                                                       &x, &y,
                                                        obj->tiling_mode,
                                                        pixel_size,
                                                        fb->pitches[0]);
@@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 }
 
 static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
 
        I915_WRITE(SPSURF(pipe, plane), 0);
        POSTING_READ(SPSURF(pipe, plane));
-
-       intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        u32 sprctl, sprscale = 0;
        unsigned long sprsurf_offset, linear_offset;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+       const struct drm_intel_sprite_colorkey *key =
+               &to_intel_plane_state(plane->state)->ckey;
 
        sprctl = SPRITE_ENABLE;
 
@@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset =
-               intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+               intel_gen4_compute_page_offset(dev_priv,
+                                              &x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
@@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 }
 
 static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        unsigned long dvssurf_offset, linear_offset;
        u32 dvscntr, dvsscale;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+       const struct drm_intel_sprite_colorkey *key =
+               &to_intel_plane_state(plane->state)->ckey;
 
        dvscntr = DVS_ENABLE;
 
@@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        dvssurf_offset =
-               intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+               intel_gen4_compute_page_offset(dev_priv,
+                                              &x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
@@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 }
 
 static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
 
 static int
 intel_check_sprite_plane(struct drm_plane *plane,
+                        struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
 {
        struct drm_device *dev = plane->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
-       struct intel_crtc_state *crtc_state;
+       struct drm_crtc *crtc = state->base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = state->base.fb;
        int crtc_x, crtc_y;
@@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
        int max_scale, min_scale;
        bool can_scale;
        int pixel_size;
-       int ret;
-
-       intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
-       crtc_state = state->base.state ?
-               intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
 
        if (!fb) {
                state->visible = false;
-               goto finish;
+               return 0;
        }
 
        /* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
        /* setup can_scale, min_scale, max_scale */
        if (INTEL_INFO(dev)->gen >= 9) {
                /* use scaler when colorkey is not required */
-               if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+               if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
                        can_scale = 1;
                        min_scale = 1;
                        max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
         * coordinates and sizes. We probably need some way to decide whether
         * more strict checking should be done instead.
         */
-
        drm_rect_rotate(src, fb->width << 16, fb->height << 16,
                        state->base.rotation);
 
@@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
        vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(vscale < 0);
 
-       state->visible =  drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+       state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
        crtc_x = dst->x1;
        crtc_y = dst->y1;
@@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
        dst->y1 = crtc_y;
        dst->y2 = crtc_y + crtc_h;
 
-finish:
-       /*
-        * If the sprite is completely covering the primary plane,
-        * we can disable the primary and save power.
-        */
-       if (intel_crtc->active) {
-               intel_crtc->atomic.fb_bits |=
-                       INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
-               if (intel_wm_need_update(plane, &state->base))
-                       intel_crtc->atomic.update_wm = true;
-
-               if (!state->visible) {
-                       /*
-                        * Avoid underruns when disabling the sprite.
-                        * FIXME remove once watermark updates are done properly.
-                        */
-                       intel_crtc->atomic.wait_vblank = true;
-                       intel_crtc->atomic.update_sprite_watermarks |=
-                               (1 << drm_plane_index(plane));
-               }
-       }
-
-       if (INTEL_INFO(dev)->gen >= 9) {
-               ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
-                       state, 0);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane,
                          struct intel_plane_state *state)
 {
        struct drm_crtc *crtc = state->base.crtc;
-       struct intel_crtc *intel_crtc;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = state->base.fb;
-       int crtc_x, crtc_y;
-       unsigned int crtc_w, crtc_h;
-       uint32_t src_x, src_y, src_w, src_h;
 
        crtc = crtc ? crtc : plane->crtc;
-       intel_crtc = to_intel_crtc(crtc);
 
        plane->fb = fb;
 
-       if (intel_crtc->active) {
-               if (state->visible) {
-                       crtc_x = state->dst.x1;
-                       crtc_y = state->dst.y1;
-                       crtc_w = drm_rect_width(&state->dst);
-                       crtc_h = drm_rect_height(&state->dst);
-                       src_x = state->src.x1 >> 16;
-                       src_y = state->src.y1 >> 16;
-                       src_w = drm_rect_width(&state->src) >> 16;
-                       src_h = drm_rect_height(&state->src) >> 16;
-                       intel_plane->update_plane(plane, crtc, fb,
-                                                 crtc_x, crtc_y, crtc_w, crtc_h,
-                                                 src_x, src_y, src_w, src_h);
-               } else {
-                       intel_plane->disable_plane(plane, crtc, false);
-               }
+       if (!crtc->state->active)
+               return;
+
+       if (state->visible) {
+               intel_plane->update_plane(plane, crtc, fb,
+                                         state->dst.x1, state->dst.y1,
+                                         drm_rect_width(&state->dst),
+                                         drm_rect_height(&state->dst),
+                                         state->src.x1 >> 16,
+                                         state->src.y1 >> 16,
+                                         drm_rect_width(&state->src) >> 16,
+                                         drm_rect_height(&state->src) >> 16);
+       } else {
+               intel_plane->disable_plane(plane, crtc);
        }
 }
 
@@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 {
        struct drm_intel_sprite_colorkey *set = data;
        struct drm_plane *plane;
-       struct intel_plane *intel_plane;
+       struct drm_plane_state *plane_state;
+       struct drm_atomic_state *state;
+       struct drm_modeset_acquire_ctx ctx;
        int ret = 0;
 
        /* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,50 +960,41 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
            set->flags & I915_SET_COLORKEY_DESTINATION)
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
-
        plane = drm_plane_find(dev, set->plane_id);
-       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
-               ret = -ENOENT;
-               goto out_unlock;
-       }
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+               return -ENOENT;
 
-       intel_plane = to_intel_plane(plane);
+       drm_modeset_acquire_init(&ctx, 0);
 
-       if (INTEL_INFO(dev)->gen >= 9) {
-               /* plane scaling and colorkey are mutually exclusive */
-               if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
-                       DRM_ERROR("colorkey not allowed with scaler\n");
-                       ret = -EINVAL;
-                       goto out_unlock;
-               }
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state) {
+               ret = -ENOMEM;
+               goto out;
        }
+       state->acquire_ctx = &ctx;
+
+       while (1) {
+               plane_state = drm_atomic_get_plane_state(state, plane);
+               ret = PTR_ERR_OR_ZERO(plane_state);
+               if (!ret) {
+                       to_intel_plane_state(plane_state)->ckey = *set;
+                       ret = drm_atomic_commit(state);
+               }
 
-       intel_plane->ckey = *set;
-
-       /*
-        * The only way this could fail would be due to
-        * the current plane state being unsupportable already,
-        * and we dont't consider that an error for the
-        * colorkey ioctl. So just ignore any error.
-        */
-       intel_plane_restore(plane);
+               if (ret != -EDEADLK)
+                       break;
 
-out_unlock:
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+       }
 
-int intel_plane_restore(struct drm_plane *plane)
-{
-       if (!plane->crtc || !plane->state->fb)
-               return 0;
+       if (ret)
+               drm_atomic_state_free(state);
 
-       return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
-                                      plane->state->crtc_x, plane->state->crtc_y,
-                                      plane->state->crtc_w, plane->state->crtc_h,
-                                      plane->state->src_x, plane->state->src_y,
-                                      plane->state->src_w, plane->state->src_h);
+out:
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+       return ret;
 }
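
[editor note] The rewritten ioctl above is an instance of the standard atomic retry idiom: take locks through an acquire context, and on -EDEADLK clear the state, back off, and loop. A minimal sketch of that idiom in isolation; apply_plane_update() is hypothetical, everything else is the stock DRM API used in the hunk:

/* Sketch only: assumes <drm/drm_atomic.h> and <drm/drm_modeset_lock.h>. */
static int foo_update_plane_locked(struct drm_plane *plane)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(plane->dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}
	state->acquire_ctx = &ctx;

	while (1) {
		/* May return ERR_PTR(-EDEADLK) on lock contention. */
		plane_state = drm_atomic_get_plane_state(state, plane);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			apply_plane_update(plane_state);	/* hypothetical */
			ret = drm_atomic_commit(state);
		}
		if (ret != -EDEADLK)
			break;

		/* Someone else holds a lock we need: unwind and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

	if (ret)
		drm_atomic_state_free(state);
out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}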
 
 static const uint32_t ilk_plane_formats[] = {
@@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
        intel_plane->pipe = pipe;
        intel_plane->plane = plane;
+       intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
        intel_plane->check_plane = intel_check_sprite_plane;
        intel_plane->commit_plane = intel_commit_sprite_plane;
-       intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
        possible_crtcs = (1 << pipe);
        ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
                                       &intel_plane_funcs,
@@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
        drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
- out:
+out:
        return ret;
 }
index 8b9d325bda3c7e1c73af68afb6dc46ac3cb9d4fc..0568ae6ec9dd2c945b0395a14305c281daf18be2 100644 (file)
@@ -1509,7 +1509,7 @@ out:
 }
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-       .dpms = intel_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_tv_detect,
        .destroy = intel_tv_destroy,
        .set_property = intel_tv_set_property,
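
[editor note] The .dpms swap above belongs to a tree-wide change, visible in several hunks below, that turns the hook's return type from void into int so DPMS errors can propagate to the ioctl; stubs now return 0. A hedged sketch of a funcs table on the new signature (foo_connector_detect is hypothetical, the rest is stock API):

/* Sketch only: an atomic driver delegates DPMS to the helper and the
 * helper's error code flows back through the now int-returning hook.
 */
static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = foo_connector_detect,		/* hypothetical */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};
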
index 260389acfb7752d01ce0ebd59597f06a671b2ea1..9d3c2e420d2b68611d52e9c6395d35721da19447 100644 (file)
@@ -1467,20 +1467,80 @@ static int gen6_do_reset(struct drm_device *dev)
        return ret;
 }
 
-int intel_gpu_reset(struct drm_device *dev)
+static int wait_for_register(struct drm_i915_private *dev_priv,
+                            const u32 reg,
+                            const u32 mask,
+                            const u32 value,
+                            const unsigned long timeout_ms)
+{
+       return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
 {
-       if (INTEL_INFO(dev)->gen >= 6)
-               return gen6_do_reset(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *engine;
+       int i;
+
+       for_each_ring(engine, dev_priv, i) {
+               I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+                          _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+               if (wait_for_register(dev_priv,
+                                     RING_RESET_CTL(engine->mmio_base),
+                                     RESET_CTL_READY_TO_RESET,
+                                     RESET_CTL_READY_TO_RESET,
+                                     700)) {
+                       DRM_ERROR("%s: reset request timeout\n", engine->name);
+                       goto not_ready;
+               }
+       }
+
+       return gen6_do_reset(dev);
+
+not_ready:
+       for_each_ring(engine, dev_priv, i)
+               I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+                          _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+       return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+       if (!i915.reset)
+               return NULL;
+
+       if (INTEL_INFO(dev)->gen >= 8)
+               return gen8_do_reset;
+       else if (INTEL_INFO(dev)->gen >= 6)
+               return gen6_do_reset;
        else if (IS_GEN5(dev))
-               return ironlake_do_reset(dev);
+               return ironlake_do_reset;
        else if (IS_G4X(dev))
-               return g4x_do_reset(dev);
+               return g4x_do_reset;
        else if (IS_G33(dev))
-               return g33_do_reset(dev);
+               return g33_do_reset;
        else if (INTEL_INFO(dev)->gen >= 3)
-               return i915_do_reset(dev);
+               return i915_do_reset;
        else
+               return NULL;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+       int (*reset)(struct drm_device *);
+
+       reset = intel_get_gpu_reset(dev);
+       if (reset == NULL)
                return -ENODEV;
+
+       return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+       return intel_get_gpu_reset(dev) != NULL;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
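
[editor note] intel_gpu_reset() is now split into a platform lookup that returns the reset routine plus a trivial caller, which also makes intel_has_gpu_reset() a one-liner. The raw declarator `static int (*intel_get_gpu_reset(...))(struct drm_device *)` reads more easily through a typedef; a hedged sketch of the same shape, trimmed to two platform branches:

/* Sketch only: lookup-then-call split with a typedef'd reset routine. */
typedef int (*gpu_reset_func)(struct drm_device *dev);

static gpu_reset_func lookup_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	return NULL;		/* remaining branches elided */
}

static int do_gpu_reset(struct drm_device *dev)
{
	gpu_reset_func reset = lookup_gpu_reset(dev);

	return reset ? reset(dev) : -ENODEV;
}

static bool has_gpu_reset(struct drm_device *dev)
{
	return lookup_gpu_reset(dev) != NULL;
}
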
index c36b8304042b70ddf109758b72e59bebe852ab4c..958cf3cf082dcbef92a0d7926ff98d65a6484c23 100644 (file)
@@ -167,7 +167,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb;
        struct drm_gem_object *gobj = NULL;
        struct device *device = &dev->pdev->dev;
-       struct mgag200_bo *bo;
        int ret;
        void *sysram;
        int size;
@@ -185,7 +184,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
                DRM_ERROR("failed to create fbcon backing object %d\n", ret);
                return ret;
        }
-       bo = gem_to_mga_bo(gobj);
 
        sysram = vmalloc(size);
        if (!sysram)
index ad4b9010dfb0bbed135185e9f64aed98c3239a24..cd75cff096e1ccebbb1be0a4461e0f9b7d855fbf 100644 (file)
@@ -158,7 +158,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 {
        unsigned int vcomax, vcomin, pllreffreq;
-       unsigned int delta, tmpdelta, permitteddelta;
+       unsigned int delta, tmpdelta;
        unsigned int testp, testm, testn;
        unsigned int p, m, n;
        unsigned int computed;
@@ -172,7 +172,6 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
        pllreffreq = 48000;
 
        delta = 0xffffffff;
-       permitteddelta = clock * 5 / 1000;
 
        for (testp = 1; testp < 9; testp++) {
                if (clock * testp > vcomax)
@@ -298,7 +297,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 {
        unsigned int vcomax, vcomin, pllreffreq;
-       unsigned int delta, tmpdelta, permitteddelta;
+       unsigned int delta, tmpdelta;
        unsigned int testp, testm, testn;
        unsigned int p, m, n;
        unsigned int computed;
@@ -310,7 +309,6 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        pllreffreq = 50000;
 
        delta = 0xffffffff;
-       permitteddelta = clock * 5 / 1000;
 
        for (testp = 16; testp > 0; testp--) {
                if (clock * testp > vcomax)
@@ -392,7 +390,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 {
        unsigned int vcomax, vcomin, pllreffreq;
-       unsigned int delta, tmpdelta, permitteddelta;
+       unsigned int delta, tmpdelta;
        unsigned int testp, testm, testn;
        unsigned int p, m, n;
        unsigned int computed;
@@ -406,7 +404,6 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
        pllreffreq = 33333;
 
        delta = 0xffffffff;
-       permitteddelta = clock * 5 / 1000;
 
        for (testp = 16; testp > 0; testp >>= 1) {
                if (clock * testp > vcomax)
index d16964ea0ed4820039ca1b90030032e8ed735212..05108b505fbfa1382ad071ab80e4357ad4db4ae2 100644 (file)
@@ -378,7 +378,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
-       int i, ret;
+       int i;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
@@ -389,11 +389,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-       if (ret)
-               return ret;
-
-       return 0;
+       return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
index c4bb9d9c7667e8453242fced07eae6d06c63bf74..4dc158ed2e9540ccb6358ce43aaec9d3adf8f1d1 100644 (file)
@@ -334,13 +334,15 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        DBG("%s: begin", mdp4_crtc->name);
 }
 
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct drm_device *dev = crtc->dev;
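
[editor note] This hunk and the matching ones in mdp5, rcar-du, sti and tegra below are mechanical fallout of the ->atomic_begin()/->atomic_flush() signature change: the helpers now hand drivers the old CRTC state so an update can be diffed against it. A hedged sketch of a flush hook that actually uses the new argument (foo_* names are hypothetical):

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* Only reprogram the scanout engine when 'active' toggled. */
	if (crtc->state->active != old_crtc_state->active)
		foo_crtc_commit_planes(crtc);	/* hypothetical */
}
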
index dea3d2e559b1cdf80c04cb35ca1fc3ae61ba3814..4c1df4e6e5bcaef44736a919b6e9382d7a3240a4 100644 (file)
@@ -388,13 +388,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s: begin", mdp5_crtc->name);
 }
 
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
index 3162040bc3148b2157249dd29f945d8d05585812..1f26eba245d10b624ecdbab392ed7ef30f0d163d 100644 (file)
@@ -919,7 +919,7 @@ nouveau_connector_funcs_lvds = {
        .force = nouveau_connector_force
 };
 
-static void
+static int
 nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 {
        struct nouveau_encoder *nv_encoder = NULL;
@@ -938,7 +938,7 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
                }
        }
 
-       drm_helper_connector_dpms(connector, mode);
+       return drm_helper_connector_dpms(connector, mode);
 }
 
 static const struct drm_connector_funcs
index 477cbb12809b029c2de6d62bdd0bcb3ba415001c..109b8262dc85942ac54720c57682402c161d435e 100644 (file)
@@ -946,7 +946,8 @@ static struct drm_driver
 driver_stub = {
        .driver_features =
                DRIVER_USE_AGP |
-               DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+               DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+               DRIVER_KMS_LEGACY_CONTEXT,
 
        .load = nouveau_drm_load,
        .unload = nouveau_drm_unload,
index 257b10be5cda902861339d9fde17c38e4f238d06..5e09c061847f50c688650d12625a462e8c4737cd 100644 (file)
@@ -246,9 +246,10 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
        kfree(radeon_connector);
 }
 
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
 {
        DRM_DEBUG_KMS("\n");
+       return 0;
 }
 
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
        drm_mode_connector_set_path_property(connector, pathprop);
-       drm_reinit_primary_mode_group(dev);
 
-       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_lock_all(dev);
        radeon_fb_add_connector(rdev, connector);
-       mutex_unlock(&dev->mode_config.mutex);
+       drm_modeset_unlock_all(dev);
 
        drm_connector_register(connector);
        return connector;
@@ -303,14 +303,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
        drm_connector_unregister(connector);
        /* need to nuke the connector */
-       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_lock_all(dev);
        /* dpms off */
        radeon_fb_remove_connector(rdev, connector);
 
        drm_connector_cleanup(connector);
-       mutex_unlock(&dev->mode_config.mutex);
-       drm_reinit_primary_mode_group(dev);
-
+       drm_modeset_unlock_all(dev);
 
        kfree(connector);
        DRM_DEBUG_KMS("\n");
index e476c331f3fa6e3c91d4a5203f0aab8829028dfa..9a4d69e59401509b0d7be27b31332ed2b43b72b7 100644 (file)
@@ -845,7 +845,8 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
                hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
                break;
 
-       case KGD_ENGINE_SDMA:
+       case KGD_ENGINE_SDMA1:
+       case KGD_ENGINE_SDMA2:
                hdr = (const union radeon_firmware_header *)
                                                        rdev->sdma_fw->data;
                break;
index 65d6ba6621aca5b1883ac8a4a57a93a92e0ca913..48cb19949ca3f01eeb365b3a1dadc30509888cfd 100644 (file)
@@ -496,7 +496,8 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+                                     struct drm_crtc_state *old_crtc_state)
 {
        struct drm_pending_vblank_event *event = crtc->state->event;
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
        }
 }
 
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+                                     struct drm_crtc_state *old_crtc_state)
 {
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
index 859ccb658601e9ca476bbc6e1c5db50c4bbc8160..e9272b0a859246970d62c69b8c7132e20f799d68 100644 (file)
@@ -248,7 +248,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
        lcdc_write(sdev, LDDDSR, value);
 
        /* Setup planes. */
-       drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+       drm_for_each_legacy_plane(plane, dev) {
                if (plane->crtc == crtc)
                        shmob_drm_plane_setup(plane);
        }
index 6b641c5a2ec7d10609f20a38cbe6bab56c9f4c5e..26e63bf14efe296524f7f72d5bfe1c60d5f29d36 100644 (file)
@@ -164,7 +164,8 @@ sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
        sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
+static void sti_drm_atomic_begin(struct drm_crtc *crtc,
+                                struct drm_crtc_state *old_crtc_state)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
 
@@ -178,7 +179,8 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc)
        }
 }
 
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+static void sti_drm_atomic_flush(struct drm_crtc *crtc,
+                                struct drm_crtc_state *old_crtc_state)
 {
 }
 
index a287e4fec8653d91e55bb2765e2379984b65bef5..bf8ef3133e5bb929e2fa9d644a89bf7311d2472f 100644 (file)
@@ -1277,7 +1277,8 @@ static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
        struct tegra_dc *dc = to_tegra_dc(crtc);
 
@@ -1291,7 +1292,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
        }
 }
 
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
        struct tegra_dc_state *state = to_dc_state(crtc->state);
        struct tegra_dc *dc = to_tegra_dc(crtc);
index ed970f62290306e5c78158f9600211d51251dc78..dc97c0b3681d95d60c6b983c29afd036c7aa14f5 100644 (file)
@@ -726,8 +726,9 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
                tegra_dsi_soft_reset(dsi->slave);
 }
 
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
+static int tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
 {
+       return 0;
 }
 
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
index 06ab1783bba11e7b1299e3d950accf1547e20285..fe4008a7ddba56695439bafd5c3e185e34d766ad 100644 (file)
@@ -772,9 +772,10 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
        return drm_detect_hdmi_monitor(edid);
 }
 
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
-                                     int mode)
+static int tegra_hdmi_connector_dpms(struct drm_connector *connector,
+                                    int mode)
 {
+       return 0;
 }
 
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
index 7cd833f5b5b591257da40c04efc209057ec62549..9a99d213e1b1e8aeb2481771aac46c4a3fa2b3b6 100644 (file)
@@ -88,9 +88,10 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
                tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
-                                    int mode)
+static int tegra_rgb_connector_dpms(struct drm_connector *connector,
+                                   int mode)
 {
+       return 0;
 }
 
 static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
index 7591d8901f9a24ddd61d035500116c617e4df3f6..ee8ad0d4a0f28700ab54d663716cc37889ef7034 100644 (file)
@@ -866,8 +866,9 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
        sor->debugfs_files = NULL;
 }
 
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
+static int tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
 {
+       return 0;
 }
 
 static enum drm_connector_status
index 07cda8cbbddbcb5e6f56127c57dac0e683541fb9..2adc11bc09209cee810eae29b807c7c0e89be2bd 100644 (file)
@@ -1808,8 +1808,9 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
        }
 }
 
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
 {
+       return 0;
 }
 
 void vmw_du_connector_save(struct drm_connector *connector)
index 8d038c36bd57599b311f129532a768e9d39fdfa0..f1a324cfb4c3008b1b8ad9db8a17604fa683ea3d 100644 (file)
@@ -133,7 +133,7 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                           uint32_t handle, uint32_t width, uint32_t height);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
 void vmw_du_connector_save(struct drm_connector *connector);
 void vmw_du_connector_restore(struct drm_connector *connector);
 enum drm_connector_status
index 653815950aa2416b277718df69213545573aa557..379a420245eacdb3c17e34571ab738258ea5c44a 100644 (file)
@@ -318,6 +318,7 @@ config LPC_SCH
 
 config INTEL_SOC_PMIC
        bool "Support for Intel Atom SoC PMIC"
+       depends on GPIOLIB
        depends on I2C=y
        select MFD_CORE
        select REGMAP_I2C
index 7b50b6b208a5b04bf578804590add7117d21deb0..a00ddd93dc15f250847f81e136edd4454ea33895 100644 (file)
 #include <linux/acpi.h>
 #include <linux/regmap.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/gpio/machine.h>
+#include <linux/pwm.h>
 #include "intel_soc_pmic_core.h"
 
+/* Lookup table for the Panel Enable/Disable line as GPIO signals */
+static struct gpiod_lookup_table panel_gpio_table = {
+       /* Intel GFX is consumer */
+       .dev_id = "0000:00:02.0",
+       .table = {
+               /* Panel EN/DISABLE */
+               GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+       },
+};
+
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+       PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
 static int intel_soc_pmic_find_gpio_irq(struct device *dev)
 {
        struct gpio_desc *desc;
@@ -85,6 +102,12 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
        if (ret)
                dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
 
+       /* Add lookup table binding for Panel Control to the GPIO Chip */
+       gpiod_add_lookup_table(&panel_gpio_table);
+
+       /* Add lookup table for crc-pwm */
+       pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
        ret = mfd_add_devices(dev, -1, config->cell_dev,
                              config->n_cell_devs, NULL, 0,
                              regmap_irq_get_domain(pmic->irq_chip_data));
@@ -104,6 +127,12 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
 
        regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
 
+       /* Remove lookup table for Panel Control from the GPIO Chip */
+       gpiod_remove_lookup_table(&panel_gpio_table);
+
+       /* remove crc-pwm lookup table */
+       pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
        mfd_remove_devices(&i2c->dev);
 
        return 0;
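
[editor note] The two tables registered above bind the PMIC's panel-enable GPIO and its PWM to dev_id "0000:00:02.0", so the i915 driver can resolve both purely by connection ID. A hedged sketch of that consumer side; foo_* is hypothetical, gpiod_get()/pwm_get() are the stock kernel APIs:

/* Sketch only: assumes <linux/gpio/consumer.h> and <linux/pwm.h>. */
static int foo_acquire_panel_resources(struct device *dev)
{
	struct gpio_desc *panel;
	struct pwm_device *pwm;

	/* Matches GPIO_LOOKUP("gpio_crystalcove", 94, "panel", ...). */
	panel = gpiod_get(dev, "panel", GPIOD_OUT_HIGH);
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	/* Matches PWM_LOOKUP(..., "pwm_backlight", ...). */
	pwm = pwm_get(dev, "pwm_backlight");
	if (IS_ERR(pwm)) {
		gpiod_put(panel);
		return PTR_ERR(pwm);
	}

	return 0;
}
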
index 7436075e89832b3271cbe337f968e4b6a9a2e988..4a7494872da245abf1db040d8d68acec812e8d33 100644 (file)
@@ -109,6 +109,9 @@ static struct mfd_cell crystal_cove_dev[] = {
        {
                .name = "crystal_cove_pmic",
        },
+       {
+               .name = "crystal_cove_pwm",
+       },
 };
 
 static const struct regmap_config crystal_cove_regmap_config = {
index b1541f40fd8d19cc0d1ef6a39e86b1dbe8d573fa..948d9abd27f1159355d60705727683b9e0deb4d1 100644 (file)
@@ -111,6 +111,13 @@ config PWM_CLPS711X
          To compile this driver as a module, choose M here: the module
          will be called pwm-clps711x.
 
+config PWM_CRC
+       bool "Intel Crystalcove (CRC) PWM support"
+       depends on X86 && INTEL_SOC_PMIC
+       help
+         Generic PWM framework driver for Crystalcove (CRC) PMIC-based PWM
+         control.
+
 config PWM_EP93XX
        tristate "Cirrus Logic EP93xx PWM support"
        depends on ARCH_EP93XX
index ec50eb5b5a8fd72e1745f4d43801652d73afb65a..d186f35a65388d532709ee1d323f9241f3e97f35 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_PWM_BCM_KONA)      += pwm-bcm-kona.o
 obj-$(CONFIG_PWM_BCM2835)      += pwm-bcm2835.o
 obj-$(CONFIG_PWM_BFIN)         += pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)     += pwm-clps711x.o
+obj-$(CONFIG_PWM_CRC)          += pwm-crc.o
 obj-$(CONFIG_PWM_EP93XX)       += pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)      += pwm-fsl-ftm.o
 obj-$(CONFIG_PWM_IMG)          += pwm-img.o
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
new file mode 100644 (file)
index 0000000..7101c70
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pwm.h>
+
+#define PWM0_CLK_DIV           0x4B
+#define  PWM_OUTPUT_ENABLE     BIT(7)
+#define  PWM_DIV_CLK_0         0x00 /* DIVIDECLK = BASECLK */
+#define  PWM_DIV_CLK_100       0x63 /* DIVIDECLK = BASECLK/100 */
+#define  PWM_DIV_CLK_128       0x7F /* DIVIDECLK = BASECLK/128 */
+
+#define PWM0_DUTY_CYCLE                0x4E
+#define BACKLIGHT_EN           0x51
+
+#define PWM_MAX_LEVEL          0xFF
+
+#define PWM_BASE_CLK           6000000  /* 6 MHz */
+#define PWM_MAX_PERIOD_NS      21333    /* 46.875 kHz */
+
+/**
+ * struct crystalcove_pwm - Crystal Cove PWM controller
+ * @chip: the abstract pwm_chip structure.
+ * @regmap: the regmap from the parent device.
+ */
+struct crystalcove_pwm {
+       struct pwm_chip chip;
+       struct regmap *regmap;
+};
+
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+{
+       return container_of(pc, struct crystalcove_pwm, chip);
+}
+
+static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+       regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+
+       return 0;
+}
+
+static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+       regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+}
+
+static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
+                         int duty_ns, int period_ns)
+{
+       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+       struct device *dev = crc_pwm->chip.dev;
+       int level;
+
+       if (period_ns > PWM_MAX_PERIOD_NS) {
+               dev_err(dev, "un-supported period_ns\n");
+               return -EINVAL;
+       }
+
+       if (pwm->period != period_ns) {
+               int clk_div;
+
+               /* changing the clk divisor, need to disable first */
+               crc_pwm_disable(c, pwm);
+               clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+
+               regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+                                       clk_div | PWM_OUTPUT_ENABLE);
+
+               /* re-enable */
+               crc_pwm_enable(c, pwm);
+       }
+
+       /* change the pwm duty cycle */
+       level = duty_ns * PWM_MAX_LEVEL / period_ns;
+       regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+
+       return 0;
+}
+
+static const struct pwm_ops crc_pwm_ops = {
+       .config = crc_pwm_config,
+       .enable = crc_pwm_enable,
+       .disable = crc_pwm_disable,
+};
+
+static int crystalcove_pwm_probe(struct platform_device *pdev)
+{
+       struct crystalcove_pwm *pwm;
+       struct device *dev = pdev->dev.parent;
+       struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+       if (!pwm)
+               return -ENOMEM;
+
+       pwm->chip.dev = &pdev->dev;
+       pwm->chip.ops = &crc_pwm_ops;
+       pwm->chip.base = -1;
+       pwm->chip.npwm = 1;
+
+       /* get the PMIC regmap */
+       pwm->regmap = pmic->regmap;
+
+       platform_set_drvdata(pdev, pwm);
+
+       return pwmchip_add(&pwm->chip);
+}
+
+static int crystalcove_pwm_remove(struct platform_device *pdev)
+{
+       struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
+
+       return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver crystalcove_pwm_driver = {
+       .probe = crystalcove_pwm_probe,
+       .remove = crystalcove_pwm_remove,
+       .driver = {
+               .name = "crystal_cove_pwm",
+       },
+};
+
+builtin_platform_driver(crystalcove_pwm_driver);
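
[editor note] For a concrete feel for crc_pwm_config(): with period_ns = 10000 and duty_ns = 2500, clk_div = 6000000 * 10000 / 1000000000 = 60 and level = 2500 * 255 / 10000 = 63. A hedged consumer sketch built on those numbers; foo_* is hypothetical, the PWM calls are the standard framework API:

/* Sketch only: assumes <linux/pwm.h> and a matching pwm_lookup entry. */
static int foo_backlight_on(struct device *dev)
{
	struct pwm_device *pwm;
	int ret;

	pwm = pwm_get(dev, "pwm_backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* 100 kHz, 25% duty: clk_div = 60, level = 63 per the math above. */
	ret = pwm_config(pwm, 2500, 10000);
	if (!ret)
		ret = pwm_enable(pwm);
	if (ret)
		pwm_put(pwm);

	return ret;
}
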
index 5aa519711e0b6fcb1212a517e50be5afac8ed7d7..5908848d86b38a331bd9c11556de5d05da77aaf2 100644 (file)
@@ -137,17 +137,18 @@ void drm_err(const char *format, ...);
 /*@{*/
 
 /* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP     0x1
-#define DRIVER_PCI_DMA     0x8
-#define DRIVER_SG          0x10
-#define DRIVER_HAVE_DMA    0x20
-#define DRIVER_HAVE_IRQ    0x40
-#define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_GEM         0x1000
-#define DRIVER_MODESET     0x2000
-#define DRIVER_PRIME       0x4000
-#define DRIVER_RENDER      0x8000
-#define DRIVER_ATOMIC      0x10000
+#define DRIVER_USE_AGP                 0x1
+#define DRIVER_PCI_DMA                 0x8
+#define DRIVER_SG                      0x10
+#define DRIVER_HAVE_DMA                        0x20
+#define DRIVER_HAVE_IRQ                        0x40
+#define DRIVER_IRQ_SHARED              0x80
+#define DRIVER_GEM                     0x1000
+#define DRIVER_MODESET                 0x2000
+#define DRIVER_PRIME                   0x4000
+#define DRIVER_RENDER                  0x8000
+#define DRIVER_ATOMIC                  0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT      0x20000
 
 /***********************************************************************/
 /** \name Macros to make printk easier */
@@ -675,7 +676,6 @@ struct drm_minor {
 
        /* currently active master for this node. Protected by master_mutex */
        struct drm_master *master;
-       struct drm_mode_group mode_group;
 };
 
 
index 8a3a913320ebb10647fea413430a3871f1f34eb1..e67aeac2aee05c077eec5a47abc9d73eef6831d6 100644 (file)
@@ -166,7 +166,8 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
 {
-       return state->mode_changed || state->active_changed;
+       return state->mode_changed || state->active_changed ||
+              state->connectors_changed;
 }
 
 
index cc1fee8a12d0a5e71babb3626644604618b987ba..11266d147a29409b12718a1b575520579d588fde 100644 (file)
@@ -87,8 +87,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-                                     int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+                                    int mode);
 
 /* default implementations for state handling */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
index 57ca8cc383a615344498202384b1b814911bc766..5746569651263dc938b9ee7abeede90ce48ea301 100644 (file)
@@ -255,12 +255,13 @@ struct drm_atomic_state;
  * @crtc: backpointer to the CRTC
  * @enable: whether the CRTC should be enabled, gates all other state
  * @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  *     update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
  * @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@ struct drm_crtc_state {
        bool planes_changed : 1;
        bool mode_changed : 1;
        bool active_changed : 1;
+       bool connectors_changed : 1;
 
        /* attached planes bitmask:
         * WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@ struct drm_connector_state {
  * etc.
  */
 struct drm_connector_funcs {
-       void (*dpms)(struct drm_connector *connector, int mode);
+       int (*dpms)(struct drm_connector *connector, int mode);
        void (*save)(struct drm_connector *connector);
        void (*restore)(struct drm_connector *connector);
        void (*reset)(struct drm_connector *connector);
@@ -1017,29 +1019,6 @@ struct drm_mode_config_funcs {
        void (*atomic_state_free)(struct drm_atomic_state *state);
 };
 
-/**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state.  But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
-       uint32_t num_crtcs;
-       uint32_t num_encoders;
-       uint32_t num_connectors;
-
-       /* list of object IDs for this group */
-       uint32_t *id_list;
-};
-
 /**
  * struct drm_mode_config - Mode configuration control structure
  * @mutex: mutex protecting KMS related lists and structures
@@ -1324,9 +1303,6 @@ extern const char *drm_get_tv_select_name(int val);
 extern void drm_fb_release(struct drm_file *file_priv);
 extern void drm_property_destroy_user_blobs(struct drm_device *dev,
                                             struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
                                 struct i2c_adapter *adapter);
@@ -1579,8 +1555,45 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
 }
 
 /* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
-       list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
                if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
+#define drm_for_each_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+       list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+       /*
+        * The connector hotadd/remove code currently grabs both locks when
+        * updating lists. Hence readers need only hold either of them to be
+        * safe and the check amounts to
+        *
+        * WARN_ON(not_holding(A) && not_holding(B)).
+        */
+       WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+               !drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+       for (assert_drm_connector_list_read_locked(&(dev)->mode_config),        \
+            connector = list_first_entry(&(dev)->mode_config.connector_list,   \
+                                         struct drm_connector, head);          \
+            &connector->head != (&(dev)->mode_config.connector_list);          \
+            connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+       list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+       for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),            \
+            fb = list_first_entry(&(dev)->mode_config.fb_list, \
+                                         struct drm_framebuffer, head);        \
+            &fb->head != (&(dev)->mode_config.fb_list);                        \
+            fb = list_next_entry(fb, head))
+
 #endif /* __DRM_CRTC_H__ */
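
[editor note] These iterators replace open-coded list_for_each_entry() walks over mode_config; drm_for_each_connector() additionally asserts on every use that one of the two connector-list locks is held. A hedged usage sketch:

/* Counts connected connectors under mode_config.mutex, which satisfies
 * assert_drm_connector_list_read_locked().
 */
static int foo_count_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	int n = 0;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		if (connector->status == connector_status_connected)
			n++;
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}
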
index 918aa68b5199d54501a2a9d68404e44388e9e04e..2a747a91fdede982354438e1c48fc1a332d06885 100644 (file)
@@ -108,8 +108,10 @@ struct drm_crtc_helper_funcs {
        /* atomic helpers */
        int (*atomic_check)(struct drm_crtc *crtc,
                            struct drm_crtc_state *state);
-       void (*atomic_begin)(struct drm_crtc *crtc);
-       void (*atomic_flush)(struct drm_crtc *crtc);
+       void (*atomic_begin)(struct drm_crtc *crtc,
+                            struct drm_crtc_state *old_crtc_state);
+       void (*atomic_flush)(struct drm_crtc *crtc,
+                            struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -190,7 +192,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
index 2e86f642fc3390f5909a87d2edeb6bdb97a626f6..94898f6ea02af17d84dbbe46f0c8e8818b5b9124 100644 (file)
 
 #define DP_TEST_SINK_MISC                  0x246
 # define DP_TEST_CRC_SUPPORTED             (1 << 5)
-# define DP_TEST_COUNT_MASK                0x7
+# define DP_TEST_COUNT_MASK                0xf
 
 #define DP_TEST_RESPONSE                   0x260
 # define DP_TEST_ACK                       (1 << 0)
index b08bdade60025679eeecff76746d98cd51f2e7df..9e9bddaa58a50792ddf0df90979aa5689a300d4e 100644 (file)
@@ -3,8 +3,8 @@
 #ifndef _DRM_INTEL_GTT_H
 #define        _DRM_INTEL_GTT_H
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-                  phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+                  phys_addr_t *mappable_base, u64 *mappable_end);
 
 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
                     struct agp_bridge_data *bridge);
index e2706140eafffe99017f175b564ef219b2d5fa0d..c0d712d22b079ebc16129ef2618f41762276cf5e 100644 (file)
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
 }
 
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
 
 #endif /* __LINUX_GPIO_MACHINE_H */
index 2f295cde657e214e075365d6bbea025502019551..8c5e8b91a3cbbb6b35542e57b64365b608c4073b 100644 (file)
 /* color index */
 #define DRM_FORMAT_C8          fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
+/* 8 bpp Red */
+#define DRM_FORMAT_R8          fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88                fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88                fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
 /* 8 bpp RGB */
 #define DRM_FORMAT_RGB332      fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
 #define DRM_FORMAT_BGR233      fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
index db809b722985d3a1a03480cc1e3d36a656af2d2d..dbd16a2d37db6defcadf264d5f5a5b474d819be2 100644 (file)
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_REVISION              32
 #define I915_PARAM_SUBSLICE_TOTAL       33
 #define I915_PARAM_EU_TOTAL             34
+#define I915_PARAM_HAS_GPU_RESET        35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
 typedef struct drm_i915_getparam {
-       int param;
+       s32 param;
+       /*
+        * WARNING: Using pointers instead of fixed-size u64 means we need to write
+        * compat32 code. Don't repeat this mistake.
+        */
        int __user *value;
 } drm_i915_getparam_t;
 
@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_BSD_RING1            (1<<13)
 #define I915_EXEC_BSD_RING2            (2<<13)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ *  the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@ struct drm_i915_gem_context_param {
        __u32 size;
        __u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
        __u64 value;
 };
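
[editor note] The new NO_ZEROMAP parameter is set through the existing context-param ioctl. A hedged userspace sketch; fd and ctx_id are assumed to come from earlier open/context-create calls, and the ioctl name and struct are the stock i915 uAPI:

/* Sketch only: assumes <xf86drm.h> and <drm/i915_drm.h>. */
struct drm_i915_gem_context_param p = {
	.ctx_id = ctx_id,			/* from context create */
	.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
	.value = 1,				/* refuse GTT offset 0 */
};

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
	perror("I915_CONTEXT_PARAM_NO_ZEROMAP");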