Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into...
[firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/i915/intel_display.c
index 453d444fdfbd8b2c8a7c7f0fa0fd23b7fd1b6ae3..37514a52b05c71040b7b771c78f256c52aa93bcd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
  *     Eric Anholt <eric@anholt.net>
  */
 
+#include <linux/dmi.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/input.h>
@@ -360,6 +361,82 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
        .find_pll = intel_find_pll_ironlake_dp,
 };
 
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+       unsigned long flags;
+       u32 val = 0;
+
+       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+       if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+               DRM_ERROR("DPIO idle wait timed out\n");
+               goto out_unlock;
+       }
+
+       I915_WRITE(DPIO_REG, reg);
+       I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+                  DPIO_BYTE);
+       if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+               DRM_ERROR("DPIO read wait timed out\n");
+               goto out_unlock;
+       }
+       val = I915_READ(DPIO_DATA);
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+       return val;
+}
+
+static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
+                            u32 val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+       if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+               DRM_ERROR("DPIO idle wait timed out\n");
+               goto out_unlock;
+       }
+
+       I915_WRITE(DPIO_DATA, val);
+       I915_WRITE(DPIO_REG, reg);
+       I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+                  DPIO_BYTE);
+       if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+               DRM_ERROR("DPIO write wait timed out\n");
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Reset the DPIO config */
+       I915_WRITE(DPIO_CTL, 0);
+       POSTING_READ(DPIO_CTL);
+       I915_WRITE(DPIO_CTL, 1);
+       POSTING_READ(DPIO_CTL);
+}
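A minimal usage sketch (not part of this patch; vlv_dpio_rmw() is a hypothetical
name) showing how the two DPIO accessors above compose into a read-modify-write
helper. Note that each accessor takes dev_priv->dpio_lock internally, so the
pair below is not atomic as written.

static void vlv_dpio_rmw(struct drm_i915_private *dev_priv, int reg,
			 u32 clear, u32 set)
{
	u32 val = intel_dpio_read(dev_priv, reg);

	/* Drop the bits in 'clear', then OR in the bits from 'set'. */
	intel_dpio_write(dev_priv, reg, (val & ~clear) | set);
}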
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+       DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+       {
+               .callback = intel_dual_link_lvds_callback,
+               .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+               },
+       },
+       { }     /* terminating entry */
+};
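For context: dmi_check_system() walks this table, runs the callback for each
entry whose DMI_MATCH fields all match the running system, and returns a
non-zero count when at least one entry matched, which is what the new check in
is_dual_link_lvds() below relies on.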
+
 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
                              unsigned int reg)
 {
@@ -369,6 +446,9 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
        if (i915_lvds_channel_mode > 0)
                return i915_lvds_channel_mode == 2;
 
+       if (dmi_check_system(intel_dual_link_lvds))
+               return true;
+
        if (dev_priv->lvds_val)
                val = dev_priv->lvds_val;
        else {
@@ -3487,6 +3567,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+       return 400000; /* FIXME */
+}
+
 static int i945_get_display_clock_speed(struct drm_device *dev)
 {
        return 400000;
@@ -4172,6 +4257,81 @@ static bool g4x_compute_srwm(struct drm_device *dev,
                              display, cursor);
 }
 
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+                                    int plane,
+                                    int *plane_prec_mult,
+                                    int *plane_dl,
+                                    int *cursor_prec_mult,
+                                    int *cursor_dl)
+{
+       struct drm_crtc *crtc;
+       int clock, pixel_size;
+       int entries;
+
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       if (crtc->fb == NULL || !crtc->enabled)
+               return false;
+
+       clock = crtc->mode.clock;       /* VESA DOT Clock */
+       pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
+
+       entries = (clock / 1000) * pixel_size;
+       *plane_prec_mult = (entries > 256) ?
+               DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+       *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+                                                    pixel_size);
+
+       entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
+       *cursor_prec_mult = (entries > 256) ?
+               DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+       *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+       return true;
+}
+
+/*
+ * Update drain latency registers of the memory arbiter
+ *
+ * The Valleyview SoC has a new memory arbiter whose drain latency registers
+ * need to be programmed. Each plane has a drain latency multiplier and a
+ * drain latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int planea_prec, planea_dl, planeb_prec, planeb_dl;
+       int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+       int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+                                                       either 16 or 32 */
+
+       /* For plane A, Cursor A */
+       if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+                                     &cursor_prec_mult, &cursora_dl)) {
+               cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+                       DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+               planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+                       DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+               I915_WRITE(VLV_DDL1, cursora_prec |
+                               (cursora_dl << DDL_CURSORA_SHIFT) |
+                               planea_prec | planea_dl);
+       }
+
+       /* For plane B, Cursor B */
+       if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+                                     &cursor_prec_mult, &cursorb_dl)) {
+               cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+                       DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+               planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+                       DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+               I915_WRITE(VLV_DDL2, cursorb_prec |
+                               (cursorb_dl << DDL_CURSORB_SHIFT) |
+                               planeb_prec | planeb_dl);
+       }
+}
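A worked example of the drain latency arithmetic above, using illustrative
numbers and assuming DRAIN_LATENCY_PRECISION_32/16 evaluate to 32 and 16:

	148500 kHz dot clock, 32bpp plane:
		entries  = (148500 / 1000) * 4 = 592        (> 256, precision 32)
		plane_dl = (64 * 32 * 4) / (148 * 4) = 13
	65000 kHz dot clock, 16bpp plane:
		entries  = (65000 / 1000) * 2 = 130         (<= 256, precision 16)
		plane_dl = (64 * 16 * 4) / (65 * 2) = 31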
+
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
 static void valleyview_update_wm(struct drm_device *dev)
@@ -4182,6 +4342,8 @@ static void valleyview_update_wm(struct drm_device *dev)
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
 
+       vlv_update_drain_latency(dev);
+
        if (g4x_compute_wm0(dev, 0,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
@@ -4830,8 +4992,17 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 
        crtc = intel_get_crtc_for_plane(dev, plane);
        clock = crtc->mode.clock;
+       if (!clock) {
+               *sprite_wm = 0;
+               return false;
+       }
 
        line_time_us = (sprite_width * 1000) / clock;
+       if (!line_time_us) {
+               *sprite_wm = 0;
+               return false;
+       }
+
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = sprite_width * pixel_size;
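To see why both new guards are needed (illustrative numbers): with
sprite_width = 64 and clock = 148500 kHz, line_time_us = (64 * 1000) / 148500
rounds down to 0, which would make the latency_ns / line_time_us computation
above divide by zero; a pipe that is off reports clock = 0 and would hit the
first division instead.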
 
@@ -5695,7 +5866,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        temp |= DREF_SSC1_ENABLE;
-               }
+               } else
+                       temp &= ~DREF_SSC1_ENABLE;
 
                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -6433,7 +6605,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
        int i;
 
        /* The clocks have to be on to load the palette. */
-       if (!crtc->enabled)
+       if (!crtc->enabled || !intel_crtc->active)
                return;
 
        /* use legacy palette for Ironlake */
@@ -6819,7 +6991,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
        mode_cmd.height = mode->vdisplay;
        mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
                                                                bpp);
-       mode_cmd.pixel_format = 0;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
        return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
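For reference, drm_mode_legacy_fb_format() maps the legacy bpp/depth pair onto
a FOURCC code, so the framebuffer created here now carries a real format
instead of the hard-coded 0; for example, bpp=32/depth=24 yields
DRM_FORMAT_XRGB8888 and bpp=16/depth=16 yields DRM_FORMAT_RGB565.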
@@ -7736,6 +7908,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg, val;
 
+       /* Clear any frame start delays the BIOS may have left set for debugging */
+       for_each_pipe(pipe) {
+               reg = PIPECONF(pipe);
+               I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+       }
+
        if (HAS_PCH_SPLIT(dev))
                return;
 
@@ -8066,6 +8244,7 @@ int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_RGB332:
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
@@ -8370,7 +8549,7 @@ void intel_init_emon(struct drm_device *dev)
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-static bool intel_enable_rc6(struct drm_device *dev)
+static int intel_enable_rc6(struct drm_device *dev)
 {
        /*
         * Respect the kernel parameter if it is set
@@ -8388,11 +8567,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
         * Disable rc6 on Sandybridge
         */
        if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
-               return 0;
+               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+               return INTEL_RC6_ENABLE;
        }
-       DRM_DEBUG_DRIVER("RC6 enabled\n");
-       return 1;
+       DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+       return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -8402,6 +8581,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        u32 pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
        int cur_freq, min_freq, max_freq;
+       int rc6_mode;
        int i;
 
        /* Here begins a magic sequence of register writes to enable
@@ -8439,9 +8619,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       if (intel_enable_rc6(dev_priv->dev))
-               rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
-                       GEN6_RC_CTL_RC6_ENABLE;
+       rc6_mode = intel_enable_rc6(dev_priv->dev);
+       if (rc6_mode & INTEL_RC6_ENABLE)
+               rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+       if (rc6_mode & INTEL_RC6p_ENABLE)
+               rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+       if (rc6_mode & INTEL_RC6pp_ENABLE)
+               rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+                       (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+                       (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+                       (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
@@ -8719,18 +8910,86 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
+       /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+        * This implements the WaDisableRCZUnitClockGating workaround.
+        */
+       I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+       I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+       /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+       /* WaApplyL3ControlAndL3ChickenMode requires these two writes on Ivy Bridge */
+       I915_WRITE(GEN7_L3CNTLREG1,
+                       GEN7_WA_FOR_GEN7_L3_CONTROL);
+       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+                       GEN7_WA_L3_CHICKEN_MODE);
+
+       /* This is required by WaCatErrorRejectionIssue */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+       for_each_pipe(pipe) {
+               I915_WRITE(DSPCNTR(pipe),
+                          I915_READ(DSPCNTR(pipe)) |
+                          DISPPLANE_TRICKLE_FEED_DISABLE);
+               intel_flush_display_plane(dev_priv, pipe);
+       }
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe;
+       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+       I915_WRITE(WM3_LP_ILK, 0);
+       I915_WRITE(WM2_LP_ILK, 0);
+       I915_WRITE(WM1_LP_ILK, 0);
+
+       /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+        * This implements the WaDisableRCZUnitClockGating workaround.
+        */
+       I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
        I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+       /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+       /* WaApplyL3ControlAndL3ChickenMode requires these two writes on Ivy Bridge */
+       I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+       /* This is required by WaCatErrorRejectionIssue */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                  I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
        for_each_pipe(pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
                           DISPPLANE_TRICKLE_FEED_DISABLE);
                intel_flush_display_plane(dev_priv, pipe);
        }
+
+       I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
+                  (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
+                  PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
 }
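The CACHE_MODE_1 write above follows the masked-register convention: the upper
16 bits act as per-bit write enables for the lower 16, which is why
PIXEL_SUBSPAN_COLLECT_OPT_DISABLE is written both shifted into the mask half
and in the value half.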
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -8987,7 +9246,10 @@ static void intel_init_display(struct drm_device *dev)
        }
 
        /* Returns the core display clock speed */
-       if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+       if (IS_VALLEYVIEW(dev))
+               dev_priv->display.get_display_clock_speed =
+                       valleyview_get_display_clock_speed;
+       else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
@@ -9084,6 +9346,10 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.update_wm = NULL;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.update_wm = valleyview_update_wm;
+               dev_priv->display.init_clock_gating =
+                       valleyview_init_clock_gating;
+               dev_priv->display.force_wake_get = vlv_force_wake_get;
+               dev_priv->display.force_wake_put = vlv_force_wake_put;
        } else if (IS_PINEVIEW(dev)) {
                if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                                            dev_priv->is_ddr3,
@@ -9167,7 +9433,7 @@ static void quirk_pipea_force(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
-       DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+       DRM_INFO("applying pipe a force quirk\n");
 }
 
 /*
@@ -9177,6 +9443,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+       DRM_INFO("applying lvds SSC disable quirk\n");
 }
 
 /*
@@ -9187,6 +9454,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+       DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
 struct intel_quirk {
@@ -9367,6 +9635,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_rc6(dev);
 
+       if (IS_VALLEYVIEW(dev))
+               vlv_init_dpio(dev);
+
        mutex_unlock(&dev->struct_mutex);
 
        /* Disable the irq before mode object teardown, for the irq might