/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
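/*
 * Illustrative example (not a per-platform recommendation from this file):
 * a platform whose BIOS exposes both normal and deep RC6 could request them
 * with (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), leaving the deepest state off.
 */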
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int i;
        u32 fbc_ctl;

        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 32B or 64B units */
        if (IS_GEN2(dev))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;
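        /*
         * Illustrative arithmetic: a 2048-byte compressed pitch on gen3+
         * programs (2048 / 64) - 1 = 31, i.e. the register field holds
         * 64B units minus one.
         */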
        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        if (IS_GEN4(dev)) {
                u32 fbc_ctl2;

                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
                fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, crtc->y);
        }

        /* enable it... */
        fbc_ctl = I915_READ(FBC_CONTROL);
        fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
                      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;

        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */

        /* Blitter is part of Media powerwell on VLV. This parameter has
         * no impact on other platforms for now */
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);

        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;

        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;

        switch (dev_priv->fbc.threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev))
                dpfc_ctl |= obj->fence_reg;

        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
                sandybridge_blit_fbc_update(dev);
        }

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;

        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;

        switch (dev_priv->fbc.threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
                           I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

        sandybridge_blit_fbc_update(dev);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.fbc_enabled)
                return false;

        return dev_priv->display.fbc_enabled(dev);
}
void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!IS_GEN8(dev))
                return;

        I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->primary->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc);

                        dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
                        dev_priv->fbc.y = work->crtc->y;
                }

                dev_priv->fbc.fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
        if (dev_priv->fbc.fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc.fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                /* tasklet was killed before being run, clean up */
                kfree(dev_priv->fbc.fbc_work);

        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
        dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.enable_fbc)
                return;

        intel_cancel_fbc_work(dev_priv);

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
                dev_priv->display.enable_fbc(crtc);
                return;
        }

        work->crtc = crtc;
        work->fb = crtc->primary->fb;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

        dev_priv->fbc.fbc_work = work;

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_cancel_fbc_work(dev_priv);

        if (!dev_priv->display.disable_fbc)
                return;

        dev_priv->display.disable_fbc(dev);
        dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
                              enum no_fbc_reason reason)
{
        if (dev_priv->fbc.no_fbc_reason == reason)
                return false;

        dev_priv->fbc.no_fbc_reason = reason;
        return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct drm_i915_gem_object *obj;
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;

        if (!HAS_FBC(dev)) {
                set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
                return;
        }

        if (!i915.powersave) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                return;
        }

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        for_each_crtc(dev, tmp_crtc) {
                if (intel_crtc_active(tmp_crtc) &&
                    to_intel_crtc(tmp_crtc)->primary_enabled) {
                        if (crtc) {
                                if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
                                        DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->primary->fb == NULL) {
                if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
                        DRM_DEBUG_KMS("no output, disabling\n");
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->primary->fb;
        obj = intel_fb_obj(fb);
        adjusted_mode = &intel_crtc->config.adjusted_mode;

        if (i915.enable_fbc < 0) {
                if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
                        DRM_DEBUG_KMS("disabled per chip default\n");
                goto out_disable;
        }
        if (!i915.enable_fbc) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                goto out_disable;
        }
        if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
            (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
                if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
                        DRM_DEBUG_KMS("mode incompatible with compression, "
                                      "disabling\n");
                goto out_disable;
        }

        if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
                max_width = 4096;
                max_height = 4096;
        } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
                max_width = 2048;
                max_height = 1536;
        }
        if (intel_crtc->config.pipe_src_w > max_width ||
            intel_crtc->config.pipe_src_h > max_height) {
                if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
        if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
            intel_crtc->plane != PLANE_A) {
                if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
                        DRM_DEBUG_KMS("plane not A, disabling compression\n");
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
                        DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                goto out_disable;
        }
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
            to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
                if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
                        DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master())
                goto out_disable;

        if (i915_gem_stolen_setup_compression(dev, obj->base.size,
                                              drm_format_plane_cpp(fb->pixel_format, 0))) {
                if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
                        DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
                goto out_disable;
        }

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->fbc.plane == intel_crtc->plane &&
            dev_priv->fbc.fb_id == fb->base.id &&
            dev_priv->fbc.y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two FBC valid configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc);
        dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
        i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->ips.r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->ips.c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->ips.c_m = 1;
        } else {
                dev_priv->ips.c_m = 2;
        }
}
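/*
 * Column key for the table below, assuming the struct cxsr_latency field
 * order used by this driver:
 * { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *   display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable }
 */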
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
                                                         int is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        struct drm_device *dev = dev_priv->dev;
        u32 val;

        if (IS_VALLEYVIEW(dev)) {
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
        } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
        } else if (IS_PINEVIEW(dev)) {
                val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
                val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
                I915_WRITE(DSPFW3, val);
        } else if (IS_I945G(dev) || IS_I945GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
        } else if (IS_I915GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
        } else {
                return;
        }

        DRM_DEBUG_KMS("memory self-refresh is %s\n",
                      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
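/*
 * Illustrative arithmetic (not a platform value): at a 148.5 MHz pixel
 * clock and 4 bytes per pixel, 5000 ns of latency corresponds to roughly
 * 148 * 4 * 5 = ~2960 bytes that the FIFO must still hold when a fetch
 * is issued.
 */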
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (plane)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
        .fifo_size = G4X_FIFO_SIZE,
        .max_wm = G4X_MAX_WM,
        .default_wm = G4X_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
        .fifo_size = VALLEYVIEW_FIFO_SIZE,
        .max_wm = VALLEYVIEW_MAX_WM,
        .default_wm = VALLEYVIEW_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        .fifo_size = I945_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
        .fifo_size = I915_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM/2,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
        .fifo_size = I830_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
                                        int fifo_size,
                                        int pixel_size,
                                        unsigned long latency_ns)
{
        long entries_required, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

        DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

        wm_size = fifo_size - (entries_required + wm->guard_size);

        DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;

        /*
         * Bspec seems to indicate that the value shouldn't be lower than
         * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
         * Lets go for 8 which is the burst size since certain platforms
         * already use a hardcoded 8 (which is what the spec says should be
         * the default).
         */
        if (wm_size <= 8)
                wm_size = 8;

        return wm_size;
}
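/*
 * Worked example for intel_calculate_wm() (illustrative numbers only):
 * clock_in_khz = 148500, pixel_size = 4, latency_ns = 5000,
 * cacheline_size = 64, fifo_size = 96, guard_size = 2:
 *   entries_required = (148 * 4 * 5000) / 1000 = 2960 bytes
 *                    -> DIV_ROUND_UP(2960, 64) = 47 cachelines
 *   wm_size          = 96 - (47 + 2) = 47
 */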
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
        struct drm_crtc *crtc, *enabled = NULL;

        for_each_crtc(dev, crtc) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                intel_set_memory_cxsr(dev_priv, false);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode;
                int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
                int clock;

                adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
                clock = adjusted_mode->crtc_clock;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= wm << DSPFW_SR_SHIFT;
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= wm & DSPFW_HPLL_SR_MASK;
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                intel_set_memory_cxsr(dev_priv, true);
        } else {
                intel_set_memory_cxsr(dev_priv, false);
        }
}
static bool g4x_compute_wm0(struct drm_device *dev,
                            int plane,
                            const struct intel_watermark_params *display,
                            int display_latency_ns,
                            const struct intel_watermark_params *cursor,
                            int cursor_latency_ns,
                            int *plane_wm,
                            int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
        }

        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->primary->fb->bits_per_pixel / 8;

        /* Use the small buffer method to calculate plane watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, display->cacheline_size);
        *plane_wm = entries + display->guard_size;
        if (*plane_wm > (int)display->max_wm)
                *plane_wm = display->max_wm;

        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
        if (*cursor_wm > (int)cursor->max_wm)
                *cursor_wm = (int)cursor->max_wm;

        return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
                             int plane,
                             int latency_ns,
                             const struct intel_watermark_params *display,
                             const struct intel_watermark_params *cursor,
                             int *display_wm, int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
        int entries;

        if (!latency_ns) {
                *display_wm = *cursor_wm = 0;
                return false;
        }

        crtc = intel_get_crtc_for_plane(dev, plane);
        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->primary->fb->bits_per_pixel / 8;

        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;

        /* Use the minimum of the small and large buffer method for primary */
        small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        large = line_count * line_size;

        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;

        /* calculate the self-refresh watermark for display cursor */
        entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;

        return g4x_check_srwm(dev,
                              *display_wm, *cursor_wm,
                              display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
                                      int pixel_size,
                                      int *prec_mult,
                                      int *drain_latency)
{
        int entries;
        int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

        if (WARN(clock == 0, "Pixel clock is zero!\n"))
                return false;

        if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
                return false;

        entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
        *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
                                       DRAIN_LATENCY_PRECISION_32;
        *drain_latency = (64 * (*prec_mult) * 4) / entries;

        if (*drain_latency > DRAIN_LATENCY_MASK)
                *drain_latency = DRAIN_LATENCY_MASK;

        return true;
}
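/*
 * Worked example for vlv_compute_drain_latency() (illustrative): at
 * clock = 148500 kHz and pixel_size = 4, entries = 149 * 4 = 596 > 128,
 * so the 64x precision multiplier is chosen and
 * drain_latency = (64 * 64 * 4) / 596 = 27.
 */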
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pixel_size;
        int drain_latency;
        enum pipe pipe = intel_crtc->pipe;
        int plane_prec, prec_mult, plane_dl;

        plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
                   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
                   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

        if (!intel_crtc_active(crtc)) {
                I915_WRITE(VLV_DDL(pipe), plane_dl);
                return;
        }

        /* Primary plane Drain Latency */
        pixel_size = crtc->primary->fb->bits_per_pixel / 8;     /* BPP */
        if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
                plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
                                           DDL_PLANE_PRECISION_64 :
                                           DDL_PLANE_PRECISION_32;
                plane_dl |= plane_prec | drain_latency;
        }

        /* Cursor Drain Latency
         * BPP is always 4 for cursor
         */
        pixel_size = 4;

        /* Program cursor DL only if it is enabled */
        if (intel_crtc->cursor_base &&
            vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
                plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
                                           DDL_CURSOR_PRECISION_64 :
                                           DDL_CURSOR_PRECISION_32;
                plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
        }

        I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void valleyview_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
        bool cxsr_enabled;

        vlv_update_drain_latency(crtc);

        if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, pessimal_latency_ns,
                            &valleyview_cursor_wm_info, pessimal_latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, pessimal_latency_ns,
                            &valleyview_cursor_wm_info, pessimal_latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &plane_sr, &ignore_cursor_sr) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             2*sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &ignore_plane_sr, &cursor_sr)) {
                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
                      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   (planea_wm << DSPFW_PLANEA_SHIFT));
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, planec_wm;
        int cursora_wm, cursorb_wm, cursorc_wm;
        int plane_sr, cursor_sr;
        int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
        bool cxsr_enabled;

        vlv_update_drain_latency(crtc);

        if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, pessimal_latency_ns,
                            &valleyview_cursor_wm_info, pessimal_latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, pessimal_latency_ns,
                            &valleyview_cursor_wm_info, pessimal_latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (g4x_compute_wm0(dev, PIPE_C,
                            &valleyview_wm_info, pessimal_latency_ns,
                            &valleyview_cursor_wm_info, pessimal_latency_ns,
                            &planec_wm, &cursorc_wm))
                enabled |= 1 << PIPE_C;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &plane_sr, &ignore_cursor_sr) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             2*sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &ignore_plane_sr, &cursor_sr)) {
                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
                      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
                      "SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      planec_wm, cursorc_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   (planea_wm << DSPFW_PLANEA_SHIFT));
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
        I915_WRITE(DSPFW9_CHV,
                   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
                                              DSPFW_CURSORC_MASK)) |
                   (planec_wm << DSPFW_PLANEC_SHIFT) |
                   (cursorc_wm << DSPFW_CURSORC_SHIFT));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
                                        struct drm_crtc *crtc,
                                        uint32_t sprite_width,
                                        uint32_t sprite_height,
                                        int pixel_size,
                                        bool enabled, bool scaled)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = to_intel_plane(plane)->pipe;
        int sprite = to_intel_plane(plane)->plane;
        int drain_latency;
        int plane_prec;
        int sprite_dl;
        int prec_mult;

        sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
                    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

        if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
                                                 &drain_latency)) {
                plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
                                           DDL_SPRITE_PRECISION_64(sprite) :
                                           DDL_SPRITE_PRECISION_32(sprite);
                sprite_dl |= plane_prec |
                             (drain_latency << DDL_SPRITE_SHIFT(sprite));
        }

        I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
        bool cxsr_enabled;

        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
                      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   (planea_wm << DSPFW_PLANEA_SHIFT));
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
        bool cxsr_enabled;

        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
        if (crtc) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 12000;
                const struct drm_display_mode *adjusted_mode =
                        &to_intel_crtc(crtc)->config.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
                int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = max(htotal * 1000 / clock, 1);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
                srwm = I965_FIFO_SIZE - entries;
                if (srwm < 0)
                        srwm = 1;
                srwm &= 0x1ff;
                DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
                              entries, srwm);

                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * to_intel_crtc(crtc)->cursor_width;
                entries = DIV_ROUND_UP(entries,
                                       i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
                        (entries + i965_cursor_wm_info.guard_size);

                if (cursor_sr > i965_cursor_wm_info.max_wm)
                        cursor_sr = i965_cursor_wm_info.max_wm;

                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);

                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                /* Turn off self refresh if both pipes are enabled */
                intel_set_memory_cxsr(dev_priv, false);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
                      srwm);

        /* 965 has limitations... */
        I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
                   (8 << DSPFW_CURSORB_SHIFT) |
                   (8 << DSPFW_PLANEB_SHIFT) |
                   (8 << DSPFW_PLANEA_SHIFT));
        I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
                   (8 << DSPFW_PLANEC_SHIFT_OLD));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
        int cwm, srwm = 1;
        int fifo_size;
        int planea_wm, planeb_wm;
        struct drm_crtc *crtc, *enabled = NULL;

        if (IS_I945GM(dev))
                wm_info = &i945_wm_info;
        else if (!IS_GEN2(dev))
                wm_info = &i915_wm_info;
        else
                wm_info = &i830_a_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->primary->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;

                adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
                planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
                enabled = crtc;
        } else {
                planea_wm = fifo_size - wm_info->guard_size;
                if (planea_wm > (long)wm_info->max_wm)
                        planea_wm = wm_info->max_wm;
        }

        if (IS_GEN2(dev))
                wm_info = &i830_bc_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->primary->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;

                adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
                planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
                if (enabled == NULL)
                        enabled = crtc;
                else
                        enabled = NULL;
        } else {
                planeb_wm = fifo_size - wm_info->guard_size;
                if (planeb_wm > (long)wm_info->max_wm)
                        planeb_wm = wm_info->max_wm;
        }

        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

        if (IS_I915GM(dev) && enabled) {
                struct drm_i915_gem_object *obj;

                obj = intel_fb_obj(enabled->primary->fb);

                /* self-refresh seems busted with untiled */
                if (obj->tiling_mode == I915_TILING_NONE)
                        enabled = NULL;
        }

        /*
         * Overlay gets an aggressive default since video jitter is bad.
         */
        cwm = 2;

        /* Play safe and disable self-refresh before adjusting watermarks. */
        intel_set_memory_cxsr(dev_priv, false);

        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 6000;
                const struct drm_display_mode *adjusted_mode =
                        &to_intel_crtc(enabled)->config.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
                int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = max(htotal * 1000 / clock, 1);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
                srwm = wm_info->fifo_size - entries;
                if (srwm < 0)
                        srwm = 1;

                if (IS_I945G(dev) || IS_I945GM(dev))
                        I915_WRITE(FW_BLC_SELF,
                                   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
                else if (IS_I915GM(dev))
                        I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
                      planea_wm, planeb_wm, cwm, srwm);

        fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        fwater_hi = (cwm & 0x1f);

        /* Set request length to 8 cachelines per fetch */
        fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
        fwater_hi = fwater_hi | (1 << 8);

        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);

        if (enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        uint32_t fwater_lo;
        int planea_wm;

        crtc = single_enabled_crtc(dev);
        if (crtc == NULL)
                return;

        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                       &i845_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       4, pessimal_latency_ns);
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
        fwater_lo |= (3<<8) | planea_wm;

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

        I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
                                    struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pixel_rate;

        pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */

        if (intel_crtc->config.pch_pfit.enabled) {
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
                uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

                pipe_w = intel_crtc->config.pipe_src_w;
                pipe_h = intel_crtc->config.pipe_src_h;
                pfit_w = (pfit_size >> 16) & 0xFFFF;
                pfit_h = pfit_size & 0xFFFF;
                if (pipe_w < pfit_w)
                        pipe_w = pfit_w;
                if (pipe_h < pfit_h)
                        pipe_h = pfit_h;

                pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
                                     pfit_w * pfit_h);
        }

        return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
                               uint32_t latency)
{
        uint64_t ret;

        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;

        ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
        ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

        return ret;
}
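/*
 * Worked example for ilk_wm_method1() (illustrative): pixel_rate = 148500,
 * bytes_per_pixel = 4, latency = 50 (5.0 us):
 *   ret = DIV_ROUND_UP_ULL(148500 * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49.
 */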
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                               uint32_t horiz_pixels, uint8_t bytes_per_pixel,
                               uint32_t latency)
{
        uint32_t ret;

        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;

        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
        ret = DIV_ROUND_UP(ret, 64) + 2;

        return ret;
}
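/*
 * Worked example for ilk_wm_method2() (illustrative): latency = 50,
 * pixel_rate = 148500, pipe_htotal = 2200, horiz_pixels = 1920, bpp = 4:
 *   lines = (50 * 148500) / (2200 * 10000) = 0, so (0 + 1) line
 *   bytes = 1 * 1920 * 4 = 7680
 *   ret   = DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122.
 */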
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
                           uint8_t bytes_per_pixel)
{
        return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
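/*
 * Worked example for ilk_wm_fbc() (illustrative): pri_val = 49,
 * horiz_pixels = 1920, bytes_per_pixel = 4:
 *   DIV_ROUND_UP(49 * 64, 1920 * 4) + 2 = 1 + 2 = 3.
 */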
struct ilk_pipe_wm_parameters {
        bool active;
        uint32_t pipe_htotal;
        uint32_t pixel_rate;
        struct intel_plane_wm_parameters pri;
        struct intel_plane_wm_parameters spr;
        struct intel_plane_wm_parameters cur;
};
struct ilk_wm_maximums {
        uint16_t pri;
        uint16_t spr;
        uint16_t cur;
        uint16_t fbc;
};
/* used in computing the new watermarks state */
struct intel_wm_config {
        unsigned int num_pipes_active;
        bool sprites_enabled;
        bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value,
                                   bool is_lp)
{
        uint32_t method1, method2;

        if (!params->active || !params->pri.enabled)
                return 0;

        method1 = ilk_wm_method1(params->pixel_rate,
                                 params->pri.bytes_per_pixel,
                                 mem_value);

        if (!is_lp)
                return method1;

        method2 = ilk_wm_method2(params->pixel_rate,
                                 params->pipe_htotal,
                                 params->pri.horiz_pixels,
                                 params->pri.bytes_per_pixel,
                                 mem_value);

        return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value)
{
        uint32_t method1, method2;

        if (!params->active || !params->spr.enabled)
                return 0;

        method1 = ilk_wm_method1(params->pixel_rate,
                                 params->spr.bytes_per_pixel,
                                 mem_value);
        method2 = ilk_wm_method2(params->pixel_rate,
                                 params->pipe_htotal,
                                 params->spr.horiz_pixels,
                                 params->spr.bytes_per_pixel,
                                 mem_value);
        return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value)
{
        if (!params->active || !params->cur.enabled)
                return 0;

        return ilk_wm_method2(params->pixel_rate,
                              params->pipe_htotal,
                              params->cur.horiz_pixels,
                              params->cur.bytes_per_pixel,
                              mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t pri_val)
{
        if (!params->active || !params->pri.enabled)
                return 0;

        return ilk_wm_fbc(pri_val,
                          params->pri.horiz_pixels,
                          params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 8)
                return 3072;
        else if (INTEL_INFO(dev)->gen >= 7)
                return 768;
        else
                return 512;
}
2032 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
2033 int level, bool is_sprite)
2035 if (INTEL_INFO(dev)->gen >= 8)
2036 /* BDW primary/sprite plane watermarks */
2037 return level == 0 ? 255 : 2047;
2038 else if (INTEL_INFO(dev)->gen >= 7)
2039 /* IVB/HSW primary/sprite plane watermarks */
2040 return level == 0 ? 127 : 1023;
2041 else if (!is_sprite)
2042 /* ILK/SNB primary plane watermarks */
2043 return level == 0 ? 127 : 511;
2044 else
2045 /* ILK/SNB sprite plane watermarks */
2046 return level == 0 ? 63 : 255;
2047 }
2049 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
2050 int level)
2051 {
2052 if (INTEL_INFO(dev)->gen >= 7)
2053 return level == 0 ? 63 : 255;
2054 else
2055 return level == 0 ? 31 : 63;
2056 }
2058 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
2059 {
2060 if (INTEL_INFO(dev)->gen >= 8)
2061 return 31;
2062 else
2063 return 15;
2064 }
2066 /* Calculate the maximum primary/sprite plane watermark */
2067 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2068 int level,
2069 const struct intel_wm_config *config,
2070 enum intel_ddb_partitioning ddb_partitioning,
2071 bool is_sprite)
2072 {
2073 unsigned int fifo_size = ilk_display_fifo_size(dev);
2075 /* if sprites aren't enabled, sprites get nothing */
2076 if (is_sprite && !config->sprites_enabled)
2077 return 0;
2079 /* HSW allows LP1+ watermarks even with multiple pipes */
2080 if (level == 0 || config->num_pipes_active > 1) {
2081 fifo_size /= INTEL_INFO(dev)->num_pipes;
2083 /*
2084 * For some reason the non self refresh
2085 * FIFO size is only half of the self
2086 * refresh FIFO size on ILK/SNB.
2087 */
2088 if (INTEL_INFO(dev)->gen <= 6)
2089 fifo_size /= 2;
2090 }
2092 if (config->sprites_enabled) {
2093 /* level 0 is always calculated with 1:1 split */
2094 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2095 if (is_sprite)
2096 fifo_size *= 5;
2097 fifo_size /= 6;
2098 } else {
2099 fifo_size /= 2;
2100 }
2101 }
2103 /* clamp to max that the registers can hold */
2104 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
2107 /* Calculate the maximum cursor plane watermark */
2108 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2109 int level,
2110 const struct intel_wm_config *config)
2111 {
2112 /* HSW LP1+ watermarks w/ multiple pipes */
2113 if (level > 0 && config->num_pipes_active > 1)
2114 return 64;
2116 /* otherwise just report max that registers can hold */
2117 return ilk_cursor_wm_reg_max(dev, level);
2120 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2121 int level,
2122 const struct intel_wm_config *config,
2123 enum intel_ddb_partitioning ddb_partitioning,
2124 struct ilk_wm_maximums *max)
2125 {
2126 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2127 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2128 max->cur = ilk_cursor_wm_max(dev, level, config);
2129 max->fbc = ilk_fbc_wm_reg_max(dev);
2132 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2133 int level,
2134 struct ilk_wm_maximums *max)
2135 {
2136 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2137 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2138 max->cur = ilk_cursor_wm_reg_max(dev, level);
2139 max->fbc = ilk_fbc_wm_reg_max(dev);
2142 static bool ilk_validate_wm_level(int level,
2143 const struct ilk_wm_maximums *max,
2144 struct intel_wm_level *result)
2145 {
2146 bool ret;
2148 /* already determined to be invalid? */
2149 if (!result->enable)
2150 return false;
2152 result->enable = result->pri_val <= max->pri &&
2153 result->spr_val <= max->spr &&
2154 result->cur_val <= max->cur;
2156 ret = result->enable;
2158 /*
2159 * HACK until we can pre-compute everything,
2160 * and thus fail gracefully if LP0 watermarks
2161 * are exceeded...
2162 */
2163 if (level == 0 && !result->enable) {
2164 if (result->pri_val > max->pri)
2165 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2166 level, result->pri_val, max->pri);
2167 if (result->spr_val > max->spr)
2168 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2169 level, result->spr_val, max->spr);
2170 if (result->cur_val > max->cur)
2171 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2172 level, result->cur_val, max->cur);
2174 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2175 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2176 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2177 result->enable = true;
2178 }
2180 return ret;
2181 }
2183 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2184 int level,
2185 const struct ilk_pipe_wm_parameters *p,
2186 struct intel_wm_level *result)
2187 {
2188 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2189 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2190 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2192 /* WM1+ latency values stored in 0.5us units */
2193 if (level > 0) {
2194 pri_latency *= 5;
2195 spr_latency *= 5;
2196 cur_latency *= 5;
2197 }
2199 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2200 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2201 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2202 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2203 result->enable = true;
2204 }
2206 static uint32_t
2207 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2208 {
2209 struct drm_i915_private *dev_priv = dev->dev_private;
2210 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2211 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2212 u32 linetime, ips_linetime;
2214 if (!intel_crtc_active(crtc))
2215 return 0;
2217 /* The WM are computed based on how long it takes to fill a single
2218 * row at the given clock rate, multiplied by 8.
2219 */
2220 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2221 mode->crtc_clock);
2222 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2223 intel_ddi_get_cdclk_freq(dev_priv));
2225 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2226 PIPE_WM_LINETIME_TIME(linetime);
2227 }
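/*
 * Illustrative example: crtc_htotal = 2200 and crtc_clock = 148500 (kHz)
 * give a line time of roughly 14.8 us, so
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in units of
 * 1/8 us; ips_linetime differs only in using the CD clock as the divisor.
 */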
2229 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2231 struct drm_i915_private *dev_priv = dev->dev_private;
2233 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2234 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2236 wm[0] = (sskpd >> 56) & 0xFF;
2237 if (wm[0] == 0)
2238 wm[0] = sskpd & 0xF;
2239 wm[1] = (sskpd >> 4) & 0xFF;
2240 wm[2] = (sskpd >> 12) & 0xFF;
2241 wm[3] = (sskpd >> 20) & 0x1FF;
2242 wm[4] = (sskpd >> 32) & 0x1FF;
2243 } else if (INTEL_INFO(dev)->gen >= 6) {
2244 uint32_t sskpd = I915_READ(MCH_SSKPD);
2246 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2247 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2248 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2249 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2250 } else if (INTEL_INFO(dev)->gen >= 5) {
2251 uint32_t mltr = I915_READ(MLTR_ILK);
2253 /* ILK primary LP0 latency is 700 ns */
2254 wm[0] = 7;
2255 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2256 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2257 }
2258 }
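/*
 * Illustrative decode for the gen6+ branch above (field layout assumed to
 * be four fields selected by the SSKPD_WM0..WM3 shifts; the authoritative
 * definitions live in i915_reg.h): an MCH_SSKPD value of 0x0c0a0604 would
 * give wm[0] = 4, wm[1] = 6, wm[2] = 10, wm[3] = 12.
 */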
2260 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2262 /* ILK sprite LP0 latency is 1300 ns */
2263 if (INTEL_INFO(dev)->gen == 5)
2264 wm[0] = 13;
2265 }
2267 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2269 /* ILK cursor LP0 latency is 1300 ns */
2270 if (INTEL_INFO(dev)->gen == 5)
2271 wm[0] = 13;
2273 /* WaDoubleCursorLP3Latency:ivb */
2274 if (IS_IVYBRIDGE(dev))
2275 wm[3] *= 2;
2276 }
2278 int ilk_wm_max_level(const struct drm_device *dev)
2279 {
2280 /* how many WM levels are we expecting */
2281 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2282 return 4;
2283 else if (INTEL_INFO(dev)->gen >= 6)
2284 return 3;
2285 else
2286 return 2;
2287 }
2289 static void intel_print_wm_latency(struct drm_device *dev,
2290 const char *name,
2291 const uint16_t wm[5])
2292 {
2293 int level, max_level = ilk_wm_max_level(dev);
2295 for (level = 0; level <= max_level; level++) {
2296 unsigned int latency = wm[level];
2298 if (latency == 0) {
2299 DRM_ERROR("%s WM%d latency not provided\n",
2300 name, level);
2301 continue;
2302 }
2304 /* WM1+ latency values in 0.5us units */
2305 if (level > 0)
2306 latency *= 5;
2308 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2309 name, level, wm[level],
2310 latency / 10, latency % 10);
2311 }
2312 }
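/*
 * Example of the output above: a stored WM1 value of 4 is scaled to
 * latency = 20 and printed as "Primary WM1 latency 4 (2.0 usec)", since
 * WM1+ values are stored in 0.5us units while the printout uses 0.1us
 * units.
 */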
2314 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2315 uint16_t wm[5], uint16_t min)
2317 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2319 if (wm[0] >= min)
2320 return false;
2322 wm[0] = max(wm[0], min);
2323 for (level = 1; level <= max_level; level++)
2324 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2326 return true;
2327 }
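/*
 * Worked example: a call with min = 12 raises WM0 to at least 12 (1.2 us
 * in 0.1us units) and every WM1+ value to at least DIV_ROUND_UP(12, 5) = 3
 * (1.5 us in 0.5us units), returning true only if WM0 was actually below
 * the minimum.
 */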
2329 static void snb_wm_latency_quirk(struct drm_device *dev)
2331 struct drm_i915_private *dev_priv = dev->dev_private;
2332 bool changed;
2334 /*
2335 * The BIOS provided WM memory latency values are often
2336 * inadequate for high resolution displays. Adjust them.
2337 */
2338 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2339 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2340 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2342 if (!changed)
2343 return;
2345 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2346 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2347 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2348 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2351 static void ilk_setup_wm_latency(struct drm_device *dev)
2353 struct drm_i915_private *dev_priv = dev->dev_private;
2355 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2357 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2358 sizeof(dev_priv->wm.pri_latency));
2359 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2360 sizeof(dev_priv->wm.pri_latency));
2362 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2363 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2365 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2366 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2367 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2369 if (IS_GEN6(dev))
2370 snb_wm_latency_quirk(dev);
2371 }
2373 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2374 struct ilk_pipe_wm_parameters *p)
2376 struct drm_device *dev = crtc->dev;
2377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2378 enum pipe pipe = intel_crtc->pipe;
2379 struct drm_plane *plane;
2381 if (!intel_crtc_active(crtc))
2382 return;
2384 p->active = true;
2385 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2386 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2387 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2388 p->cur.bytes_per_pixel = 4;
2389 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2390 p->cur.horiz_pixels = intel_crtc->cursor_width;
2391 /* TODO: for now, assume primary and cursor planes are always enabled. */
2392 p->pri.enabled = true;
2393 p->cur.enabled = true;
2395 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2396 struct intel_plane *intel_plane = to_intel_plane(plane);
2398 if (intel_plane->pipe == pipe) {
2399 p->spr = intel_plane->wm;
2400 break;
2401 }
2402 }
2403 }
2405 static void ilk_compute_wm_config(struct drm_device *dev,
2406 struct intel_wm_config *config)
2408 struct intel_crtc *intel_crtc;
2410 /* Compute the currently _active_ config */
2411 for_each_intel_crtc(dev, intel_crtc) {
2412 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2414 if (!wm->pipe_enabled)
2415 continue;
2417 config->sprites_enabled |= wm->sprites_enabled;
2418 config->sprites_scaled |= wm->sprites_scaled;
2419 config->num_pipes_active++;
2423 /* Compute new watermarks for the pipe */
2424 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2425 const struct ilk_pipe_wm_parameters *params,
2426 struct intel_pipe_wm *pipe_wm)
2428 struct drm_device *dev = crtc->dev;
2429 const struct drm_i915_private *dev_priv = dev->dev_private;
2430 int level, max_level = ilk_wm_max_level(dev);
2431 /* LP0 watermark maximums depend on this pipe alone */
2432 struct intel_wm_config config = {
2433 .num_pipes_active = 1,
2434 .sprites_enabled = params->spr.enabled,
2435 .sprites_scaled = params->spr.scaled,
2436 };
2437 struct ilk_wm_maximums max;
2439 pipe_wm->pipe_enabled = params->active;
2440 pipe_wm->sprites_enabled = params->spr.enabled;
2441 pipe_wm->sprites_scaled = params->spr.scaled;
2443 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2444 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2445 max_level = 1;
2447 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2448 if (params->spr.scaled)
2449 max_level = 0;
2451 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2453 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2454 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2456 /* LP0 watermarks always use 1/2 DDB partitioning */
2457 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2459 /* At least LP0 must be valid */
2460 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2461 return false;
2463 ilk_compute_wm_reg_maximums(dev, 1, &max);
2465 for (level = 1; level <= max_level; level++) {
2466 struct intel_wm_level wm = {};
2468 ilk_compute_wm_level(dev_priv, level, params, &wm);
2470 /*
2471 * Disable any watermark level that exceeds the
2472 * register maximums since such watermarks are
2473 * always invalid.
2474 */
2475 if (!ilk_validate_wm_level(level, &max, &wm))
2476 break;
2478 pipe_wm->wm[level] = wm;
2479 }
2481 return true;
2482 }
2484 /*
2485 * Merge the watermarks from all active pipes for a specific level.
2486 */
2487 static void ilk_merge_wm_level(struct drm_device *dev,
2488 int level,
2489 struct intel_wm_level *ret_wm)
2490 {
2491 const struct intel_crtc *intel_crtc;
2493 ret_wm->enable = true;
2495 for_each_intel_crtc(dev, intel_crtc) {
2496 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2497 const struct intel_wm_level *wm = &active->wm[level];
2499 if (!active->pipe_enabled)
2500 continue;
2502 /*
2503 * The watermark values may have been used in the past,
2504 * so we must maintain them in the registers for some
2505 * time even if the level is now disabled.
2506 */
2507 if (!wm->enable)
2508 ret_wm->enable = false;
2510 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2511 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2512 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2513 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2517 /*
2518 * Merge all low power watermarks for all active pipes.
2519 */
2520 static void ilk_wm_merge(struct drm_device *dev,
2521 const struct intel_wm_config *config,
2522 const struct ilk_wm_maximums *max,
2523 struct intel_pipe_wm *merged)
2524 {
2525 int level, max_level = ilk_wm_max_level(dev);
2526 int last_enabled_level = max_level;
2528 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2529 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2530 config->num_pipes_active > 1)
2531 last_enabled_level = 0;
2533 /* ILK: FBC WM must be disabled always */
2534 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2536 /* merge each WM1+ level */
2537 for (level = 1; level <= max_level; level++) {
2538 struct intel_wm_level *wm = &merged->wm[level];
2540 ilk_merge_wm_level(dev, level, wm);
2542 if (level > last_enabled_level)
2543 wm->enable = false;
2544 else if (!ilk_validate_wm_level(level, max, wm))
2545 /* make sure all following levels get disabled */
2546 last_enabled_level = level - 1;
2548 /*
2549 * The spec says it is preferred to disable
2550 * FBC WMs instead of disabling a WM level.
2551 */
2552 if (wm->fbc_val > max->fbc) {
2553 if (wm->enable)
2554 merged->fbc_wm_enabled = false;
2555 wm->fbc_val = 0;
2556 }
2557 }
2559 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2560 /*
2561 * FIXME this is racy. FBC might get enabled later.
2562 * What we should check here is whether FBC can be
2563 * enabled sometime later.
2564 */
2565 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2566 for (level = 2; level <= max_level; level++) {
2567 struct intel_wm_level *wm = &merged->wm[level];
2569 wm->enable = false;
2570 }
2571 }
2572 }
2574 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2575 {
2576 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2577 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2578 }
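/*
 * Example: on HSW/BDW with five WM levels, if wm[4] is enabled the
 * LP1/LP2/LP3 registers map to levels 1/3/4 (e.g. wm_lp = 2 gives
 * 2 + 1 = 3); otherwise the mapping is the identity 1/2/3.
 */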
2580 /* The value we need to program into the WM_LPx latency field */
2581 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2582 {
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2585 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2586 return 2 * level;
2587 else
2588 return dev_priv->wm.pri_latency[level];
2589 }
2591 static void ilk_compute_wm_results(struct drm_device *dev,
2592 const struct intel_pipe_wm *merged,
2593 enum intel_ddb_partitioning partitioning,
2594 struct ilk_wm_values *results)
2595 {
2596 struct intel_crtc *intel_crtc;
2597 int level, wm_lp;
2599 results->enable_fbc_wm = merged->fbc_wm_enabled;
2600 results->partitioning = partitioning;
2602 /* LP1+ register values */
2603 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2604 const struct intel_wm_level *r;
2606 level = ilk_wm_lp_to_level(wm_lp, merged);
2608 r = &merged->wm[level];
2610 /*
2611 * Maintain the watermark values even if the level is
2612 * disabled. Doing otherwise could cause underruns.
2613 */
2614 results->wm_lp[wm_lp - 1] =
2615 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2616 (r->pri_val << WM1_LP_SR_SHIFT) |
2617 r->cur_val;
2619 if (r->enable)
2620 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2622 if (INTEL_INFO(dev)->gen >= 8)
2623 results->wm_lp[wm_lp - 1] |=
2624 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2625 else
2626 results->wm_lp[wm_lp - 1] |=
2627 r->fbc_val << WM1_LP_FBC_SHIFT;
2629 /*
2630 * Always set WM1S_LP_EN when spr_val != 0, even if the
2631 * level is disabled. Doing otherwise could cause underruns.
2632 */
2633 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2634 WARN_ON(wm_lp != 1);
2635 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2636 } else
2637 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2638 }
2640 /* LP0 register values */
2641 for_each_intel_crtc(dev, intel_crtc) {
2642 enum pipe pipe = intel_crtc->pipe;
2643 const struct intel_wm_level *r =
2644 &intel_crtc->wm.active.wm[0];
2646 if (WARN_ON(!r->enable))
2647 continue;
2649 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2651 results->wm_pipe[pipe] =
2652 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2653 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2654 r->cur_val;
2655 }
2656 }
2658 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2659 * case both are at the same level. Prefer r1 in case they're the same. */
2660 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2661 struct intel_pipe_wm *r1,
2662 struct intel_pipe_wm *r2)
2664 int level, max_level = ilk_wm_max_level(dev);
2665 int level1 = 0, level2 = 0;
2667 for (level = 1; level <= max_level; level++) {
2668 if (r1->wm[level].enable)
2669 level1 = level;
2670 if (r2->wm[level].enable)
2671 level2 = level;
2672 }
2674 if (level1 == level2) {
2675 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2676 return r2;
2677 else
2678 return r1;
2679 } else if (level1 > level2) {
2680 return r1;
2681 } else {
2682 return r2;
2683 }
2684 }
2686 /* dirty bits used to track which watermarks need changes */
2687 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2688 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2689 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2690 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2691 #define WM_DIRTY_FBC (1 << 24)
2692 #define WM_DIRTY_DDB (1 << 25)
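/*
 * Resulting bit layout (illustrative): bits 0-2 track per-pipe WM0 values,
 * bits 8-10 per-pipe linetime, bits 16-18 the LP1-LP3 watermarks
 * (1 << (15 + wm_lp)), bit 24 FBC and bit 25 DDB partitioning.
 * E.g. WM_DIRTY_LP(2) == (1 << 17).
 */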
2694 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2695 const struct ilk_wm_values *old,
2696 const struct ilk_wm_values *new)
2697 {
2698 unsigned int dirty = 0;
2699 enum pipe pipe;
2700 int wm_lp;
2702 for_each_pipe(dev_priv, pipe) {
2703 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2704 dirty |= WM_DIRTY_LINETIME(pipe);
2705 /* Must disable LP1+ watermarks too */
2706 dirty |= WM_DIRTY_LP_ALL;
2709 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2710 dirty |= WM_DIRTY_PIPE(pipe);
2711 /* Must disable LP1+ watermarks too */
2712 dirty |= WM_DIRTY_LP_ALL;
2716 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2717 dirty |= WM_DIRTY_FBC;
2718 /* Must disable LP1+ watermarks too */
2719 dirty |= WM_DIRTY_LP_ALL;
2722 if (old->partitioning != new->partitioning) {
2723 dirty |= WM_DIRTY_DDB;
2724 /* Must disable LP1+ watermarks too */
2725 dirty |= WM_DIRTY_LP_ALL;
2728 /* LP1+ watermarks already deemed dirty, no need to continue */
2729 if (dirty & WM_DIRTY_LP_ALL)
2730 return dirty;
2732 /* Find the lowest numbered LP1+ watermark in need of an update... */
2733 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2734 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2735 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2736 break;
2737 }
2739 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2740 for (; wm_lp <= 3; wm_lp++)
2741 dirty |= WM_DIRTY_LP(wm_lp);
2743 return dirty;
2744 }
2746 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2747 unsigned int dirty)
2748 {
2749 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2750 bool changed = false;
2752 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2753 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2754 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2755 changed = true;
2756 }
2757 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2758 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2759 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2760 changed = true;
2761 }
2762 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2763 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2764 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2765 changed = true;
2766 }
2768 /*
2769 * Don't touch WM1S_LP_EN here.
2770 * Doing so could cause underruns.
2771 */
2773 return changed;
2774 }
2776 /*
2777 * The spec says we shouldn't write when we don't need to, because every
2778 * write causes WMs to be re-evaluated, expending some power.
2779 */
2780 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2781 struct ilk_wm_values *results)
2783 struct drm_device *dev = dev_priv->dev;
2784 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2785 unsigned int dirty;
2786 uint32_t val;
2788 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2789 if (!dirty)
2790 return;
2792 _ilk_disable_lp_wm(dev_priv, dirty);
2794 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2795 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2796 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2797 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2798 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2799 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2801 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2802 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2803 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2804 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2805 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2806 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2808 if (dirty & WM_DIRTY_DDB) {
2809 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2810 val = I915_READ(WM_MISC);
2811 if (results->partitioning == INTEL_DDB_PART_1_2)
2812 val &= ~WM_MISC_DATA_PARTITION_5_6;
2813 else
2814 val |= WM_MISC_DATA_PARTITION_5_6;
2815 I915_WRITE(WM_MISC, val);
2816 } else {
2817 val = I915_READ(DISP_ARB_CTL2);
2818 if (results->partitioning == INTEL_DDB_PART_1_2)
2819 val &= ~DISP_DATA_PARTITION_5_6;
2820 else
2821 val |= DISP_DATA_PARTITION_5_6;
2822 I915_WRITE(DISP_ARB_CTL2, val);
2823 }
2824 }
2826 if (dirty & WM_DIRTY_FBC) {
2827 val = I915_READ(DISP_ARB_CTL);
2828 if (results->enable_fbc_wm)
2829 val &= ~DISP_FBC_WM_DIS;
2830 else
2831 val |= DISP_FBC_WM_DIS;
2832 I915_WRITE(DISP_ARB_CTL, val);
2835 if (dirty & WM_DIRTY_LP(1) &&
2836 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2837 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2839 if (INTEL_INFO(dev)->gen >= 7) {
2840 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2841 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2842 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2843 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2846 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2847 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2848 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2849 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2850 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2851 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2853 dev_priv->wm.hw = *results;
2856 static bool ilk_disable_lp_wm(struct drm_device *dev)
2858 struct drm_i915_private *dev_priv = dev->dev_private;
2860 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2863 static void ilk_update_wm(struct drm_crtc *crtc)
2865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2866 struct drm_device *dev = crtc->dev;
2867 struct drm_i915_private *dev_priv = dev->dev_private;
2868 struct ilk_wm_maximums max;
2869 struct ilk_pipe_wm_parameters params = {};
2870 struct ilk_wm_values results = {};
2871 enum intel_ddb_partitioning partitioning;
2872 struct intel_pipe_wm pipe_wm = {};
2873 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2874 struct intel_wm_config config = {};
2876 ilk_compute_wm_parameters(crtc, &params);
2878 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2880 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2881 return;
2883 intel_crtc->wm.active = pipe_wm;
2885 ilk_compute_wm_config(dev, &config);
2887 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2888 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2890 /* 5/6 split only in single pipe config on IVB+ */
2891 if (INTEL_INFO(dev)->gen >= 7 &&
2892 config.num_pipes_active == 1 && config.sprites_enabled) {
2893 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2894 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2896 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2897 } else {
2898 best_lp_wm = &lp_wm_1_2;
2899 }
2901 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2902 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2904 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2906 ilk_write_wm_values(dev_priv, &results);
2909 static void
2910 ilk_update_sprite_wm(struct drm_plane *plane,
2911 struct drm_crtc *crtc,
2912 uint32_t sprite_width, uint32_t sprite_height,
2913 int pixel_size, bool enabled, bool scaled)
2915 struct drm_device *dev = plane->dev;
2916 struct intel_plane *intel_plane = to_intel_plane(plane);
2918 intel_plane->wm.enabled = enabled;
2919 intel_plane->wm.scaled = scaled;
2920 intel_plane->wm.horiz_pixels = sprite_width;
2921 intel_plane->wm.vert_pixels = sprite_height;
2922 intel_plane->wm.bytes_per_pixel = pixel_size;
2924 /*
2925 * IVB workaround: must disable low power watermarks for at least
2926 * one frame before enabling scaling. LP watermarks can be re-enabled
2927 * when scaling is disabled.
2928 *
2929 * WaCxSRDisabledForSpriteScaling:ivb
2930 */
2931 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2932 intel_wait_for_vblank(dev, intel_plane->pipe);
2934 ilk_update_wm(crtc);
2937 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2939 struct drm_device *dev = crtc->dev;
2940 struct drm_i915_private *dev_priv = dev->dev_private;
2941 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2943 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2944 enum pipe pipe = intel_crtc->pipe;
2945 static const unsigned int wm0_pipe_reg[] = {
2946 [PIPE_A] = WM0_PIPEA_ILK,
2947 [PIPE_B] = WM0_PIPEB_ILK,
2948 [PIPE_C] = WM0_PIPEC_IVB,
2949 };
2951 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2952 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2953 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2955 active->pipe_enabled = intel_crtc_active(crtc);
2957 if (active->pipe_enabled) {
2958 u32 tmp = hw->wm_pipe[pipe];
2960 /*
2961 * For active pipes LP0 watermark is marked as
2962 * enabled, and LP1+ watermarks as disabled since
2963 * we can't really reverse compute them in case
2964 * multiple pipes are active.
2965 */
2966 active->wm[0].enable = true;
2967 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2968 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2969 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2970 active->linetime = hw->wm_linetime[pipe];
2971 } else {
2972 int level, max_level = ilk_wm_max_level(dev);
2974 /*
2975 * For inactive pipes, all watermark levels
2976 * should be marked as enabled but zeroed,
2977 * which is what we'd compute them to.
2978 */
2979 for (level = 0; level <= max_level; level++)
2980 active->wm[level].enable = true;
2984 void ilk_wm_get_hw_state(struct drm_device *dev)
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2988 struct drm_crtc *crtc;
2990 for_each_crtc(dev, crtc)
2991 ilk_pipe_wm_get_hw_state(crtc);
2993 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2994 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2995 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2997 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2998 if (INTEL_INFO(dev)->gen >= 7) {
2999 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3000 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3003 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3004 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3005 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3006 else if (IS_IVYBRIDGE(dev))
3007 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
3008 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3010 hw->enable_fbc_wm =
3011 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3012 }
3014 /**
3015 * intel_update_watermarks - update FIFO watermark values based on current modes
3017 * Calculate watermark values for the various WM regs based on current mode
3018 * and plane configuration.
3020 * There are several cases to deal with here:
3021 * - normal (i.e. non-self-refresh)
3022 * - self-refresh (SR) mode
3023 * - lines are large relative to FIFO size (buffer can hold up to 2)
3024 * - lines are small relative to FIFO size (buffer can hold more than 2
3025 * lines), so need to account for TLB latency
3027 * The normal calculation is:
3028 * watermark = dotclock * bytes per pixel * latency
3029 * where latency is platform & configuration dependent (we assume pessimal
3030 * values here).
3032 * The SR calculation is:
3033 * watermark = (trunc(latency/line time)+1) * surface width *
3034 * bytes per pixel
3035 * where
3036 * line time = htotal / dotclock
3037 * surface width = hdisplay for normal plane and 64 for cursor
3038 * and latency is assumed to be high, as above.
3040 * The final value programmed to the register should always be rounded up,
3041 * and include an extra 2 entries to account for clock crossings.
3043 * We don't use the sprite, so we can ignore that. And on Crestline we have
3044 * to set the non-SR watermarks to 8.
3045 */
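/*
 * Worked example of the normal calculation above (illustrative numbers):
 * a 100 MHz dotclock at 4 bytes per pixel with 2 us of latency drains
 * 100e6 * 4 * 2e-6 = 800 bytes while memory is busy, so the watermark
 * must cover 800 bytes of FIFO, rounded up and padded with the 2 extra
 * entries mentioned above.
 */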
3046 void intel_update_watermarks(struct drm_crtc *crtc)
3048 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3050 if (dev_priv->display.update_wm)
3051 dev_priv->display.update_wm(crtc);
3054 void intel_update_sprite_watermarks(struct drm_plane *plane,
3055 struct drm_crtc *crtc,
3056 uint32_t sprite_width,
3057 uint32_t sprite_height,
3058 int pixel_size,
3059 bool enabled, bool scaled)
3061 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3063 if (dev_priv->display.update_sprite_wm)
3064 dev_priv->display.update_sprite_wm(plane, crtc,
3065 sprite_width, sprite_height,
3066 pixel_size, enabled, scaled);
3069 static struct drm_i915_gem_object *
3070 intel_alloc_context_page(struct drm_device *dev)
3072 struct drm_i915_gem_object *ctx;
3073 int ret;
3075 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3077 ctx = i915_gem_alloc_object(dev, 4096);
3079 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3083 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
3085 DRM_ERROR("failed to pin power context: %d\n", ret);
3089 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3091 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3097 err_unpin:
3098 i915_gem_object_ggtt_unpin(ctx);
3099 err_unref:
3100 drm_gem_object_unreference(&ctx->base);
3101 return NULL;
3102 }
3104 /**
3105 * Lock protecting IPS related data structures
3106 */
3107 DEFINE_SPINLOCK(mchdev_lock);
3109 /* Global for IPS driver to get at the current i915 device. Protected by
3110 * mchdev_lock. */
3111 static struct drm_i915_private *i915_mch_dev;
3113 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3115 struct drm_i915_private *dev_priv = dev->dev_private;
3116 u16 rgvswctl;
3118 assert_spin_locked(&mchdev_lock);
3120 rgvswctl = I915_READ16(MEMSWCTL);
3121 if (rgvswctl & MEMCTL_CMD_STS) {
3122 DRM_DEBUG("gpu busy, RCS change rejected\n");
3123 return false; /* still busy with another command */
3126 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3127 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3128 I915_WRITE16(MEMSWCTL, rgvswctl);
3129 POSTING_READ16(MEMSWCTL);
3131 rgvswctl |= MEMCTL_CMD_STS;
3132 I915_WRITE16(MEMSWCTL, rgvswctl);
3134 return true;
3135 }
3137 static void ironlake_enable_drps(struct drm_device *dev)
3139 struct drm_i915_private *dev_priv = dev->dev_private;
3140 u32 rgvmodectl = I915_READ(MEMMODECTL);
3141 u8 fmax, fmin, fstart, vstart;
3143 spin_lock_irq(&mchdev_lock);
3145 /* Enable temp reporting */
3146 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3147 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3149 /* 100ms RC evaluation intervals */
3150 I915_WRITE(RCUPEI, 100000);
3151 I915_WRITE(RCDNEI, 100000);
3153 /* Set max/min thresholds to 90ms and 80ms respectively */
3154 I915_WRITE(RCBMAXAVG, 90000);
3155 I915_WRITE(RCBMINAVG, 80000);
3157 I915_WRITE(MEMIHYST, 1);
3159 /* Set up min, max, and cur for interrupt handling */
3160 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3161 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3162 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3163 MEMMODE_FSTART_SHIFT;
3165 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3166 PXVFREQ_PX_SHIFT;
3168 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3169 dev_priv->ips.fstart = fstart;
3171 dev_priv->ips.max_delay = fstart;
3172 dev_priv->ips.min_delay = fmin;
3173 dev_priv->ips.cur_delay = fstart;
3175 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3176 fmax, fmin, fstart);
3178 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3180 /*
3181 * Interrupts will be enabled in ironlake_irq_postinstall
3182 */
3184 I915_WRITE(VIDSTART, vstart);
3185 POSTING_READ(VIDSTART);
3187 rgvmodectl |= MEMMODE_SWMODE_EN;
3188 I915_WRITE(MEMMODECTL, rgvmodectl);
3190 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3191 DRM_ERROR("stuck trying to change perf mode\n");
3194 ironlake_set_drps(dev, fstart);
3196 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3197 I915_READ(0x112e0);
3198 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3199 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3200 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3202 spin_unlock_irq(&mchdev_lock);
3205 static void ironlake_disable_drps(struct drm_device *dev)
3207 struct drm_i915_private *dev_priv = dev->dev_private;
3208 u16 rgvswctl;
3210 spin_lock_irq(&mchdev_lock);
3212 rgvswctl = I915_READ16(MEMSWCTL);
3214 /* Ack interrupts, disable EFC interrupt */
3215 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3216 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3217 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3218 I915_WRITE(DEIIR, DE_PCU_EVENT);
3219 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3221 /* Go back to the starting frequency */
3222 ironlake_set_drps(dev, dev_priv->ips.fstart);
3224 rgvswctl |= MEMCTL_CMD_STS;
3225 I915_WRITE(MEMSWCTL, rgvswctl);
3226 mdelay(1);
3228 spin_unlock_irq(&mchdev_lock);
3231 /* There's a funny hw issue where the hw returns all 0 when reading from
3232 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3233 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3234 * all limits and the gpu stuck at whatever frequency it is at the moment).
3235 */
3236 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3237 {
3238 u32 limits;
3240 /* Only set the down limit when we've reached the lowest level to avoid
3241 * getting more interrupts, otherwise leave this clear. This prevents a
3242 * race in the hw when coming out of rc6: There's a tiny window where
3243 * the hw runs at the minimal clock before selecting the desired
3244 * frequency, if the down threshold expires in that window we will not
3245 * receive a down interrupt. */
3246 limits = dev_priv->rps.max_freq_softlimit << 24;
3247 if (val <= dev_priv->rps.min_freq_softlimit)
3248 limits |= dev_priv->rps.min_freq_softlimit << 16;
3250 return limits;
3251 }
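/*
 * Illustrative encoding: with max_freq_softlimit = 0x16 and
 * min_freq_softlimit = 0x07, a request at the minimum produces
 * limits = (0x16 << 24) | (0x07 << 16) = 0x16070000; any higher request
 * leaves the down-limit field clear, as explained above.
 */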
3253 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3254 {
3255 int new_power;
3257 new_power = dev_priv->rps.power;
3258 switch (dev_priv->rps.power) {
3259 case LOW_POWER:
3260 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3261 new_power = BETWEEN;
3262 break;
3264 case BETWEEN:
3265 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3266 new_power = LOW_POWER;
3267 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3268 new_power = HIGH_POWER;
3269 break;
3271 case HIGH_POWER:
3272 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3273 new_power = BETWEEN;
3274 break;
3275 }
3276 /* Max/min bins are special */
3277 if (val == dev_priv->rps.min_freq_softlimit)
3278 new_power = LOW_POWER;
3279 if (val == dev_priv->rps.max_freq_softlimit)
3280 new_power = HIGH_POWER;
3281 if (new_power == dev_priv->rps.power)
3282 return;
3284 /* Note the units here are not exactly 1us, but 1280ns. */
3285 switch (new_power) {
3286 case LOW_POWER:
3287 /* Upclock if more than 95% busy over 16ms */
3288 I915_WRITE(GEN6_RP_UP_EI, 12500);
3289 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3291 /* Downclock if less than 85% busy over 32ms */
3292 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3293 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3295 I915_WRITE(GEN6_RP_CONTROL,
3296 GEN6_RP_MEDIA_TURBO |
3297 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3298 GEN6_RP_MEDIA_IS_GFX |
3299 GEN6_RP_ENABLE |
3300 GEN6_RP_UP_BUSY_AVG |
3301 GEN6_RP_DOWN_IDLE_AVG);
3302 break;
3304 case BETWEEN:
3305 /* Upclock if more than 90% busy over 13ms */
3306 I915_WRITE(GEN6_RP_UP_EI, 10250);
3307 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3309 /* Downclock if less than 75% busy over 32ms */
3310 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3311 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3313 I915_WRITE(GEN6_RP_CONTROL,
3314 GEN6_RP_MEDIA_TURBO |
3315 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3316 GEN6_RP_MEDIA_IS_GFX |
3317 GEN6_RP_ENABLE |
3318 GEN6_RP_UP_BUSY_AVG |
3319 GEN6_RP_DOWN_IDLE_AVG);
3320 break;
3322 case HIGH_POWER:
3323 /* Upclock if more than 85% busy over 10ms */
3324 I915_WRITE(GEN6_RP_UP_EI, 8000);
3325 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3327 /* Downclock if less than 60% busy over 32ms */
3328 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3329 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3331 I915_WRITE(GEN6_RP_CONTROL,
3332 GEN6_RP_MEDIA_TURBO |
3333 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3334 GEN6_RP_MEDIA_IS_GFX |
3335 GEN6_RP_ENABLE |
3336 GEN6_RP_UP_BUSY_AVG |
3337 GEN6_RP_DOWN_IDLE_AVG);
3338 break;
3339 }
3341 dev_priv->rps.power = new_power;
3342 dev_priv->rps.last_adj = 0;
3343 }
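/*
 * Checking the HIGH_POWER numbers above against the comments: an up EI of
 * 8000 units * 1.28 us = ~10 ms, with an up threshold of 6800/8000 = 85%
 * busy; the down EI of 25000 units = 32 ms with 15000/25000 = 60% busy
 * matches the "less than 60% over 32ms" downclock rule.
 */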
3345 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3346 {
3347 u32 mask = 0;
3349 if (val > dev_priv->rps.min_freq_softlimit)
3350 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3351 if (val < dev_priv->rps.max_freq_softlimit)
3352 mask |= GEN6_PM_RP_UP_THRESHOLD;
3354 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3355 mask &= dev_priv->pm_rps_events;
3357 /* IVB and SNB hard hang on a looping batchbuffer
3358 * if GEN6_PM_UP_EI_EXPIRED is masked.
3359 */
3360 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3361 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3363 if (IS_GEN8(dev_priv->dev))
3364 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3366 return ~mask;
3367 }
3369 /* gen6_set_rps is called to update the frequency request, but should also be
3370 * called when the range (min_delay and max_delay) is modified so that we can
3371 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3372 void gen6_set_rps(struct drm_device *dev, u8 val)
3374 struct drm_i915_private *dev_priv = dev->dev_private;
3376 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3377 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3378 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3380 /* min/max delay may still have been modified so be sure to
3381 * write the limits value.
3382 */
3383 if (val != dev_priv->rps.cur_freq) {
3384 gen6_set_rps_thresholds(dev_priv, val);
3386 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3387 I915_WRITE(GEN6_RPNSWREQ,
3388 HSW_FREQUENCY(val));
3390 I915_WRITE(GEN6_RPNSWREQ,
3391 GEN6_FREQUENCY(val) |
3392 GEN6_OFFSET(0) |
3393 GEN6_AGGRESSIVE_TURBO);
3394 }
3396 /* Make sure we continue to get interrupts
3397 * until we hit the minimum or maximum frequencies.
3398 */
3399 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3400 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3402 POSTING_READ(GEN6_RPNSWREQ);
3404 dev_priv->rps.cur_freq = val;
3405 trace_intel_gpu_freq_change(val * 50);
3408 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3410 * If Gfx is idle, then:
3411 * 1. Mask Turbo interrupts
3412 * 2. Bring up Gfx clock
3413 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3414 * 4. Clear the Force GFX CLK ON bit so that the Gfx clock can come down
3415 * 5. Unmask Turbo interrupts
3416 */
3417 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3419 struct drm_device *dev = dev_priv->dev;
3421 /* Latest VLV doesn't need to force the gfx clock */
3422 if (dev->pdev->revision >= 0xd) {
3423 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3424 return;
3425 }
3427 /*
3428 * When we are idle, drop to the minimum voltage state.
3429 */
3431 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3432 return;
3434 /* Mask turbo interrupt so that they will not come in between */
3435 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3437 vlv_force_gfx_clock(dev_priv, true);
3439 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3441 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3442 dev_priv->rps.min_freq_softlimit);
3444 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3445 & GENFREQSTATUS) == 0, 5))
3446 DRM_ERROR("timed out waiting for Punit\n");
3448 vlv_force_gfx_clock(dev_priv, false);
3450 I915_WRITE(GEN6_PMINTRMSK,
3451 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3454 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3456 struct drm_device *dev = dev_priv->dev;
3458 mutex_lock(&dev_priv->rps.hw_lock);
3459 if (dev_priv->rps.enabled) {
3460 if (IS_CHERRYVIEW(dev))
3461 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3462 else if (IS_VALLEYVIEW(dev))
3463 vlv_set_rps_idle(dev_priv);
3465 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3466 dev_priv->rps.last_adj = 0;
3468 mutex_unlock(&dev_priv->rps.hw_lock);
3471 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3473 struct drm_device *dev = dev_priv->dev;
3475 mutex_lock(&dev_priv->rps.hw_lock);
3476 if (dev_priv->rps.enabled) {
3477 if (IS_VALLEYVIEW(dev))
3478 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3480 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3481 dev_priv->rps.last_adj = 0;
3483 mutex_unlock(&dev_priv->rps.hw_lock);
3486 void valleyview_set_rps(struct drm_device *dev, u8 val)
3488 struct drm_i915_private *dev_priv = dev->dev_private;
3490 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3491 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3492 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3494 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3495 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3496 dev_priv->rps.cur_freq,
3497 vlv_gpu_freq(dev_priv, val), val);
3499 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3500 "Odd GPU freq value\n"))
3503 if (val != dev_priv->rps.cur_freq)
3504 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3506 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3508 dev_priv->rps.cur_freq = val;
3509 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3512 static void gen8_disable_rps_interrupts(struct drm_device *dev)
3514 struct drm_i915_private *dev_priv = dev->dev_private;
3516 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3517 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3518 ~dev_priv->pm_rps_events);
3519 /* Complete PM interrupt masking here doesn't race with the rps work
3520 * item again unmasking PM interrupts because that is using a different
3521 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
3522 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
3523 * gen8_enable_rps will clean up. */
3525 spin_lock_irq(&dev_priv->irq_lock);
3526 dev_priv->rps.pm_iir = 0;
3527 spin_unlock_irq(&dev_priv->irq_lock);
3529 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3532 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3534 struct drm_i915_private *dev_priv = dev->dev_private;
3536 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3537 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3538 ~dev_priv->pm_rps_events);
3539 /* Complete PM interrupt masking here doesn't race with the rps work
3540 * item again unmasking PM interrupts because that is using a different
3541 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3542 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3544 spin_lock_irq(&dev_priv->irq_lock);
3545 dev_priv->rps.pm_iir = 0;
3546 spin_unlock_irq(&dev_priv->irq_lock);
3548 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3551 static void gen6_disable_rps(struct drm_device *dev)
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3555 I915_WRITE(GEN6_RC_CONTROL, 0);
3556 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3558 if (IS_BROADWELL(dev))
3559 gen8_disable_rps_interrupts(dev);
3560 else
3561 gen6_disable_rps_interrupts(dev);
3562 }
3564 static void cherryview_disable_rps(struct drm_device *dev)
3566 struct drm_i915_private *dev_priv = dev->dev_private;
3568 I915_WRITE(GEN6_RC_CONTROL, 0);
3570 gen8_disable_rps_interrupts(dev);
3573 static void valleyview_disable_rps(struct drm_device *dev)
3575 struct drm_i915_private *dev_priv = dev->dev_private;
3577 /* We're doing forcewake before disabling RC6;
3578 * this is what the BIOS expects when going into suspend */
3579 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3581 I915_WRITE(GEN6_RC_CONTROL, 0);
3583 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3585 gen6_disable_rps_interrupts(dev);
3588 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3590 if (IS_VALLEYVIEW(dev)) {
3591 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3592 mode = GEN6_RC_CTL_RC6_ENABLE;
3593 else
3594 mode = 0;
3595 }
3596 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3597 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3598 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3599 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3602 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3603 {
3604 /* No RC6 before Ironlake */
3605 if (INTEL_INFO(dev)->gen < 5)
3606 return 0;
3608 /* RC6 is only on Ironlake mobile not on desktop */
3609 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3610 return 0;
3612 /* Respect the kernel parameter if it is set */
3613 if (enable_rc6 >= 0) {
3614 int mask;
3616 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3617 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3618 INTEL_RC6pp_ENABLE;
3619 else
3620 mask = INTEL_RC6_ENABLE;
3622 if ((enable_rc6 & mask) != enable_rc6)
3623 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3624 enable_rc6 & mask, enable_rc6, mask);
3626 return enable_rc6 & mask;
3629 /* Disable RC6 on Ironlake */
3630 if (INTEL_INFO(dev)->gen == 5)
3631 return 0;
3633 if (IS_IVYBRIDGE(dev))
3634 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3636 return INTEL_RC6_ENABLE;
3639 int intel_enable_rc6(const struct drm_device *dev)
3641 return i915.enable_rc6;
3644 static void gen8_enable_rps_interrupts(struct drm_device *dev)
3646 struct drm_i915_private *dev_priv = dev->dev_private;
3648 spin_lock_irq(&dev_priv->irq_lock);
3649 WARN_ON(dev_priv->rps.pm_iir);
3650 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3651 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3652 spin_unlock_irq(&dev_priv->irq_lock);
3655 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3657 struct drm_i915_private *dev_priv = dev->dev_private;
3659 spin_lock_irq(&dev_priv->irq_lock);
3660 WARN_ON(dev_priv->rps.pm_iir);
3661 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3662 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3663 spin_unlock_irq(&dev_priv->irq_lock);
3666 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3668 /* All of these values are in units of 50MHz */
3669 dev_priv->rps.cur_freq = 0;
3670 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
3671 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3672 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3673 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3674 /* XXX: only BYT has a special efficient freq */
3675 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3676 /* hw_max = RP0 until we check for overclocking */
3677 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3679 /* Preserve min/max settings in case of re-init */
3680 if (dev_priv->rps.max_freq_softlimit == 0)
3681 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3683 if (dev_priv->rps.min_freq_softlimit == 0)
3684 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3685 }
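/*
 * Illustrative decode: rp_state_cap = 0x00070c16 gives rp0 = 0x16 (22,
 * i.e. 1100 MHz in the 50 MHz units used here), rp1 = 0x0c (600 MHz) and
 * min_freq = 0x07 (350 MHz); efficient_freq then defaults to rp1.
 */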
3687 static void gen8_enable_rps(struct drm_device *dev)
3689 struct drm_i915_private *dev_priv = dev->dev_private;
3690 struct intel_engine_cs *ring;
3691 uint32_t rc6_mask = 0, rp_state_cap;
3694 /* 1a: Software RC state - RC0 */
3695 I915_WRITE(GEN6_RC_STATE, 0);
3697 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3698 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3699 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3701 /* 2a: Disable RC states. */
3702 I915_WRITE(GEN6_RC_CONTROL, 0);
3704 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3705 parse_rp_state_cap(dev_priv, rp_state_cap);
3707 /* 2b: Program RC6 thresholds.*/
3708 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3709 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3710 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3711 for_each_ring(ring, dev_priv, unused)
3712 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3713 I915_WRITE(GEN6_RC_SLEEP, 0);
3714 if (IS_BROADWELL(dev))
3715 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3716 else
3717 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3719 /* 3: Enable RC6 */
3720 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3721 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3722 intel_print_rc6_info(dev, rc6_mask);
3723 if (IS_BROADWELL(dev))
3724 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3725 GEN7_RC_CTL_TO_MODE |
3726 rc6_mask);
3727 else
3728 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3729 GEN6_RC_CTL_EI_MODE(1) |
3730 rc6_mask);
3732 /* 4: Program defaults and thresholds for RPS */
3733 I915_WRITE(GEN6_RPNSWREQ,
3734 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3735 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3736 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3737 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3738 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3740 /* Docs recommend 900MHz, and 300 MHz respectively */
3741 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3742 dev_priv->rps.max_freq_softlimit << 24 |
3743 dev_priv->rps.min_freq_softlimit << 16);
3745 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3746 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3747 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3748 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3750 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3752 /* 5: Enable RPS */
3753 I915_WRITE(GEN6_RP_CONTROL,
3754 GEN6_RP_MEDIA_TURBO |
3755 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3756 GEN6_RP_MEDIA_IS_GFX |
3757 GEN6_RP_ENABLE |
3758 GEN6_RP_UP_BUSY_AVG |
3759 GEN6_RP_DOWN_IDLE_AVG);
3761 /* 6: Ring frequency + overclocking (our driver does this later) */
3763 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3765 gen8_enable_rps_interrupts(dev);
3767 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3770 static void gen6_enable_rps(struct drm_device *dev)
3772 struct drm_i915_private *dev_priv = dev->dev_private;
3773 struct intel_engine_cs *ring;
3774 u32 rp_state_cap;
3775 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3776 u32 gtfifodbg;
3777 int rc6_mode;
3778 int i, ret;
3780 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3782 /* Here begins a magic sequence of register writes to enable
3783 * auto-downclocking.
3785 * Perhaps there might be some value in exposing these to
3786 * userspace...
3787 */
3788 I915_WRITE(GEN6_RC_STATE, 0);
3790 /* Clear the DBG now so we don't confuse earlier errors */
3791 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3792 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3793 I915_WRITE(GTFIFODBG, gtfifodbg);
3796 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3798 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3800 parse_rp_state_cap(dev_priv, rp_state_cap);
3802 /* disable the counters and set deterministic thresholds */
3803 I915_WRITE(GEN6_RC_CONTROL, 0);
3805 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3806 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3807 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3808 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3809 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3811 for_each_ring(ring, dev_priv, i)
3812 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3814 I915_WRITE(GEN6_RC_SLEEP, 0);
3815 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3816 if (IS_IVYBRIDGE(dev))
3817 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3819 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3820 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3821 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3823 /* Check if we are enabling RC6 */
3824 rc6_mode = intel_enable_rc6(dev_priv->dev);
3825 if (rc6_mode & INTEL_RC6_ENABLE)
3826 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3828 /* We don't use those on Haswell */
3829 if (!IS_HASWELL(dev)) {
3830 if (rc6_mode & INTEL_RC6p_ENABLE)
3831 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3833 if (rc6_mode & INTEL_RC6pp_ENABLE)
3834 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3837 intel_print_rc6_info(dev, rc6_mask);
3839 I915_WRITE(GEN6_RC_CONTROL,
3840 rc6_mask |
3841 GEN6_RC_CTL_EI_MODE(1) |
3842 GEN6_RC_CTL_HW_ENABLE);
3844 /* Power down if completely idle for over 50ms */
3845 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3846 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3848 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3850 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3852 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3853 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3854 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3855 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3856 (pcu_mbox & 0xff) * 50);
3857 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3860 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3861 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3863 gen6_enable_rps_interrupts(dev);
3866 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3867 if (IS_GEN6(dev) && ret) {
3868 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3869 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3870 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3871 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3872 rc6vids &= 0xffff00;
3873 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3874 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3876 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3879 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3882 static void __gen6_update_ring_freq(struct drm_device *dev)
3884 struct drm_i915_private *dev_priv = dev->dev_private;
3885 int min_freq = 15;
3886 unsigned int gpu_freq;
3887 unsigned int max_ia_freq, min_ring_freq;
3888 int scaling_factor = 180;
3889 struct cpufreq_policy *policy;
3891 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3893 policy = cpufreq_cpu_get(0);
3894 if (policy) {
3895 max_ia_freq = policy->cpuinfo.max_freq;
3896 cpufreq_cpu_put(policy);
3897 } else {
3898 /*
3899 * Default to measured freq if none found, PCU will ensure we
3900 * don't go over
3901 */
3902 max_ia_freq = tsc_khz;
3903 }
3905 /* Convert from kHz to MHz */
3906 max_ia_freq /= 1000;
3908 min_ring_freq = I915_READ(DCLK) & 0xf;
3909 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3910 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
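/*
 * Illustrative conversion: a DCLK low nibble of 3 (3 units of 266.6 MHz
 * DDR) becomes mult_frac(3, 8, 3) = 8, i.e. the minimum ring frequency
 * expressed in the units the loop below compares against gpu_freq.
 */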
3913 * For each potential GPU frequency, load a ring frequency we'd like
3914 * to use for memory access. We do this by specifying the IA frequency
3915 * the PCU should use as a reference to determine the ring frequency.
3916 */
3917 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3918 gpu_freq--) {
3919 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3920 unsigned int ia_freq = 0, ring_freq = 0;
3922 if (INTEL_INFO(dev)->gen >= 8) {
3923 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3924 ring_freq = max(min_ring_freq, gpu_freq);
3925 } else if (IS_HASWELL(dev)) {
3926 ring_freq = mult_frac(gpu_freq, 5, 4);
3927 ring_freq = max(min_ring_freq, ring_freq);
3928 /* leave ia_freq as the default, chosen by cpufreq */
3929 } else {
3930 /* On older processors, there is no separate ring
3931 * clock domain, so in order to boost the bandwidth
3932 * of the ring, we need to upclock the CPU (ia_freq).
3933 *
3934 * For GPU frequencies less than 750MHz,
3935 * just use the lowest ring freq.
3936 */
3937 if (gpu_freq < min_freq)
3938 ia_freq = 800;
3939 else
3940 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3941 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3944 sandybridge_pcode_write(dev_priv,
3945 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3946 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3947 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3948 gpu_freq);
3949 }
3950 }
3952 void gen6_update_ring_freq(struct drm_device *dev)
3954 struct drm_i915_private *dev_priv = dev->dev_private;
3956 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
3957 return;
3959 mutex_lock(&dev_priv->rps.hw_lock);
3960 __gen6_update_ring_freq(dev);
3961 mutex_unlock(&dev_priv->rps.hw_lock);
3964 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3968 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3969 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3971 return rp0;
3972 }
3974 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3978 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3979 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3981 return rpe;
3982 }
3984 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3988 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3989 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3991 return rp1;
3992 }
3994 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3998 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3999 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
4003 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4007 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4009 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4014 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4018 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4020 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4022 rp0 = min_t(u32, rp0, 0xea);
4027 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4031 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4032 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
4033 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4034 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
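/* RPe is split across two fuse registers: the LO fuse supplies
 * bits 4:0 and the HI fuse the remaining upper bits, hence the
 * << 5 when stitching the value back together. */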
4039 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4041 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4044 /* Check that the pctx buffer wasn't moved from under us. */
4045 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4047 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4049 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4050 dev_priv->vlv_pctx->stolen->start);
4054 /* Check that the pcbr address is not empty. */
4055 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4057 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4059 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4062 static void cherryview_setup_pctx(struct drm_device *dev)
4064 struct drm_i915_private *dev_priv = dev->dev_private;
4065 unsigned long pctx_paddr, paddr;
4066 struct i915_gtt *gtt = &dev_priv->gtt;
4068 int pctx_size = 32*1024;
4070 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4072 pcbr = I915_READ(VLV_PCBR);
4073 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
4074 paddr = (dev_priv->mm.stolen_base +
4075 (gtt->stolen_size - pctx_size));
4077 pctx_paddr = (paddr & (~4095));
4078 I915_WRITE(VLV_PCBR, pctx_paddr);
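/* PCBR holds a 4KB-aligned address, hence the ~4095 mask above --
 * the same mask the pctx sanity checks use when reading the
 * register back. */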
4082 static void valleyview_setup_pctx(struct drm_device *dev)
4084 struct drm_i915_private *dev_priv = dev->dev_private;
4085 struct drm_i915_gem_object *pctx;
4086 unsigned long pctx_paddr;
4088 int pctx_size = 24*1024;
4090 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4092 pcbr = I915_READ(VLV_PCBR);
4094 /* BIOS set it up already, grab the pre-alloc'd space */
4097 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
4098 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4100 I915_GTT_OFFSET_NONE,
4106 * From the Gunit register HAS:
4107 * The Gfx driver is expected to program this register and ensure
4108 * proper allocation within Gfx stolen memory. For example, this
4109 * register should be programmed such that the PCBR range does not
4110 * overlap with other ranges, such as the frame buffer, protected
4111 * memory, or any other relevant ranges.
4113 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4115 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4119 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4120 I915_WRITE(VLV_PCBR, pctx_paddr);
4123 dev_priv->vlv_pctx = pctx;
4126 static void valleyview_cleanup_pctx(struct drm_device *dev)
4128 struct drm_i915_private *dev_priv = dev->dev_private;
4130 if (WARN_ON(!dev_priv->vlv_pctx))
4133 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4134 dev_priv->vlv_pctx = NULL;
4137 static void valleyview_init_gt_powersave(struct drm_device *dev)
4139 struct drm_i915_private *dev_priv = dev->dev_private;
4142 valleyview_setup_pctx(dev);
4144 mutex_lock(&dev_priv->rps.hw_lock);
4146 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4147 switch ((val >> 6) & 3) {
4150 dev_priv->mem_freq = 800;
4153 dev_priv->mem_freq = 1066;
4156 dev_priv->mem_freq = 1333;
4159 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4161 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4162 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4163 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4164 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4165 dev_priv->rps.max_freq);
4167 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4168 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4169 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4170 dev_priv->rps.efficient_freq);
4172 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4173 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4174 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4175 dev_priv->rps.rp1_freq);
4177 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4178 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4179 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4180 dev_priv->rps.min_freq);
4182 /* Preserve min/max settings in case of re-init */
4183 if (dev_priv->rps.max_freq_softlimit == 0)
4184 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4186 if (dev_priv->rps.min_freq_softlimit == 0)
4187 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4189 mutex_unlock(&dev_priv->rps.hw_lock);
4192 static void cherryview_init_gt_powersave(struct drm_device *dev)
4194 struct drm_i915_private *dev_priv = dev->dev_private;
4197 cherryview_setup_pctx(dev);
4199 mutex_lock(&dev_priv->rps.hw_lock);
4201 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
4202 switch ((val >> 2) & 0x7) {
4205 dev_priv->rps.cz_freq = 200;
4206 dev_priv->mem_freq = 1600;
4209 dev_priv->rps.cz_freq = 267;
4210 dev_priv->mem_freq = 1600;
4213 dev_priv->rps.cz_freq = 333;
4214 dev_priv->mem_freq = 2000;
4217 dev_priv->rps.cz_freq = 320;
4218 dev_priv->mem_freq = 1600;
4221 dev_priv->rps.cz_freq = 400;
4222 dev_priv->mem_freq = 1600;
4225 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4227 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4228 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4229 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4230 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4231 dev_priv->rps.max_freq);
4233 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4234 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4235 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4236 dev_priv->rps.efficient_freq);
4238 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4239 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4240 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4241 dev_priv->rps.rp1_freq);
4243 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4244 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4245 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4246 dev_priv->rps.min_freq);
4248 WARN_ONCE((dev_priv->rps.max_freq |
4249 dev_priv->rps.efficient_freq |
4250 dev_priv->rps.rp1_freq |
4251 dev_priv->rps.min_freq) & 1,
4252 "Odd GPU freq values\n");
4254 /* Preserve min/max settings in case of re-init */
4255 if (dev_priv->rps.max_freq_softlimit == 0)
4256 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4258 if (dev_priv->rps.min_freq_softlimit == 0)
4259 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4261 mutex_unlock(&dev_priv->rps.hw_lock);
4264 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
4266 valleyview_cleanup_pctx(dev);
4269 static void cherryview_enable_rps(struct drm_device *dev)
4271 struct drm_i915_private *dev_priv = dev->dev_private;
4272 struct intel_engine_cs *ring;
4273 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4276 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4278 gtfifodbg = I915_READ(GTFIFODBG);
4280 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4282 I915_WRITE(GTFIFODBG, gtfifodbg);
4285 cherryview_check_pctx(dev_priv);
4287 /* 1a & 1b: Get forcewake during program sequence. Although the driver
4288 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
4289 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4291 /* 2a: Program RC6 thresholds.*/
4292 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4293 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns */
4294 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4296 for_each_ring(ring, dev_priv, i)
4297 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4298 I915_WRITE(GEN6_RC_SLEEP, 0);
4300 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4302 /* allows RC6 residency counter to work */
4303 I915_WRITE(VLV_COUNTER_CONTROL,
4304 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4305 VLV_MEDIA_RC6_COUNT_EN |
4306 VLV_RENDER_RC6_COUNT_EN));
4308 /* For now we assume BIOS is allocating and populating the PCBR */
4309 pcbr = I915_READ(VLV_PCBR);
4311 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4314 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4315 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4316 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
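/* Leave rc6_mode at 0 (RC6 off) unless the user asked for RC6 and
 * the BIOS actually left a valid power-context address in PCBR;
 * the write below latches whichever mode we ended up with. */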
4318 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4320 /* 4 Program defaults and thresholds for RPS*/
4321 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4322 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4323 I915_WRITE(GEN6_RP_UP_EI, 66000);
4324 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4326 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4328 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
4329 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4330 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4333 I915_WRITE(GEN6_RP_CONTROL,
4334 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4335 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
4337 GEN6_RP_UP_BUSY_AVG |
4338 GEN6_RP_DOWN_IDLE_AVG);
4340 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4342 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4343 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4345 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
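/* Bit 4 of the status word reports whether the GPLL is running
 * (the 0x10 test above) and bits 15:8 carry the current frequency
 * code, which vlv_gpu_freq() decodes to MHz below. */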
4346 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4347 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4348 dev_priv->rps.cur_freq);
4350 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4351 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4352 dev_priv->rps.efficient_freq);
4354 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4356 gen8_enable_rps_interrupts(dev);
4358 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4361 static void valleyview_enable_rps(struct drm_device *dev)
4363 struct drm_i915_private *dev_priv = dev->dev_private;
4364 struct intel_engine_cs *ring;
4365 u32 gtfifodbg, val, rc6_mode = 0;
4368 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4370 valleyview_check_pctx(dev_priv);
4372 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4373 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4375 I915_WRITE(GTFIFODBG, gtfifodbg);
4378 /* If VLV, Forcewake all wells, else re-direct to regular path */
4379 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4381 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4382 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4383 I915_WRITE(GEN6_RP_UP_EI, 66000);
4384 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4386 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4387 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4389 I915_WRITE(GEN6_RP_CONTROL,
4390 GEN6_RP_MEDIA_TURBO |
4391 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4392 GEN6_RP_MEDIA_IS_GFX |
4394 GEN6_RP_UP_BUSY_AVG |
4395 GEN6_RP_DOWN_IDLE_CONT);
4397 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4398 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4399 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4401 for_each_ring(ring, dev_priv, i)
4402 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4404 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4406 /* allows RC6 residency counter to work */
4407 I915_WRITE(VLV_COUNTER_CONTROL,
4408 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4409 VLV_RENDER_RC0_COUNT_EN |
4410 VLV_MEDIA_RC6_COUNT_EN |
4411 VLV_RENDER_RC6_COUNT_EN));
4413 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4414 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4416 intel_print_rc6_info(dev, rc6_mode);
4418 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4420 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4422 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4423 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4425 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4426 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4427 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4428 dev_priv->rps.cur_freq);
4430 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4431 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4432 dev_priv->rps.efficient_freq);
4434 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4436 gen6_enable_rps_interrupts(dev);
4438 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4441 void ironlake_teardown_rc6(struct drm_device *dev)
4443 struct drm_i915_private *dev_priv = dev->dev_private;
4445 if (dev_priv->ips.renderctx) {
4446 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4447 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4448 dev_priv->ips.renderctx = NULL;
4451 if (dev_priv->ips.pwrctx) {
4452 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4453 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4454 dev_priv->ips.pwrctx = NULL;
4458 static void ironlake_disable_rc6(struct drm_device *dev)
4460 struct drm_i915_private *dev_priv = dev->dev_private;
4462 if (I915_READ(PWRCTXA)) {
4463 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4464 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4465 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4468 I915_WRITE(PWRCTXA, 0);
4469 POSTING_READ(PWRCTXA);
4471 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4472 POSTING_READ(RSTDBYCTL);
4476 static int ironlake_setup_rc6(struct drm_device *dev)
4478 struct drm_i915_private *dev_priv = dev->dev_private;
4480 if (dev_priv->ips.renderctx == NULL)
4481 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4482 if (!dev_priv->ips.renderctx)
4485 if (dev_priv->ips.pwrctx == NULL)
4486 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4487 if (!dev_priv->ips.pwrctx) {
4488 ironlake_teardown_rc6(dev);
4495 static void ironlake_enable_rc6(struct drm_device *dev)
4497 struct drm_i915_private *dev_priv = dev->dev_private;
4498 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4499 bool was_interruptible;
4502 /* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
4505 if (!intel_enable_rc6(dev))
4508 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4510 ret = ironlake_setup_rc6(dev);
4514 was_interruptible = dev_priv->mm.interruptible;
4515 dev_priv->mm.interruptible = false;
4518 * GPU can automatically power down the render unit if given a page
* to save state.
*/
4521 ret = intel_ring_begin(ring, 6);
4523 ironlake_teardown_rc6(dev);
4524 dev_priv->mm.interruptible = was_interruptible;
4528 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4529 intel_ring_emit(ring, MI_SET_CONTEXT);
4530 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4532 MI_SAVE_EXT_STATE_EN |
4533 MI_RESTORE_EXT_STATE_EN |
4534 MI_RESTORE_INHIBIT);
4535 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4536 intel_ring_emit(ring, MI_NOOP);
4537 intel_ring_emit(ring, MI_FLUSH);
4538 intel_ring_advance(ring);
4541 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4542 * does an implicit flush; combined with the MI_FLUSH above, it should be
4543 * safe to assume that renderctx is valid
4545 ret = intel_ring_idle(ring);
4546 dev_priv->mm.interruptible = was_interruptible;
4548 DRM_ERROR("failed to enable ironlake power savings\n");
4549 ironlake_teardown_rc6(dev);
4553 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4554 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4556 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
4559 static unsigned long intel_pxfreq(u32 vidfreq)
4562 int div = (vidfreq & 0x3f0000) >> 16;
4563 int post = (vidfreq & 0x3000) >> 12;
4564 int pre = (vidfreq & 0x7);
4569 freq = ((div * 133333) / ((1<<post) * pre));
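/* Worked example (illustrative): div = 12, post = 1, pre = 4 gives
 * (12 * 133333) / ((1 << 1) * 4) = 199999, i.e. roughly 200MHz
 * assuming the 133333 constant is the 133.33MHz reference clock
 * expressed in kHz. */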
4574 static const struct cparams {
4580 { 1, 1333, 301, 28664 },
4581 { 1, 1066, 294, 24460 },
4582 { 1, 800, 294, 25192 },
4583 { 0, 1333, 276, 27605 },
4584 { 0, 1066, 276, 27605 },
4585 { 0, 800, 231, 23784 },
4588 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4590 u64 total_count, diff, ret;
4591 u32 count1, count2, count3, m = 0, c = 0;
4592 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4595 assert_spin_locked(&mchdev_lock);
4597 diff1 = now - dev_priv->ips.last_time1;
4599 /* Prevent division-by-zero if we are asking too fast.
4600 * Also, we don't get interesting results if we are polling
4601 * faster than once in 10ms, so just return the saved value
* in that case.
*/
4605 return dev_priv->ips.chipset_power;
4607 count1 = I915_READ(DMIEC);
4608 count2 = I915_READ(DDREC);
4609 count3 = I915_READ(CSIEC);
4611 total_count = count1 + count2 + count3;
4613 /* FIXME: handle per-counter overflow */
4614 if (total_count < dev_priv->ips.last_count1) {
4615 diff = ~0UL - dev_priv->ips.last_count1;
4616 diff += total_count;
4618 diff = total_count - dev_priv->ips.last_count1;
4621 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4622 if (cparams[i].i == dev_priv->ips.c_m &&
4623 cparams[i].t == dev_priv->ips.r_t) {
4630 diff = div_u64(diff, diff1);
4631 ret = ((m * diff) + c);
4632 ret = div_u64(ret, 10);
4634 dev_priv->ips.last_count1 = total_count;
4635 dev_priv->ips.last_time1 = now;
4637 dev_priv->ips.chipset_power = ret;
4642 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4644 struct drm_device *dev = dev_priv->dev;
4647 if (INTEL_INFO(dev)->gen != 5)
4650 spin_lock_irq(&mchdev_lock);
4652 val = __i915_chipset_val(dev_priv);
4654 spin_unlock_irq(&mchdev_lock);
4659 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4661 unsigned long m, x, b;
4664 tsfs = I915_READ(TSFS);
4666 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4667 x = I915_READ8(TR1);
4669 b = tsfs & TSFS_INTR_MASK;
4671 return ((m * x) / 127) - b;
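/* A fixed-point linear fit: slope m (from TSFS) times the raw TR1
 * thermal reading x, scaled down by 127, minus the intercept b --
 * the same linear shape as __i915_chipset_val()'s m * diff + c
 * above. */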
4674 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4676 struct drm_device *dev = dev_priv->dev;
4677 static const struct v_table {
4678 u16 vd; /* in .1 mil */
4679 u16 vm; /* in .1 mil */
4810 if (INTEL_INFO(dev)->is_mobile)
4811 return v_table[pxvid].vm;
4813 return v_table[pxvid].vd;
4816 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4818 u64 now, diff, diffms;
4821 assert_spin_locked(&mchdev_lock);
4823 now = ktime_get_raw_ns();
4824 diffms = now - dev_priv->ips.last_time2;
4825 do_div(diffms, NSEC_PER_MSEC);
4827 /* Don't divide by 0 */
4831 count = I915_READ(GFXEC);
4833 if (count < dev_priv->ips.last_count2) {
4834 diff = ~0UL - dev_priv->ips.last_count2;
4837 diff = count - dev_priv->ips.last_count2;
4840 dev_priv->ips.last_count2 = count;
4841 dev_priv->ips.last_time2 = now;
4843 /* More magic constants... */
4845 diff = div_u64(diff, diffms * 10);
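/* The raw counter delta is normalized by the elapsed time in ms
 * plus an extra factor of 10 -- presumably to match the units the
 * empirically derived correction factors expect (our reading; the
 * "magic constants" note above is all the documentation there is). */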
4846 dev_priv->ips.gfx_power = diff;
4849 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4851 struct drm_device *dev = dev_priv->dev;
4853 if (INTEL_INFO(dev)->gen != 5)
4856 spin_lock_irq(&mchdev_lock);
4858 __i915_update_gfx_val(dev_priv);
4860 spin_unlock_irq(&mchdev_lock);
4863 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4865 unsigned long t, corr, state1, corr2, state2;
4868 assert_spin_locked(&mchdev_lock);
4870 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4871 pxvid = (pxvid >> 24) & 0x7f;
4872 ext_v = pvid_to_extvid(dev_priv, pxvid);
4876 t = i915_mch_val(dev_priv);
4878 /* Revel in the empirically derived constants */
4880 /* Correction factor in 1/100000 units */
4882 corr = ((t * 2349) + 135940);
4884 corr = ((t * 964) + 29317);
4886 corr = ((t * 301) + 1004);
4888 corr = corr * ((150142 * state1) / 10000 - 78642);
4890 corr2 = (corr * dev_priv->ips.corr);
4892 state2 = (corr2 * state1) / 10000;
4893 state2 /= 100; /* convert to mW */
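/* Putting the chain together: corr is a thermal correction derived
 * from t, corr2 folds in the per-part fuse calibration (ips.corr,
 * read from LCFUSE02 at init), and state2 applies the combined
 * factor to the voltage-derived state1 before scaling down to mW. */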
4895 __i915_update_gfx_val(dev_priv);
4897 return dev_priv->ips.gfx_power + state2;
4900 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4902 struct drm_device *dev = dev_priv->dev;
4905 if (INTEL_INFO(dev)->gen != 5)
4908 spin_lock_irq(&mchdev_lock);
4910 val = __i915_gfx_val(dev_priv);
4912 spin_unlock_irq(&mchdev_lock);
4918 * i915_read_mch_val - return value for IPS use
4920 * Calculate and return a value for the IPS driver to use when deciding whether
4921 * we have thermal and power headroom to increase CPU or GPU power budget.
4923 unsigned long i915_read_mch_val(void)
4925 struct drm_i915_private *dev_priv;
4926 unsigned long chipset_val, graphics_val, ret = 0;
4928 spin_lock_irq(&mchdev_lock);
4931 dev_priv = i915_mch_dev;
4933 chipset_val = __i915_chipset_val(dev_priv);
4934 graphics_val = __i915_gfx_val(dev_priv);
4936 ret = chipset_val + graphics_val;
4939 spin_unlock_irq(&mchdev_lock);
4943 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4946 * i915_gpu_raise - raise GPU frequency limit
4948 * Raise the limit; IPS indicates we have thermal headroom.
4950 bool i915_gpu_raise(void)
4952 struct drm_i915_private *dev_priv;
4955 spin_lock_irq(&mchdev_lock);
4956 if (!i915_mch_dev) {
4960 dev_priv = i915_mch_dev;
4962 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4963 dev_priv->ips.max_delay--;
4966 spin_unlock_irq(&mchdev_lock);
4970 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4973 * i915_gpu_lower - lower GPU frequency limit
4975 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4976 * frequency maximum.
4978 bool i915_gpu_lower(void)
4980 struct drm_i915_private *dev_priv;
4983 spin_lock_irq(&mchdev_lock);
4984 if (!i915_mch_dev) {
4988 dev_priv = i915_mch_dev;
4990 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4991 dev_priv->ips.max_delay++;
4994 spin_unlock_irq(&mchdev_lock);
4998 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5001 * i915_gpu_busy - indicate GPU busyness to IPS
5003 * Tell the IPS driver whether or not the GPU is busy.
5005 bool i915_gpu_busy(void)
5007 struct drm_i915_private *dev_priv;
5008 struct intel_engine_cs *ring;
5012 spin_lock_irq(&mchdev_lock);
5015 dev_priv = i915_mch_dev;
5017 for_each_ring(ring, dev_priv, i)
5018 ret |= !list_empty(&ring->request_list);
5021 spin_unlock_irq(&mchdev_lock);
5025 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5028 * i915_gpu_turbo_disable - disable graphics turbo
5030 * Disable graphics turbo by resetting the max frequency and setting the
5031 * current frequency to the default.
5033 bool i915_gpu_turbo_disable(void)
5035 struct drm_i915_private *dev_priv;
5038 spin_lock_irq(&mchdev_lock);
5039 if (!i915_mch_dev) {
5043 dev_priv = i915_mch_dev;
5045 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5047 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5051 spin_unlock_irq(&mchdev_lock);
5055 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5058 * Tells the intel_ips driver that the i915 driver is now loaded, if
5059 * IPS got loaded first.
5061 * This awkward dance is so that neither module has to depend on the
5062 * other in order for IPS to do the appropriate communication of
5063 * GPU turbo limits to i915.
5066 ips_ping_for_i915_load(void)
5070 link = symbol_get(ips_link_to_i915_driver);
5073 symbol_put(ips_link_to_i915_driver);
5077 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5079 /* We only register the i915 ips part with intel-ips once everything is
5080 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5081 spin_lock_irq(&mchdev_lock);
5082 i915_mch_dev = dev_priv;
5083 spin_unlock_irq(&mchdev_lock);
5085 ips_ping_for_i915_load();
5088 void intel_gpu_ips_teardown(void)
5090 spin_lock_irq(&mchdev_lock);
5091 i915_mch_dev = NULL;
5092 spin_unlock_irq(&mchdev_lock);
5095 static void intel_init_emon(struct drm_device *dev)
5097 struct drm_i915_private *dev_priv = dev->dev_private;
5102 /* Disable to program */
5106 /* Program energy weights for various events */
5107 I915_WRITE(SDEW, 0x15040d00);
5108 I915_WRITE(CSIEW0, 0x007f0000);
5109 I915_WRITE(CSIEW1, 0x1e220004);
5110 I915_WRITE(CSIEW2, 0x04000004);
5112 for (i = 0; i < 5; i++)
5113 I915_WRITE(PEW + (i * 4), 0);
5114 for (i = 0; i < 3; i++)
5115 I915_WRITE(DEW + (i * 4), 0);
5117 /* Program P-state weights to account for frequency power adjustment */
5118 for (i = 0; i < 16; i++) {
5119 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5120 unsigned long freq = intel_pxfreq(pxvidfreq);
5121 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5126 val *= (freq / 1000);
5128 val /= (127*127*900);
5130 DRM_ERROR("bad pxval: %ld\n", val);
5133 /* Render standby states get 0 weight */
5137 for (i = 0; i < 4; i++) {
5138 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5139 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5140 I915_WRITE(PXW + (i * 4), val);
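/* Each PXW register packs four of the sixteen per-P-state weights,
 * most significant byte first: pxw[0..3] land in PXW, pxw[4..7] in
 * PXW + 4, and so on. */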
5143 /* Adjust magic regs to magic values (more experimental results) */
5144 I915_WRITE(OGW0, 0);
5145 I915_WRITE(OGW1, 0);
5146 I915_WRITE(EG0, 0x00007f00);
5147 I915_WRITE(EG1, 0x0000000e);
5148 I915_WRITE(EG2, 0x000e0000);
5149 I915_WRITE(EG3, 0x68000300);
5150 I915_WRITE(EG4, 0x42000000);
5151 I915_WRITE(EG5, 0x00140031);
5155 for (i = 0; i < 8; i++)
5156 I915_WRITE(PXWL + (i * 4), 0);
5158 /* Enable PMON + select events */
5159 I915_WRITE(ECR, 0x80000019);
5161 lcfuse = I915_READ(LCFUSE02);
5163 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
5166 void intel_init_gt_powersave(struct drm_device *dev)
5168 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
5170 if (IS_CHERRYVIEW(dev))
5171 cherryview_init_gt_powersave(dev);
5172 else if (IS_VALLEYVIEW(dev))
5173 valleyview_init_gt_powersave(dev);
5176 void intel_cleanup_gt_powersave(struct drm_device *dev)
5178 if (IS_CHERRYVIEW(dev))
5180 else if (IS_VALLEYVIEW(dev))
5181 valleyview_cleanup_gt_powersave(dev);
5185 * intel_suspend_gt_powersave - suspend PM work and helper threads
5188 * We don't want to disable RC6 or other features here; we just want
5189 * to make sure any work we've queued has finished and won't bother
5190 * us while we're suspended.
5192 void intel_suspend_gt_powersave(struct drm_device *dev)
5194 struct drm_i915_private *dev_priv = dev->dev_private;
5196 /* Interrupts should be disabled already to avoid re-arming. */
5197 WARN_ON(intel_irqs_enabled(dev_priv));
5199 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5201 cancel_work_sync(&dev_priv->rps.work);
5203 /* Force GPU to min freq during suspend */
5204 gen6_rps_idle(dev_priv);
5207 void intel_disable_gt_powersave(struct drm_device *dev)
5209 struct drm_i915_private *dev_priv = dev->dev_private;
5211 /* Interrupts should be disabled already to avoid re-arming. */
5212 WARN_ON(intel_irqs_enabled(dev_priv));
5214 if (IS_IRONLAKE_M(dev)) {
5215 ironlake_disable_drps(dev);
5216 ironlake_disable_rc6(dev);
5217 } else if (INTEL_INFO(dev)->gen >= 6) {
5218 intel_suspend_gt_powersave(dev);
5220 mutex_lock(&dev_priv->rps.hw_lock);
5221 if (IS_CHERRYVIEW(dev))
5222 cherryview_disable_rps(dev);
5223 else if (IS_VALLEYVIEW(dev))
5224 valleyview_disable_rps(dev);
5226 gen6_disable_rps(dev);
5227 dev_priv->rps.enabled = false;
5228 mutex_unlock(&dev_priv->rps.hw_lock);
5232 static void intel_gen6_powersave_work(struct work_struct *work)
5234 struct drm_i915_private *dev_priv =
5235 container_of(work, struct drm_i915_private,
5236 rps.delayed_resume_work.work);
5237 struct drm_device *dev = dev_priv->dev;
5239 mutex_lock(&dev_priv->rps.hw_lock);
5241 if (IS_CHERRYVIEW(dev)) {
5242 cherryview_enable_rps(dev);
5243 } else if (IS_VALLEYVIEW(dev)) {
5244 valleyview_enable_rps(dev);
5245 } else if (IS_BROADWELL(dev)) {
5246 gen8_enable_rps(dev);
5247 __gen6_update_ring_freq(dev);
5249 gen6_enable_rps(dev);
5250 __gen6_update_ring_freq(dev);
5252 dev_priv->rps.enabled = true;
5253 mutex_unlock(&dev_priv->rps.hw_lock);
5255 intel_runtime_pm_put(dev_priv);
5258 void intel_enable_gt_powersave(struct drm_device *dev)
5260 struct drm_i915_private *dev_priv = dev->dev_private;
5262 if (IS_IRONLAKE_M(dev)) {
5263 mutex_lock(&dev->struct_mutex);
5264 ironlake_enable_drps(dev);
5265 ironlake_enable_rc6(dev);
5266 intel_init_emon(dev);
5267 mutex_unlock(&dev->struct_mutex);
5268 } else if (INTEL_INFO(dev)->gen >= 6) {
5270 * PCU communication is slow and this doesn't need to be
5271 * done at any specific time, so do this out of our fast path
5272 * to make resume and init faster.
5274 * We depend on the HW RC6 power context save/restore
5275 * mechanism when entering D3 through runtime PM suspend. So
5276 * disable RPM until RPS/RC6 is properly setup. We can only
5277 * get here via the driver load/system resume/runtime resume
5278 * paths, so the _noresume version is enough (and in case of
5279 * runtime resume it's necessary).
5281 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5282 round_jiffies_up_relative(HZ)))
5283 intel_runtime_pm_get_noresume(dev_priv);
5287 void intel_reset_gt_powersave(struct drm_device *dev)
5289 struct drm_i915_private *dev_priv = dev->dev_private;
5291 dev_priv->rps.enabled = false;
5292 intel_enable_gt_powersave(dev);
5295 static void ibx_init_clock_gating(struct drm_device *dev)
5297 struct drm_i915_private *dev_priv = dev->dev_private;
5300 * On Ibex Peak and Cougar Point, we need to disable clock
5301 * gating for the panel power sequencer or it will fail to
5302 * start up when no ports are active.
5304 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5307 static void g4x_disable_trickle_feed(struct drm_device *dev)
5309 struct drm_i915_private *dev_priv = dev->dev_private;
5312 for_each_pipe(dev_priv, pipe) {
5313 I915_WRITE(DSPCNTR(pipe),
5314 I915_READ(DSPCNTR(pipe)) |
5315 DISPPLANE_TRICKLE_FEED_DISABLE);
5316 intel_flush_primary_plane(dev_priv, pipe);
5320 static void ilk_init_lp_watermarks(struct drm_device *dev)
5322 struct drm_i915_private *dev_priv = dev->dev_private;
5324 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5325 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5326 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5329 * Don't touch WM1S_LP_EN here.
5330 * Doing so could cause underruns.
5334 static void ironlake_init_clock_gating(struct drm_device *dev)
5336 struct drm_i915_private *dev_priv = dev->dev_private;
5337 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5341 * WaFbcDisableDpfcClockGating:ilk
5343 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5344 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5345 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5347 I915_WRITE(PCH_3DCGDIS0,
5348 MARIUNIT_CLOCK_GATE_DISABLE |
5349 SVSMUNIT_CLOCK_GATE_DISABLE);
5350 I915_WRITE(PCH_3DCGDIS1,
5351 VFMUNIT_CLOCK_GATE_DISABLE);
5354 * According to the spec the following bits should be set in
5355 * order to enable memory self-refresh
5356 * The bit 22/21 of 0x42004
5357 * The bit 5 of 0x42020
5358 * The bit 15 of 0x45000
5360 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5361 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5362 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5363 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5364 I915_WRITE(DISP_ARB_CTL,
5365 (I915_READ(DISP_ARB_CTL) |
5368 ilk_init_lp_watermarks(dev);
5371 * Based on the document from hardware guys the following bits
5372 * should be set unconditionally in order to enable FBC.
5373 * The bit 22 of 0x42000
5374 * The bit 22 of 0x42004
5375 * The bit 7,8,9 of 0x42020.
5377 if (IS_IRONLAKE_M(dev)) {
5378 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5379 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5380 I915_READ(ILK_DISPLAY_CHICKEN1) |
5382 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5383 I915_READ(ILK_DISPLAY_CHICKEN2) |
5387 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5389 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5390 I915_READ(ILK_DISPLAY_CHICKEN2) |
5391 ILK_ELPIN_409_SELECT);
5392 I915_WRITE(_3D_CHICKEN2,
5393 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5394 _3D_CHICKEN2_WM_READ_PIPELINED);
5396 /* WaDisableRenderCachePipelinedFlush:ilk */
5397 I915_WRITE(CACHE_MODE_0,
5398 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5400 /* WaDisable_RenderCache_OperationalFlush:ilk */
5401 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5403 g4x_disable_trickle_feed(dev);
5405 ibx_init_clock_gating(dev);
5408 static void cpt_init_clock_gating(struct drm_device *dev)
5410 struct drm_i915_private *dev_priv = dev->dev_private;
5415 * On Ibex Peak and Cougar Point, we need to disable clock
5416 * gating for the panel power sequencer or it will fail to
5417 * start up when no ports are active.
5419 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5420 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5421 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5422 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5423 DPLS_EDP_PPS_FIX_DIS);
5424 /* The following fixes a weird display corruption (a few pixels shifted
5425 * downward) seen only on the LVDS panels of some HP laptops with Ivy Bridge.
5427 for_each_pipe(dev_priv, pipe) {
5428 val = I915_READ(TRANS_CHICKEN2(pipe));
5429 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5430 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5431 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5432 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5433 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5434 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5435 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5436 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5438 /* WADP0ClockGatingDisable */
5439 for_each_pipe(dev_priv, pipe) {
5440 I915_WRITE(TRANS_CHICKEN1(pipe),
5441 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5445 static void gen6_check_mch_setup(struct drm_device *dev)
5447 struct drm_i915_private *dev_priv = dev->dev_private;
5450 tmp = I915_READ(MCH_SSKPD);
5451 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
5452 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
5456 static void gen6_init_clock_gating(struct drm_device *dev)
5458 struct drm_i915_private *dev_priv = dev->dev_private;
5459 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5461 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5463 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5464 I915_READ(ILK_DISPLAY_CHICKEN2) |
5465 ILK_ELPIN_409_SELECT);
5467 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5468 I915_WRITE(_3D_CHICKEN,
5469 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5471 /* WaSetupGtModeTdRowDispatch:snb */
5472 if (IS_SNB_GT1(dev))
5473 I915_WRITE(GEN6_GT_MODE,
5474 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5476 /* WaDisable_RenderCache_OperationalFlush:snb */
5477 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5480 * BSpec recommends 8x4 when MSAA is used,
5481 * however in practice 16x4 seems fastest.
5483 * Note that PS/WM thread counts depend on the WIZ hashing
5484 * disable bit, which we don't touch here, but it's good
5485 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5487 I915_WRITE(GEN6_GT_MODE,
5488 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5490 ilk_init_lp_watermarks(dev);
5492 I915_WRITE(CACHE_MODE_0,
5493 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5495 I915_WRITE(GEN6_UCGCTL1,
5496 I915_READ(GEN6_UCGCTL1) |
5497 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5498 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5500 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5501 * gating disable must be set. Failure to set it results in
5502 * flickering pixels due to Z write ordering failures after
5503 * some amount of runtime in the Mesa "fire" demo, and Unigine
5504 * Sanctuary and Tropics, and apparently anything else with
5505 * alpha test or pixel discard.
5507 * According to the spec, bit 11 (RCCUNIT) must also be set,
5508 * but we didn't debug actual testcases to find it out.
5510 * WaDisableRCCUnitClockGating:snb
5511 * WaDisableRCPBUnitClockGating:snb
5513 I915_WRITE(GEN6_UCGCTL2,
5514 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5515 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5517 /* WaStripsFansDisableFastClipPerformanceFix:snb */
5518 I915_WRITE(_3D_CHICKEN3,
5519 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5523 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
5524 * 3DSTATE_SF number of SF output attributes is more than 16."
5526 I915_WRITE(_3D_CHICKEN3,
5527 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5530 * According to the spec the following bits should be
5531 * set in order to enable memory self-refresh and fbc:
5532 * The bit21 and bit22 of 0x42000
5533 * The bit21 and bit22 of 0x42004
5534 * The bit5 and bit7 of 0x42020
5535 * The bit14 of 0x70180
5536 * The bit14 of 0x71180
5538 * WaFbcAsynchFlipDisableFbcQueue:snb
5540 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5541 I915_READ(ILK_DISPLAY_CHICKEN1) |
5542 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5543 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5544 I915_READ(ILK_DISPLAY_CHICKEN2) |
5545 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5546 I915_WRITE(ILK_DSPCLK_GATE_D,
5547 I915_READ(ILK_DSPCLK_GATE_D) |
5548 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5549 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5551 g4x_disable_trickle_feed(dev);
5553 cpt_init_clock_gating(dev);
5555 gen6_check_mch_setup(dev);
5558 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5560 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5563 * WaVSThreadDispatchOverride:ivb,vlv
5565 * This actually overrides the dispatch
5566 * mode for all thread types.
5568 reg &= ~GEN7_FF_SCHED_MASK;
5569 reg |= GEN7_FF_TS_SCHED_HW;
5570 reg |= GEN7_FF_VS_SCHED_HW;
5571 reg |= GEN7_FF_DS_SCHED_HW;
5573 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5576 static void lpt_init_clock_gating(struct drm_device *dev)
5578 struct drm_i915_private *dev_priv = dev->dev_private;
5581 * TODO: this bit should only be enabled when really needed, then
5582 * disabled when not needed anymore in order to save power.
5584 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5585 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5586 I915_READ(SOUTH_DSPCLK_GATE_D) |
5587 PCH_LP_PARTITION_LEVEL_DISABLE);
5589 /* WADPOClockGatingDisable:hsw */
5590 I915_WRITE(_TRANSA_CHICKEN1,
5591 I915_READ(_TRANSA_CHICKEN1) |
5592 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5595 static void lpt_suspend_hw(struct drm_device *dev)
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5599 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5600 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5602 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5603 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5607 static void broadwell_init_clock_gating(struct drm_device *dev)
5609 struct drm_i915_private *dev_priv = dev->dev_private;
5612 I915_WRITE(WM3_LP_ILK, 0);
5613 I915_WRITE(WM2_LP_ILK, 0);
5614 I915_WRITE(WM1_LP_ILK, 0);
5616 /* FIXME(BDW): Check all the w/a, some might only apply to
5617 * pre-production hw. */
5620 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5622 I915_WRITE(_3D_CHICKEN3,
5623 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5626 /* WaSwitchSolVfFArbitrationPriority:bdw */
5627 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5629 /* WaPsrDPAMaskVBlankInSRD:bdw */
5630 I915_WRITE(CHICKEN_PAR1_1,
5631 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5633 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5634 for_each_pipe(dev_priv, pipe) {
5635 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5636 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5637 BDW_DPRS_MASK_VBLANK_SRD);
5640 /* WaVSRefCountFullforceMissDisable:bdw */
5641 /* WaDSRefCountFullforceMissDisable:bdw */
5642 I915_WRITE(GEN7_FF_THREAD_MODE,
5643 I915_READ(GEN7_FF_THREAD_MODE) &
5644 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5646 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5647 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5649 /* WaDisableSDEUnitClockGating:bdw */
5650 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5651 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5653 lpt_init_clock_gating(dev);
5656 static void haswell_init_clock_gating(struct drm_device *dev)
5658 struct drm_i915_private *dev_priv = dev->dev_private;
5660 ilk_init_lp_watermarks(dev);
5662 /* L3 caching of data atomics doesn't work -- disable it. */
5663 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5664 I915_WRITE(HSW_ROW_CHICKEN3,
5665 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5667 /* This is required by WaCatErrorRejectionIssue:hsw */
5668 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5669 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5670 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5672 /* WaVSRefCountFullforceMissDisable:hsw */
5673 I915_WRITE(GEN7_FF_THREAD_MODE,
5674 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5676 /* WaDisable_RenderCache_OperationalFlush:hsw */
5677 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5679 /* enable HiZ Raw Stall Optimization */
5680 I915_WRITE(CACHE_MODE_0_GEN7,
5681 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5683 /* WaDisable4x2SubspanOptimization:hsw */
5684 I915_WRITE(CACHE_MODE_1,
5685 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5688 * BSpec recommends 8x4 when MSAA is used,
5689 * however in practice 16x4 seems fastest.
5691 * Note that PS/WM thread counts depend on the WIZ hashing
5692 * disable bit, which we don't touch here, but it's good
5693 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5695 I915_WRITE(GEN7_GT_MODE,
5696 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5698 /* WaSwitchSolVfFArbitrationPriority:hsw */
5699 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5701 /* WaRsPkgCStateDisplayPMReq:hsw */
5702 I915_WRITE(CHICKEN_PAR1_1,
5703 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5705 lpt_init_clock_gating(dev);
5708 static void ivybridge_init_clock_gating(struct drm_device *dev)
5710 struct drm_i915_private *dev_priv = dev->dev_private;
5713 ilk_init_lp_watermarks(dev);
5715 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5717 /* WaDisableEarlyCull:ivb */
5718 I915_WRITE(_3D_CHICKEN3,
5719 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5721 /* WaDisableBackToBackFlipFix:ivb */
5722 I915_WRITE(IVB_CHICKEN3,
5723 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5724 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5726 /* WaDisablePSDDualDispatchEnable:ivb */
5727 if (IS_IVB_GT1(dev))
5728 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5729 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5731 /* WaDisable_RenderCache_OperationalFlush:ivb */
5732 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5734 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5735 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5736 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5738 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5739 I915_WRITE(GEN7_L3CNTLREG1,
5740 GEN7_WA_FOR_GEN7_L3_CONTROL);
5741 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5742 GEN7_WA_L3_CHICKEN_MODE);
5743 if (IS_IVB_GT1(dev))
5744 I915_WRITE(GEN7_ROW_CHICKEN2,
5745 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5747 /* must write both registers */
5748 I915_WRITE(GEN7_ROW_CHICKEN2,
5749 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5750 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5751 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5754 /* WaForceL3Serialization:ivb */
5755 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5756 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5759 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5760 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5762 I915_WRITE(GEN6_UCGCTL2,
5763 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5765 /* This is required by WaCatErrorRejectionIssue:ivb */
5766 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5767 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5768 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5770 g4x_disable_trickle_feed(dev);
5772 gen7_setup_fixed_func_scheduler(dev_priv);
5774 if (0) { /* causes HiZ corruption on ivb:gt1 */
5775 /* enable HiZ Raw Stall Optimization */
5776 I915_WRITE(CACHE_MODE_0_GEN7,
5777 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5780 /* WaDisable4x2SubspanOptimization:ivb */
5781 I915_WRITE(CACHE_MODE_1,
5782 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5785 * BSpec recommends 8x4 when MSAA is used,
5786 * however in practice 16x4 seems fastest.
5788 * Note that PS/WM thread counts depend on the WIZ hashing
5789 * disable bit, which we don't touch here, but it's good
5790 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5792 I915_WRITE(GEN7_GT_MODE,
5793 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5795 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5796 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5797 snpcr |= GEN6_MBC_SNPCR_MED;
5798 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5800 if (!HAS_PCH_NOP(dev))
5801 cpt_init_clock_gating(dev);
5803 gen6_check_mch_setup(dev);
5806 static void valleyview_init_clock_gating(struct drm_device *dev)
5808 struct drm_i915_private *dev_priv = dev->dev_private;
5810 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5812 /* WaDisableEarlyCull:vlv */
5813 I915_WRITE(_3D_CHICKEN3,
5814 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5816 /* WaDisableBackToBackFlipFix:vlv */
5817 I915_WRITE(IVB_CHICKEN3,
5818 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5819 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5821 /* WaPsdDispatchEnable:vlv */
5822 /* WaDisablePSDDualDispatchEnable:vlv */
5823 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5824 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5825 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5827 /* WaDisable_RenderCache_OperationalFlush:vlv */
5828 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5830 /* WaForceL3Serialization:vlv */
5831 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5832 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5834 /* WaDisableDopClockGating:vlv */
5835 I915_WRITE(GEN7_ROW_CHICKEN2,
5836 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5838 /* This is required by WaCatErrorRejectionIssue:vlv */
5839 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5840 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5841 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5843 gen7_setup_fixed_func_scheduler(dev_priv);
5846 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5847 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5849 I915_WRITE(GEN6_UCGCTL2,
5850 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5852 /* WaDisableL3Bank2xClockGate:vlv
5853 * Disabling L3 clock gating - MMIO 940c[25] = 1
5854 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
5855 I915_WRITE(GEN7_UCGCTL4,
5856 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5858 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5861 * BSpec says this must be set, even though
5862 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5864 I915_WRITE(CACHE_MODE_1,
5865 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5868 * WaIncreaseL3CreditsForVLVB0:vlv
5869 * This is the hardware default actually.
5871 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5874 * WaDisableVLVClockGating_VBIIssue:vlv
5875 * Disable clock gating on the GCFG unit to prevent a delay
5876 * in the reporting of vblank events.
5878 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5881 static void cherryview_init_clock_gating(struct drm_device *dev)
5883 struct drm_i915_private *dev_priv = dev->dev_private;
5885 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5887 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5889 /* WaVSRefCountFullforceMissDisable:chv */
5890 /* WaDSRefCountFullforceMissDisable:chv */
5891 I915_WRITE(GEN7_FF_THREAD_MODE,
5892 I915_READ(GEN7_FF_THREAD_MODE) &
5893 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5895 /* WaDisableSemaphoreAndSyncFlipWait:chv */
5896 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5897 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5899 /* WaDisableCSUnitClockGating:chv */
5900 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5901 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5903 /* WaDisableSDEUnitClockGating:chv */
5904 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5905 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5907 /* WaDisableGunitClockGating:chv (pre-production hw) */
5908 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5911 /* WaDisableFfDopClockGating:chv (pre-production hw) */
5912 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5913 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5915 /* WaDisableDopClockGating:chv (pre-production hw) */
5916 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5917 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5920 static void g4x_init_clock_gating(struct drm_device *dev)
5922 struct drm_i915_private *dev_priv = dev->dev_private;
5923 uint32_t dspclk_gate;
5925 I915_WRITE(RENCLK_GATE_D1, 0);
5926 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5927 GS_UNIT_CLOCK_GATE_DISABLE |
5928 CL_UNIT_CLOCK_GATE_DISABLE);
5929 I915_WRITE(RAMCLK_GATE_D, 0);
5930 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5931 OVRUNIT_CLOCK_GATE_DISABLE |
5932 OVCUNIT_CLOCK_GATE_DISABLE;
5934 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5935 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5937 /* WaDisableRenderCachePipelinedFlush */
5938 I915_WRITE(CACHE_MODE_0,
5939 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5941 /* WaDisable_RenderCache_OperationalFlush:g4x */
5942 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5944 g4x_disable_trickle_feed(dev);
5947 static void crestline_init_clock_gating(struct drm_device *dev)
5949 struct drm_i915_private *dev_priv = dev->dev_private;
5951 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5952 I915_WRITE(RENCLK_GATE_D2, 0);
5953 I915_WRITE(DSPCLK_GATE_D, 0);
5954 I915_WRITE(RAMCLK_GATE_D, 0);
5955 I915_WRITE16(DEUC, 0);
5956 I915_WRITE(MI_ARB_STATE,
5957 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5959 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5960 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5963 static void broadwater_init_clock_gating(struct drm_device *dev)
5965 struct drm_i915_private *dev_priv = dev->dev_private;
5967 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5968 I965_RCC_CLOCK_GATE_DISABLE |
5969 I965_RCPB_CLOCK_GATE_DISABLE |
5970 I965_ISC_CLOCK_GATE_DISABLE |
5971 I965_FBC_CLOCK_GATE_DISABLE);
5972 I915_WRITE(RENCLK_GATE_D2, 0);
5973 I915_WRITE(MI_ARB_STATE,
5974 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5976 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5977 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5980 static void gen3_init_clock_gating(struct drm_device *dev)
5982 struct drm_i915_private *dev_priv = dev->dev_private;
5983 u32 dstate = I915_READ(D_STATE);
5985 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5986 DSTATE_DOT_CLOCK_GATING;
5987 I915_WRITE(D_STATE, dstate);
5989 if (IS_PINEVIEW(dev))
5990 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5992 /* IIR "flip pending" means done if this bit is set */
5993 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5995 /* interrupts should cause a wake up from C3 */
5996 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
5998 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5999 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6001 I915_WRITE(MI_ARB_STATE,
6002 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6005 static void i85x_init_clock_gating(struct drm_device *dev)
6007 struct drm_i915_private *dev_priv = dev->dev_private;
6009 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
6011 /* interrupts should cause a wake up from C3 */
6012 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
6013 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6015 I915_WRITE(MEM_MODE,
6016 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
6019 static void i830_init_clock_gating(struct drm_device *dev)
6021 struct drm_i915_private *dev_priv = dev->dev_private;
6023 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6025 I915_WRITE(MEM_MODE,
6026 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6027 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
6030 void intel_init_clock_gating(struct drm_device *dev)
6032 struct drm_i915_private *dev_priv = dev->dev_private;
6034 dev_priv->display.init_clock_gating(dev);
6037 void intel_suspend_hw(struct drm_device *dev)
6039 if (HAS_PCH_LPT(dev))
6040 lpt_suspend_hw(dev);
6043 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
6045 i < (power_domains)->power_well_count && \
6046 ((power_well) = &(power_domains)->power_wells[i]); \
6048 if ((power_well)->domains & (domain_mask))
6050 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6051 for (i = (power_domains)->power_well_count - 1; \
6052 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6054 if ((power_well)->domains & (domain_mask))
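/* Typical use, mirroring intel_display_power_enabled_unlocked()
 * below:
 *
 *	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
 *		... visit each well feeding this domain, highest index first ...
 */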
6057 * We should only use the power well if we explicitly asked the hardware to
6058 * enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
6061 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6062 struct i915_power_well *power_well)
6064 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6065 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
6068 bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
6069 enum intel_display_power_domain domain)
6071 struct i915_power_domains *power_domains;
6072 struct i915_power_well *power_well;
6076 if (dev_priv->pm.suspended)
6079 power_domains = &dev_priv->power_domains;
6083 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6084 if (power_well->always_on)
6087 if (!power_well->hw_enabled) {
6096 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
6097 enum intel_display_power_domain domain)
6099 struct i915_power_domains *power_domains;
6102 power_domains = &dev_priv->power_domains;
6104 mutex_lock(&power_domains->lock);
6105 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
6106 mutex_unlock(&power_domains->lock);
6112 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6113 * when not needed anymore. We have 4 registers that can request the power well
6114 * to be enabled, and it will only be disabled if none of the registers is
6115 * requesting it to be enabled.
6117 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6119 struct drm_device *dev = dev_priv->dev;
6122 * After we re-enable the power well, if we touch VGA register 0x3d5
6123 * we'll get unclaimed register interrupts. This stops after we write
6124 * anything to the VGA MSR register. The vgacon module uses this
6125 * register all the time, so if we unbind our driver and, as a
6126 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6127 * console_unlock(). So here we touch the VGA MSR register, making
6128 * sure vgacon can keep working normally without triggering interrupts
6129 * and error messages.
6131 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6132 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6133 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6135 if (IS_BROADWELL(dev))
6136 gen8_irq_power_well_post_enable(dev_priv);
6139 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6140 struct i915_power_well *power_well, bool enable)
6142 bool is_enabled, enable_requested;
6145 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6146 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6147 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6150 if (!enable_requested)
6151 I915_WRITE(HSW_PWR_WELL_DRIVER,
6152 HSW_PWR_WELL_ENABLE_REQUEST);
6155 DRM_DEBUG_KMS("Enabling power well\n");
6156 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6157 HSW_PWR_WELL_STATE_ENABLED), 20))
6158 DRM_ERROR("Timeout enabling power well\n");
6161 hsw_power_well_post_enable(dev_priv);
6163 if (enable_requested) {
6164 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6165 POSTING_READ(HSW_PWR_WELL_DRIVER);
6166 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6171 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6172 struct i915_power_well *power_well)
6174 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6177 * We're taking over the BIOS, so clear any requests made by it since
6178 * the driver is in charge now.
6180 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6181 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6184 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6185 struct i915_power_well *power_well)
6187 hsw_set_power_well(dev_priv, power_well, true);
6190 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6191 struct i915_power_well *power_well)
6193 hsw_set_power_well(dev_priv, power_well, false);
6196 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6197 struct i915_power_well *power_well)
6201 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6202 struct i915_power_well *power_well)
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * parts of the HW/SW state that will be re-initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
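/*
 * Usage sketch (illustrative, not wired into the driver): callers bracket
 * hardware access with a get/put pair on the domain they touch, and the
 * reference counting above keeps the backing wells powered in between.
 * POWER_DOMAIN_AUDIO is used here purely as an example domain.
 */
static void __maybe_unused example_touch_audio_hw(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);

	/* ... access registers backed by the audio domain here ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}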
static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
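/*
 * Illustrative flow for the external audio driver (a sketch, not the HDA
 * driver's actual code): the power well must be held across the CDCLK
 * query, using only the three exported entry points above.
 */
static int __maybe_unused example_audio_query_cdclk(void)
{
	int ret, freq;

	ret = i915_request_power_well();
	if (ret)
		return ret;

	freq = i915_get_cdclk_freq();

	i915_release_power_well();

	return freq;
}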
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
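/*
 * The masks above tie each domain bit to the wells that implement it: a
 * well serves a domain iff (power_well->domains & BIT(domain)) is set,
 * which is exactly what for_each_power_well() filters on. A hypothetical
 * helper counting the wells that back one domain (illustration only, not
 * used by the driver):
 */
static int __maybe_unused count_wells_for_domain(struct i915_power_domains *power_domains,
						 enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	int i, count = 0;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		count++;

	return count;
}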
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
};
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
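/*
 * Usage sketch for the mailbox helpers above (illustration only): callers
 * hold rps.hw_lock around the transaction, and for reads the pre-loaded
 * *val acts as the request payload. GEN6_PCODE_READ_MIN_FREQ_TABLE is used
 * here merely as an example command; treat the command choice as an
 * assumption, not a recommendation.
 */
static int __maybe_unused example_pcode_roundtrip(struct drm_i915_private *dev_priv)
{
	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv,
				     GEN6_PCODE_READ_MIN_FREQ_TABLE, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("pcode returned %08x\n", val);
	return 0;
}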
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
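/*
 * Worked example for the two conversions above. The numbers assume the
 * 1333 MHz divider reconstructed in the switch (div = mul = 16), so treat
 * them as illustrative: opcode 0xc3 maps to
 *   DIV_ROUND_CLOSEST(1333 * (0xc3 + 6 - 0xbd), 4 * 16)
 *     = DIV_ROUND_CLOSEST(1333 * 12, 64) = 250 MHz,
 * and byt_freq_opcode() inverts it:
 *   DIV_ROUND_CLOSEST(4 * 16 * 250, 1333) + 0xbd - 6 = 12 + 0xb7 = 0xc3.
 */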
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 320:
		div = 8;
		break;
	case 333:
		div = 8;
		break;
	case 400:
		div = 10;
		break;
	default:
		return -1;
	}

	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);

	return freq;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 320:
		mul = 8;
		break;
	case 333:
		mul = 8;
		break;
	case 400:
		mul = 10;
		break;
	default:
		return -1;
	}

	/* CHV needs even values */
	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);

	return opcode;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm._irqs_disabled = false;
}