/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
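/*
 * Illustrative sketch (not part of the driver): the flags above are meant
 * to be OR'ed together into a single mask describing which RC6 states the
 * hardware may enter. For example, a mask allowing RC6 and deep RC6 but
 * not the deepest state would be built as:
 *
 *	rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 */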
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
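/*
 * Usage note (illustrative): the parameter can be given on the kernel
 * command line, e.g. "i915.enable_fbc=1" to force FBC on, "0" to force it
 * off, or "-1" to use the per-chip default (see the checks on
 * i915.enable_fbc in intel_update_fbc() below).
 */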
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for the compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure the blitter notifies FBC of writes */

	/* The blitter is part of the Media powerwell on VLV. This parameter
	 * has no impact on other platforms for now. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;
	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;
	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533: dev_priv->fsb_freq = 533; break; /* 133*4 */
	case CLKCFG_FSB_800: dev_priv->fsb_freq = 800; break; /* 200*4 */
	case CLKCFG_FSB_667: dev_priv->fsb_freq = 667; break; /* 167*4 */
	case CLKCFG_FSB_400: dev_priv->fsb_freq = 400; break; /* 100*4 */
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533: dev_priv->mem_freq = 533; break;
	case CLKCFG_MEM_667: dev_priv->mem_freq = 667; break;
	case CLKCFG_MEM_800: dev_priv->mem_freq = 800; break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc: dev_priv->mem_freq = 800; break;
	case 0x10: dev_priv->mem_freq = 1066; break;
	case 0x14: dev_priv->mem_freq = 1333; break;
	case 0x18: dev_priv->mem_freq = 1600; break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c: dev_priv->fsb_freq = 3200; break;
	case 0x00e: dev_priv->fsb_freq = 3733; break;
	case 0x010: dev_priv->fsb_freq = 4266; break;
	case 0x012: dev_priv->fsb_freq = 4800; break;
	case 0x014: dev_priv->fsb_freq = 5333; break;
	case 0x016: dev_priv->fsb_freq = 5866; break;
	case 0x018: dev_priv->fsb_freq = 6400; break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
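/*
 * Worked example (illustrative): at a 148.5 MHz pixel clock and 4 bytes
 * per pixel, the display FIFO drains at roughly 594 bytes/us, so a 5 us
 * memory latency corresponds to about 3 KiB of data that must still be
 * queued in the FIFO when a fetch is issued -- this is the quantity that
 * intel_calculate_wm() below converts into FIFO entries.
 */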
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point.  If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
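/*
 * Worked example (illustrative, not from the driver): for a 148500 kHz
 * pixel clock, 4 bytes/pixel, latency_ns = 5000 and 64-byte cachelines:
 *
 *	entries_required = ((148500 / 1000) * 4 * 5000) / 1000
 *	                 = (148 * 4 * 5000) / 1000    = 2960 bytes
 *	entries_required = DIV_ROUND_UP(2960, 64)     = 47 cachelines
 *
 * With a hypothetical 95-entry FIFO and a guard size of 2, the resulting
 * watermark would be 95 - (47 + 2) = 46 entries.
 */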
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
				       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}
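/*
 * Worked example (illustrative): for a 148500 kHz pixel clock and 4 bytes
 * per pixel, entries = DIV_ROUND_UP(148500, 1000) * 4 = 596. Since
 * 596 > 128 the 64x precision multiplier is chosen, giving
 * drain_latency = (64 * 64 * 4) / 596 = 27.
 */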
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_PLANE_PRECISION_64 :
					   DDL_PLANE_PRECISION_32;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_CURSOR_PRECISION_64 :
					   DDL_CURSOR_PRECISION_32;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
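/*
 * Illustrative note: with pipe bits collected as "enabled |= 1 << PIPE_x"
 * below, single_plane_enabled(BIT(PIPE_A)) is true, while
 * single_plane_enabled(BIT(PIPE_A) | BIT(PIPE_B)) is false, since only a
 * power of two has exactly one bit set.
 */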
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_SPRITE_PRECISION_64(sprite) :
					   DDL_SPRITE_PRECISION_32(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
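/*
 * Worked example (illustrative): method 1 above converts the bytes drained
 * during the latency window into 64-byte FIFO entries. For
 * pixel_rate = 148500 (kHz), 4 bytes/pixel and latency = 20 (i.e. 2 us in
 * 0.1us units):
 *
 *	ret = 148500 * 4 * 20 = 11880000
 *	ret = DIV_ROUND_UP_ULL(11880000, 64 * 10000) + 2 = 19 + 2 = 21
 */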
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
2105 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2107 struct ilk_wm_maximums *max)
2109 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2110 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2111 max->cur = ilk_cursor_wm_reg_max(dev, level);
2112 max->fbc = ilk_fbc_wm_reg_max(dev);
2115 static bool ilk_validate_wm_level(int level,
2116 const struct ilk_wm_maximums *max,
2117 struct intel_wm_level *result)
2121 /* already determined to be invalid? */
2122 if (!result->enable)
2125 result->enable = result->pri_val <= max->pri &&
2126 result->spr_val <= max->spr &&
2127 result->cur_val <= max->cur;
2129 ret = result->enable;
2132 * HACK until we can pre-compute everything,
2133 * and thus fail gracefully if LP0 watermarks
2134 * are invalid.
2136 if (level == 0 && !result->enable) {
2137 if (result->pri_val > max->pri)
2138 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2139 level, result->pri_val, max->pri);
2140 if (result->spr_val > max->spr)
2141 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2142 level, result->spr_val, max->spr);
2143 if (result->cur_val > max->cur)
2144 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2145 level, result->cur_val, max->cur);
2147 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2148 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2149 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2150 result->enable = true;
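/*
 * Note the asymmetry above: for LP1+ levels an out-of-range value
 * simply disables the level, but WM0 has no "disabled" state, so its
 * values are clamped to the register maxima (after logging) and the
 * level is forced on. Illustrative: with max->pri == 127 and a
 * computed pri_val of 200,
 *
 *	level 0:  pri_val = 127, enable = true   (clamped)
 *	level 1+: enable = false                 (level dropped)
 */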
2156 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2158 const struct ilk_pipe_wm_parameters *p,
2159 struct intel_wm_level *result)
2161 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2162 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2163 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2165 /* WM1+ latency values stored in 0.5us units */
2172 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2173 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2174 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2175 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2176 result->enable = true;
2180 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2182 struct drm_i915_private *dev_priv = dev->dev_private;
2183 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2184 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2185 u32 linetime, ips_linetime;
2187 if (!intel_crtc_active(crtc))
2190 /* The WMs are computed based on how long it takes to fill a single
2191 * row at the given clock rate, multiplied by 8.
2193 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2195 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2196 intel_ddi_get_cdclk_freq(dev_priv));
2198 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2199 PIPE_WM_LINETIME_TIME(linetime);
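/*
 * Worked example for the linetime fields above, with a hypothetical
 * 1080p mode (crtc_htotal = 2200, pixel clock = 148500 kHz):
 *
 *	linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. ~14.8 us per scanout line, stored in 1/8 us units. The
 * ips_linetime value is the same formula evaluated against the CD
 * clock instead of the pixel clock.
 */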
2202 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2204 struct drm_i915_private *dev_priv = dev->dev_private;
2206 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2207 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2209 wm[0] = (sskpd >> 56) & 0xFF;
2210 if (wm[0] == 0)
2211 wm[0] = sskpd & 0xF;
2212 wm[1] = (sskpd >> 4) & 0xFF;
2213 wm[2] = (sskpd >> 12) & 0xFF;
2214 wm[3] = (sskpd >> 20) & 0x1FF;
2215 wm[4] = (sskpd >> 32) & 0x1FF;
2216 } else if (INTEL_INFO(dev)->gen >= 6) {
2217 uint32_t sskpd = I915_READ(MCH_SSKPD);
2219 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2220 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2221 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2222 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2223 } else if (INTEL_INFO(dev)->gen >= 5) {
2224 uint32_t mltr = I915_READ(MLTR_ILK);
2226 /* ILK primary LP0 latency is 700 ns */
2227 wm[0] = 7;
2228 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2229 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
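/*
 * Illustrative decode of a made-up HSW/BDW SSKPD value,
 * sskpd = 0x000000c008040206, using the shifts above:
 *
 *	wm[0] = (sskpd >> 56) & 0xff  = 0 -> falls back to sskpd & 0xf = 6
 *	wm[1] = (sskpd >>  4) & 0xff  = 0x20
 *	wm[2] = (sskpd >> 12) & 0xff  = 0x40
 *	wm[3] = (sskpd >> 20) & 0x1ff = 0x80
 *	wm[4] = (sskpd >> 32) & 0x1ff = 0xc0
 *
 * wm[0] is in 0.1 us units, wm[1]+ in 0.5 us units (see the latency
 * printing below).
 */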
2233 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2235 /* ILK sprite LP0 latency is 1300 ns */
2236 if (INTEL_INFO(dev)->gen == 5)
2237 wm[0] = 13;
2240 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2242 /* ILK cursor LP0 latency is 1300 ns */
2243 if (INTEL_INFO(dev)->gen == 5)
2244 wm[0] = 13;
2246 /* WaDoubleCursorLP3Latency:ivb */
2247 if (IS_IVYBRIDGE(dev))
2248 wm[3] *= 2;
2251 int ilk_wm_max_level(const struct drm_device *dev)
2253 /* how many WM levels are we expecting */
2254 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2255 return 4;
2256 else if (INTEL_INFO(dev)->gen >= 6)
2257 return 3;
2258 else
2259 return 2;
2261 static void intel_print_wm_latency(struct drm_device *dev,
2263 const uint16_t wm[5])
2265 int level, max_level = ilk_wm_max_level(dev);
2267 for (level = 0; level <= max_level; level++) {
2268 unsigned int latency = wm[level];
2271 DRM_ERROR("%s WM%d latency not provided\n",
2276 /* WM1+ latency values in 0.5us units */
2277 if (level > 0)
2278 latency *= 5;
2280 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2281 name, level, wm[level],
2282 latency / 10, latency % 10);
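/*
 * Illustrative: a raw WM1 value of 0x20 (32) is printed as
 * 32 * 5 / 10 = "16.0 usec", while a raw WM0 value of 12 is printed
 * unscaled as "1.2 usec" - the loop above normalizes everything to
 * 0.1 us units before printing.
 */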
2286 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2287 uint16_t wm[5], uint16_t min)
2289 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2294 wm[0] = max(wm[0], min);
2295 for (level = 1; level <= max_level; level++)
2296 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2301 static void snb_wm_latency_quirk(struct drm_device *dev)
2303 struct drm_i915_private *dev_priv = dev->dev_private;
2307 * The BIOS-provided WM memory latency values are often
2308 * inadequate for high-resolution displays. Adjust them.
2310 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2311 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2312 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2317 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2318 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2319 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2320 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2323 static void ilk_setup_wm_latency(struct drm_device *dev)
2325 struct drm_i915_private *dev_priv = dev->dev_private;
2327 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2329 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2330 sizeof(dev_priv->wm.pri_latency));
2331 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2332 sizeof(dev_priv->wm.pri_latency));
2334 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2335 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2337 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2338 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2339 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2342 snb_wm_latency_quirk(dev);
2345 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2346 struct ilk_pipe_wm_parameters *p)
2348 struct drm_device *dev = crtc->dev;
2349 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2350 enum pipe pipe = intel_crtc->pipe;
2351 struct drm_plane *plane;
2353 if (!intel_crtc_active(crtc))
2357 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2358 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2359 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2360 p->cur.bytes_per_pixel = 4;
2361 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2362 p->cur.horiz_pixels = intel_crtc->cursor_width;
2363 /* TODO: for now, assume primary and cursor planes are always enabled. */
2364 p->pri.enabled = true;
2365 p->cur.enabled = true;
2367 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2368 struct intel_plane *intel_plane = to_intel_plane(plane);
2370 if (intel_plane->pipe == pipe) {
2371 p->spr = intel_plane->wm;
2377 static void ilk_compute_wm_config(struct drm_device *dev,
2378 struct intel_wm_config *config)
2380 struct intel_crtc *intel_crtc;
2382 /* Compute the currently _active_ config */
2383 for_each_intel_crtc(dev, intel_crtc) {
2384 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2386 if (!wm->pipe_enabled)
2389 config->sprites_enabled |= wm->sprites_enabled;
2390 config->sprites_scaled |= wm->sprites_scaled;
2391 config->num_pipes_active++;
2395 /* Compute new watermarks for the pipe */
2396 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2397 const struct ilk_pipe_wm_parameters *params,
2398 struct intel_pipe_wm *pipe_wm)
2400 struct drm_device *dev = crtc->dev;
2401 const struct drm_i915_private *dev_priv = dev->dev_private;
2402 int level, max_level = ilk_wm_max_level(dev);
2403 /* LP0 watermark maximums depend on this pipe alone */
2404 struct intel_wm_config config = {
2405 .num_pipes_active = 1,
2406 .sprites_enabled = params->spr.enabled,
2407 .sprites_scaled = params->spr.scaled,
2409 struct ilk_wm_maximums max;
2411 pipe_wm->pipe_enabled = params->active;
2412 pipe_wm->sprites_enabled = params->spr.enabled;
2413 pipe_wm->sprites_scaled = params->spr.scaled;
2415 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2416 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2419 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2420 if (params->spr.scaled)
2423 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2425 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2426 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2428 /* LP0 watermarks always use 1/2 DDB partitioning */
2429 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2431 /* At least LP0 must be valid */
2432 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2435 ilk_compute_wm_reg_maximums(dev, 1, &max);
2437 for (level = 1; level <= max_level; level++) {
2438 struct intel_wm_level wm = {};
2440 ilk_compute_wm_level(dev_priv, level, params, &wm);
2443 * Disable any watermark level that exceeds the
2444 * register maximums since such watermarks are
2447 if (!ilk_validate_wm_level(level, &max, &wm))
2450 pipe_wm->wm[level] = wm;
2457 * Merge the watermarks from all active pipes for a specific level.
2459 static void ilk_merge_wm_level(struct drm_device *dev,
2461 struct intel_wm_level *ret_wm)
2463 const struct intel_crtc *intel_crtc;
2465 ret_wm->enable = true;
2467 for_each_intel_crtc(dev, intel_crtc) {
2468 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2469 const struct intel_wm_level *wm = &active->wm[level];
2471 if (!active->pipe_enabled)
2475 * The watermark values may have been used in the past,
2476 * so we must maintain them in the registers for some
2477 * time even if the level is now disabled.
2480 ret_wm->enable = false;
2482 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2483 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2484 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2485 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2490 * Merge all low power watermarks for all active pipes.
2492 static void ilk_wm_merge(struct drm_device *dev,
2493 const struct intel_wm_config *config,
2494 const struct ilk_wm_maximums *max,
2495 struct intel_pipe_wm *merged)
2497 int level, max_level = ilk_wm_max_level(dev);
2498 int last_enabled_level = max_level;
2500 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2501 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2502 config->num_pipes_active > 1)
2505 /* ILK: FBC WM must be disabled always */
2506 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2508 /* merge each WM1+ level */
2509 for (level = 1; level <= max_level; level++) {
2510 struct intel_wm_level *wm = &merged->wm[level];
2512 ilk_merge_wm_level(dev, level, wm);
2514 if (level > last_enabled_level)
2516 else if (!ilk_validate_wm_level(level, max, wm))
2517 /* make sure all following levels get disabled */
2518 last_enabled_level = level - 1;
2521 * The spec says it is preferred to disable
2522 * FBC WMs instead of disabling a WM level.
2524 if (wm->fbc_val > max->fbc) {
2526 merged->fbc_wm_enabled = false;
2531 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2533 * FIXME this is racy. FBC might get enabled later.
2534 * What we should check here is whether FBC can be
2535 * enabled sometime later.
2537 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2538 for (level = 2; level <= max_level; level++) {
2539 struct intel_wm_level *wm = &merged->wm[level];
2546 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2548 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2549 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
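/*
 * Mapping produced by the expression above:
 *
 *	wm[4] enabled:	wm_lp 1 2 3 -> level 1 3 4
 *	otherwise:	wm_lp 1 2 3 -> level 1 2 3
 *
 * i.e. when all five HSW/BDW levels are available, WM_LP2/WM_LP3 carry
 * the two deepest levels and level 2 is skipped.
 */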
2552 /* The value we need to program into the WM_LPx latency field */
2553 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2557 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2558 return 2 * level;
2559 else
2560 return dev_priv->wm.pri_latency[level];
2563 static void ilk_compute_wm_results(struct drm_device *dev,
2564 const struct intel_pipe_wm *merged,
2565 enum intel_ddb_partitioning partitioning,
2566 struct ilk_wm_values *results)
2568 struct intel_crtc *intel_crtc;
2571 results->enable_fbc_wm = merged->fbc_wm_enabled;
2572 results->partitioning = partitioning;
2574 /* LP1+ register values */
2575 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2576 const struct intel_wm_level *r;
2578 level = ilk_wm_lp_to_level(wm_lp, merged);
2580 r = &merged->wm[level];
2583 * Maintain the watermark values even if the level is
2584 * disabled. Doing otherwise could cause underruns.
2586 results->wm_lp[wm_lp - 1] =
2587 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2588 (r->pri_val << WM1_LP_SR_SHIFT) |
2592 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2594 if (INTEL_INFO(dev)->gen >= 8)
2595 results->wm_lp[wm_lp - 1] |=
2596 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2598 results->wm_lp[wm_lp - 1] |=
2599 r->fbc_val << WM1_LP_FBC_SHIFT;
2602 * Always set WM1S_LP_EN when spr_val != 0, even if the
2603 * level is disabled. Doing otherwise could cause underruns.
2605 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2606 WARN_ON(wm_lp != 1);
2607 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2609 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2612 /* LP0 register values */
2613 for_each_intel_crtc(dev, intel_crtc) {
2614 enum pipe pipe = intel_crtc->pipe;
2615 const struct intel_wm_level *r =
2616 &intel_crtc->wm.active.wm[0];
2618 if (WARN_ON(!r->enable))
2621 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2623 results->wm_pipe[pipe] =
2624 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2625 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2630 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2631 * case both are at the same level. Prefer r1 in case they're the same. */
2632 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2633 struct intel_pipe_wm *r1,
2634 struct intel_pipe_wm *r2)
2636 int level, max_level = ilk_wm_max_level(dev);
2637 int level1 = 0, level2 = 0;
2639 for (level = 1; level <= max_level; level++) {
2640 if (r1->wm[level].enable)
2642 if (r2->wm[level].enable)
2646 if (level1 == level2) {
2647 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2651 } else if (level1 > level2) {
2658 /* dirty bits used to track which watermarks need changes */
2659 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2660 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2661 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2662 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2663 #define WM_DIRTY_FBC (1 << 24)
2664 #define WM_DIRTY_DDB (1 << 25)
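/*
 * Bit layout of the dirty mask built from the defines above (three
 * pipes assumed):
 *
 *	bits  0-2	WM_DIRTY_PIPE(A/B/C)
 *	bits  8-10	WM_DIRTY_LINETIME(A/B/C)
 *	bits 16-18	WM_DIRTY_LP(1..3)
 *	bit  24		WM_DIRTY_FBC
 *	bit  25		WM_DIRTY_DDB
 */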
2666 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2667 const struct ilk_wm_values *old,
2668 const struct ilk_wm_values *new)
2670 unsigned int dirty = 0;
2674 for_each_pipe(dev_priv, pipe) {
2675 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2676 dirty |= WM_DIRTY_LINETIME(pipe);
2677 /* Must disable LP1+ watermarks too */
2678 dirty |= WM_DIRTY_LP_ALL;
2681 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2682 dirty |= WM_DIRTY_PIPE(pipe);
2683 /* Must disable LP1+ watermarks too */
2684 dirty |= WM_DIRTY_LP_ALL;
2688 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2689 dirty |= WM_DIRTY_FBC;
2690 /* Must disable LP1+ watermarks too */
2691 dirty |= WM_DIRTY_LP_ALL;
2694 if (old->partitioning != new->partitioning) {
2695 dirty |= WM_DIRTY_DDB;
2696 /* Must disable LP1+ watermarks too */
2697 dirty |= WM_DIRTY_LP_ALL;
2700 /* LP1+ watermarks already deemed dirty, no need to continue */
2701 if (dirty & WM_DIRTY_LP_ALL)
2704 /* Find the lowest numbered LP1+ watermark in need of an update... */
2705 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2706 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2707 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2711 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2712 for (; wm_lp <= 3; wm_lp++)
2713 dirty |= WM_DIRTY_LP(wm_lp);
2718 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2721 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2722 bool changed = false;
2724 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2725 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2726 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2729 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2730 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2731 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2734 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2735 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2736 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2741 * Don't touch WM1S_LP_EN here.
2742 * Doing so could cause underruns.
2749 * The spec says we shouldn't write when we don't need to, because every write
2750 * causes WMs to be re-evaluated, expending some power.
2752 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2753 struct ilk_wm_values *results)
2755 struct drm_device *dev = dev_priv->dev;
2756 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2760 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2764 _ilk_disable_lp_wm(dev_priv, dirty);
2766 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2767 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2768 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2769 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2770 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2771 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2773 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2774 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2775 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2776 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2777 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2778 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2780 if (dirty & WM_DIRTY_DDB) {
2781 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2782 val = I915_READ(WM_MISC);
2783 if (results->partitioning == INTEL_DDB_PART_1_2)
2784 val &= ~WM_MISC_DATA_PARTITION_5_6;
2786 val |= WM_MISC_DATA_PARTITION_5_6;
2787 I915_WRITE(WM_MISC, val);
2789 val = I915_READ(DISP_ARB_CTL2);
2790 if (results->partitioning == INTEL_DDB_PART_1_2)
2791 val &= ~DISP_DATA_PARTITION_5_6;
2793 val |= DISP_DATA_PARTITION_5_6;
2794 I915_WRITE(DISP_ARB_CTL2, val);
2798 if (dirty & WM_DIRTY_FBC) {
2799 val = I915_READ(DISP_ARB_CTL);
2800 if (results->enable_fbc_wm)
2801 val &= ~DISP_FBC_WM_DIS;
2803 val |= DISP_FBC_WM_DIS;
2804 I915_WRITE(DISP_ARB_CTL, val);
2807 if (dirty & WM_DIRTY_LP(1) &&
2808 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2809 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2811 if (INTEL_INFO(dev)->gen >= 7) {
2812 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2813 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2814 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2815 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2818 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2819 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2820 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2821 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2822 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2823 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2825 dev_priv->wm.hw = *results;
2828 static bool ilk_disable_lp_wm(struct drm_device *dev)
2830 struct drm_i915_private *dev_priv = dev->dev_private;
2832 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2835 static void ilk_update_wm(struct drm_crtc *crtc)
2837 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2838 struct drm_device *dev = crtc->dev;
2839 struct drm_i915_private *dev_priv = dev->dev_private;
2840 struct ilk_wm_maximums max;
2841 struct ilk_pipe_wm_parameters params = {};
2842 struct ilk_wm_values results = {};
2843 enum intel_ddb_partitioning partitioning;
2844 struct intel_pipe_wm pipe_wm = {};
2845 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2846 struct intel_wm_config config = {};
2848 ilk_compute_wm_parameters(crtc, &params);
2850 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2852 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2855 intel_crtc->wm.active = pipe_wm;
2857 ilk_compute_wm_config(dev, &config);
2859 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2860 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2862 /* 5/6 split only in single pipe config on IVB+ */
2863 if (INTEL_INFO(dev)->gen >= 7 &&
2864 config.num_pipes_active == 1 && config.sprites_enabled) {
2865 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2866 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2868 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2870 best_lp_wm = &lp_wm_1_2;
2873 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2874 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2876 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2878 ilk_write_wm_values(dev_priv, &results);
2882 ilk_update_sprite_wm(struct drm_plane *plane,
2883 struct drm_crtc *crtc,
2884 uint32_t sprite_width, uint32_t sprite_height,
2885 int pixel_size, bool enabled, bool scaled)
2887 struct drm_device *dev = plane->dev;
2888 struct intel_plane *intel_plane = to_intel_plane(plane);
2890 intel_plane->wm.enabled = enabled;
2891 intel_plane->wm.scaled = scaled;
2892 intel_plane->wm.horiz_pixels = sprite_width;
2893 intel_plane->wm.vert_pixels = sprite_height;
2894 intel_plane->wm.bytes_per_pixel = pixel_size;
2897 * IVB workaround: must disable low power watermarks for at least
2898 * one frame before enabling scaling. LP watermarks can be re-enabled
2899 * when scaling is disabled.
2901 * WaCxSRDisabledForSpriteScaling:ivb
2903 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2904 intel_wait_for_vblank(dev, intel_plane->pipe);
2906 ilk_update_wm(crtc);
2909 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2911 struct drm_device *dev = crtc->dev;
2912 struct drm_i915_private *dev_priv = dev->dev_private;
2913 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2914 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2915 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2916 enum pipe pipe = intel_crtc->pipe;
2917 static const unsigned int wm0_pipe_reg[] = {
2918 [PIPE_A] = WM0_PIPEA_ILK,
2919 [PIPE_B] = WM0_PIPEB_ILK,
2920 [PIPE_C] = WM0_PIPEC_IVB,
2923 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2924 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2925 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2927 active->pipe_enabled = intel_crtc_active(crtc);
2929 if (active->pipe_enabled) {
2930 u32 tmp = hw->wm_pipe[pipe];
2933 * For active pipes LP0 watermark is marked as
2934 * enabled, and LP1+ watermaks as disabled since
2935 * we can't really reverse compute them in case
2936 * multiple pipes are active.
2938 active->wm[0].enable = true;
2939 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2940 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2941 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2942 active->linetime = hw->wm_linetime[pipe];
2944 int level, max_level = ilk_wm_max_level(dev);
2947 * For inactive pipes, all watermark levels
2948 * should be marked as enabled but zeroed,
2949 * which is what we'd compute them to.
2951 for (level = 0; level <= max_level; level++)
2952 active->wm[level].enable = true;
2956 void ilk_wm_get_hw_state(struct drm_device *dev)
2958 struct drm_i915_private *dev_priv = dev->dev_private;
2959 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2960 struct drm_crtc *crtc;
2962 for_each_crtc(dev, crtc)
2963 ilk_pipe_wm_get_hw_state(crtc);
2965 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2966 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2967 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2969 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2970 if (INTEL_INFO(dev)->gen >= 7) {
2971 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2972 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2975 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2976 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2977 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2978 else if (IS_IVYBRIDGE(dev))
2979 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2980 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2982 hw->enable_fbc_wm =
2983 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2987 * intel_update_watermarks - update FIFO watermark values based on current modes
2989 * Calculate watermark values for the various WM regs based on current mode
2990 * and plane configuration.
2992 * There are several cases to deal with here:
2993 * - normal (i.e. non-self-refresh)
2994 * - self-refresh (SR) mode
2995 * - lines are large relative to FIFO size (buffer can hold up to 2)
2996 * - lines are small relative to FIFO size (buffer can hold more than 2
2997 * lines), so need to account for TLB latency
2999 * The normal calculation is:
3000 * watermark = dotclock * bytes per pixel * latency
3001 * where latency is platform & configuration dependent (we assume pessimal
3004 * The SR calculation is:
3005 * watermark = (trunc(latency/line time)+1) * surface width *
3008 * line time = htotal / dotclock
3009 * surface width = hdisplay for normal plane and 64 for cursor
3010 * and latency is assumed to be high, as above.
3012 * The final value programmed to the register should always be rounded up,
3013 * and include an extra 2 entries to account for clock crossings.
3015 * We don't use the sprite, so we can ignore that. And on Crestline we have
3016 * to set the non-SR watermarks to 8.
3018 void intel_update_watermarks(struct drm_crtc *crtc)
3020 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3022 if (dev_priv->display.update_wm)
3023 dev_priv->display.update_wm(crtc);
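/*
 * Worked example of the SR formula documented above, with hypothetical
 * numbers: latency 12 us, htotal 2200, dotclock 148.5 MHz, hdisplay
 * 1920, 4 bytes per pixel:
 *
 *	line time = 2200 / 148500 kHz ~= 14.8 us
 *	watermark = (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 *
 * which is then rounded up to FIFO entries and padded by 2 for the
 * clock crossings, per the comment above.
 */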
3026 void intel_update_sprite_watermarks(struct drm_plane *plane,
3027 struct drm_crtc *crtc,
3028 uint32_t sprite_width,
3029 uint32_t sprite_height,
3031 bool enabled, bool scaled)
3033 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3035 if (dev_priv->display.update_sprite_wm)
3036 dev_priv->display.update_sprite_wm(plane, crtc,
3037 sprite_width, sprite_height,
3038 pixel_size, enabled, scaled);
3041 static struct drm_i915_gem_object *
3042 intel_alloc_context_page(struct drm_device *dev)
3044 struct drm_i915_gem_object *ctx;
3047 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3049 ctx = i915_gem_alloc_object(dev, 4096);
3051 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3055 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
3057 DRM_ERROR("failed to pin power context: %d\n", ret);
3061 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3063 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3070 i915_gem_object_ggtt_unpin(ctx);
3072 drm_gem_object_unreference(&ctx->base);
3077 * Lock protecting IPS related data structures
3079 DEFINE_SPINLOCK(mchdev_lock);
3081 /* Global for IPS driver to get at the current i915 device. Protected by
3082 * mchdev_lock. */
3083 static struct drm_i915_private *i915_mch_dev;
3085 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3087 struct drm_i915_private *dev_priv = dev->dev_private;
3090 assert_spin_locked(&mchdev_lock);
3092 rgvswctl = I915_READ16(MEMSWCTL);
3093 if (rgvswctl & MEMCTL_CMD_STS) {
3094 DRM_DEBUG("gpu busy, RCS change rejected\n");
3095 return false; /* still busy with another command */
3098 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3099 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3100 I915_WRITE16(MEMSWCTL, rgvswctl);
3101 POSTING_READ16(MEMSWCTL);
3103 rgvswctl |= MEMCTL_CMD_STS;
3104 I915_WRITE16(MEMSWCTL, rgvswctl);
3109 static void ironlake_enable_drps(struct drm_device *dev)
3111 struct drm_i915_private *dev_priv = dev->dev_private;
3112 u32 rgvmodectl = I915_READ(MEMMODECTL);
3113 u8 fmax, fmin, fstart, vstart;
3115 spin_lock_irq(&mchdev_lock);
3117 /* Enable temp reporting */
3118 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3119 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3121 /* 100ms RC evaluation intervals */
3122 I915_WRITE(RCUPEI, 100000);
3123 I915_WRITE(RCDNEI, 100000);
3125 /* Set max/min thresholds to 90ms and 80ms respectively */
3126 I915_WRITE(RCBMAXAVG, 90000);
3127 I915_WRITE(RCBMINAVG, 80000);
3129 I915_WRITE(MEMIHYST, 1);
3131 /* Set up min, max, and cur for interrupt handling */
3132 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3133 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3134 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3135 MEMMODE_FSTART_SHIFT;
3137 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3140 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3141 dev_priv->ips.fstart = fstart;
3143 dev_priv->ips.max_delay = fstart;
3144 dev_priv->ips.min_delay = fmin;
3145 dev_priv->ips.cur_delay = fstart;
3147 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3148 fmax, fmin, fstart);
3150 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3153 * Interrupts will be enabled in ironlake_irq_postinstall
3156 I915_WRITE(VIDSTART, vstart);
3157 POSTING_READ(VIDSTART);
3159 rgvmodectl |= MEMMODE_SWMODE_EN;
3160 I915_WRITE(MEMMODECTL, rgvmodectl);
3162 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3163 DRM_ERROR("stuck trying to change perf mode\n");
3166 ironlake_set_drps(dev, fstart);
3168 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3170 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3171 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3172 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3174 spin_unlock_irq(&mchdev_lock);
3177 static void ironlake_disable_drps(struct drm_device *dev)
3179 struct drm_i915_private *dev_priv = dev->dev_private;
3182 spin_lock_irq(&mchdev_lock);
3184 rgvswctl = I915_READ16(MEMSWCTL);
3186 /* Ack interrupts, disable EFC interrupt */
3187 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3188 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3189 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3190 I915_WRITE(DEIIR, DE_PCU_EVENT);
3191 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3193 /* Go back to the starting frequency */
3194 ironlake_set_drps(dev, dev_priv->ips.fstart);
3196 rgvswctl |= MEMCTL_CMD_STS;
3197 I915_WRITE(MEMSWCTL, rgvswctl);
3200 spin_unlock_irq(&mchdev_lock);
3203 /* There's a funny hw issue where the hw returns all 0 when reading from
3204 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3205 * ourselves, instead of doing an RMW cycle (which might result in us clearing
3206 * all limits and the GPU getting stuck at whatever frequency it currently runs at).
3208 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3212 /* Only set the down limit when we've reached the lowest level to avoid
3213 * getting more interrupts, otherwise leave this clear. This prevents a
3214 * race in the hw when coming out of rc6: There's a tiny window where
3215 * the hw runs at the minimal clock before selecting the desired
3216 * frequency, if the down threshold expires in that window we will not
3217 * receive a down interrupt. */
3218 limits = dev_priv->rps.max_freq_softlimit << 24;
3219 if (val <= dev_priv->rps.min_freq_softlimit)
3220 limits |= dev_priv->rps.min_freq_softlimit << 16;
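/*
 * Illustrative: with a hypothetical max softlimit of 0x16 (1100 MHz in
 * 50 MHz units) and min softlimit of 0x07 (350 MHz), a request at the
 * minimum frequency yields
 *
 *	limits = 0x16 << 24 | 0x07 << 16 = 0x16070000
 *
 * while any request above the minimum leaves the down limit (bits
 * 16-23) clear, per the rc6-exit race described in the comment above.
 */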
3225 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3229 if (dev_priv->rps.is_bdw_sw_turbo)
3232 new_power = dev_priv->rps.power;
3233 switch (dev_priv->rps.power) {
3235 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3236 new_power = BETWEEN;
3240 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3241 new_power = LOW_POWER;
3242 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3243 new_power = HIGH_POWER;
3247 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3248 new_power = BETWEEN;
3251 /* Max/min bins are special */
3252 if (val == dev_priv->rps.min_freq_softlimit)
3253 new_power = LOW_POWER;
3254 if (val == dev_priv->rps.max_freq_softlimit)
3255 new_power = HIGH_POWER;
3256 if (new_power == dev_priv->rps.power)
3259 /* Note the units here are not exactly 1us, but 1280ns. */
3260 switch (new_power) {
3262 /* Upclock if more than 95% busy over 16ms */
3263 I915_WRITE(GEN6_RP_UP_EI, 12500);
3264 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3266 /* Downclock if less than 85% busy over 32ms */
3267 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3268 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3270 I915_WRITE(GEN6_RP_CONTROL,
3271 GEN6_RP_MEDIA_TURBO |
3272 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3273 GEN6_RP_MEDIA_IS_GFX |
3275 GEN6_RP_UP_BUSY_AVG |
3276 GEN6_RP_DOWN_IDLE_AVG);
3280 /* Upclock if more than 90% busy over 13ms */
3281 I915_WRITE(GEN6_RP_UP_EI, 10250);
3282 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3284 /* Downclock if less than 75% busy over 32ms */
3285 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3286 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3288 I915_WRITE(GEN6_RP_CONTROL,
3289 GEN6_RP_MEDIA_TURBO |
3290 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3291 GEN6_RP_MEDIA_IS_GFX |
3293 GEN6_RP_UP_BUSY_AVG |
3294 GEN6_RP_DOWN_IDLE_AVG);
3298 /* Upclock if more than 85% busy over 10ms */
3299 I915_WRITE(GEN6_RP_UP_EI, 8000);
3300 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3302 /* Downclock if less than 60% busy over 32ms */
3303 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3304 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3306 I915_WRITE(GEN6_RP_CONTROL,
3307 GEN6_RP_MEDIA_TURBO |
3308 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3309 GEN6_RP_MEDIA_IS_GFX |
3311 GEN6_RP_UP_BUSY_AVG |
3312 GEN6_RP_DOWN_IDLE_AVG);
3316 dev_priv->rps.power = new_power;
3317 dev_priv->rps.last_adj = 0;
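/*
 * Unit check for the EI/threshold values above: the RP counters tick
 * every 1.28 us, so GEN6_RP_UP_EI = 12500 is 12500 * 1.28 us = 16 ms,
 * and 95% of that interval would be 11875 ticks - the 11800 written
 * for LOW_POWER leaves a little slack below the nominal 95%.
 */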
3320 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3324 if (val > dev_priv->rps.min_freq_softlimit)
3325 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3326 if (val < dev_priv->rps.max_freq_softlimit)
3327 mask |= GEN6_PM_RP_UP_THRESHOLD;
3329 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3330 mask &= dev_priv->pm_rps_events;
3332 /* IVB and SNB hard hang on a looping batchbuffer
3333 * if GEN6_PM_UP_EI_EXPIRED is masked.
3335 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3336 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3338 if (IS_GEN8(dev_priv->dev))
3339 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3344 /* gen6_set_rps is called to update the frequency request, but should also be
3345 * called when the range (min_delay and max_delay) is modified so that we can
3346 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3347 void gen6_set_rps(struct drm_device *dev, u8 val)
3349 struct drm_i915_private *dev_priv = dev->dev_private;
3351 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3352 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3353 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3355 /* min/max delay may still have been modified so be sure to
3356 * write the limits value.
3358 if (val != dev_priv->rps.cur_freq) {
3359 gen6_set_rps_thresholds(dev_priv, val);
3361 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3362 I915_WRITE(GEN6_RPNSWREQ,
3363 HSW_FREQUENCY(val));
3365 I915_WRITE(GEN6_RPNSWREQ,
3366 GEN6_FREQUENCY(val) |
3368 GEN6_AGGRESSIVE_TURBO);
3371 /* Make sure we continue to get interrupts
3372 * until we hit the minimum or maximum frequencies.
3374 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3375 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3377 POSTING_READ(GEN6_RPNSWREQ);
3379 dev_priv->rps.cur_freq = val;
3380 trace_intel_gpu_freq_change(val * 50);
3383 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3385 * If Gfx is idle, then
3386 * 1. Mask Turbo interrupts
3387 * 2. Bring up Gfx clock
3388 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3389 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3390 * 5. Unmask Turbo interrupts
3392 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3394 struct drm_device *dev = dev_priv->dev;
3396 /* Latest VLV doesn't need to force the gfx clock */
3397 if (dev->pdev->revision >= 0xd) {
3398 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3403 * When we are idle, drop to the minimum voltage state.
3406 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3409 /* Mask turbo interrupt so that they will not come in between */
3410 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3412 vlv_force_gfx_clock(dev_priv, true);
3414 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3416 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3417 dev_priv->rps.min_freq_softlimit);
3419 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3420 & GENFREQSTATUS) == 0, 5))
3421 DRM_ERROR("timed out waiting for Punit\n");
3423 vlv_force_gfx_clock(dev_priv, false);
3425 I915_WRITE(GEN6_PMINTRMSK,
3426 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3429 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3431 struct drm_device *dev = dev_priv->dev;
3433 mutex_lock(&dev_priv->rps.hw_lock);
3434 if (dev_priv->rps.enabled) {
3435 if (IS_CHERRYVIEW(dev))
3436 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3437 else if (IS_VALLEYVIEW(dev))
3438 vlv_set_rps_idle(dev_priv);
3439 else if (!dev_priv->rps.is_bdw_sw_turbo
3440 || atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
3441 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3444 dev_priv->rps.last_adj = 0;
3446 mutex_unlock(&dev_priv->rps.hw_lock);
3449 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3451 struct drm_device *dev = dev_priv->dev;
3453 mutex_lock(&dev_priv->rps.hw_lock);
3454 if (dev_priv->rps.enabled) {
3455 if (IS_VALLEYVIEW(dev))
3456 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3457 else if (!dev_priv->rps.is_bdw_sw_turbo
3458 || atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
3459 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3462 dev_priv->rps.last_adj = 0;
3464 mutex_unlock(&dev_priv->rps.hw_lock);
3467 void valleyview_set_rps(struct drm_device *dev, u8 val)
3469 struct drm_i915_private *dev_priv = dev->dev_private;
3471 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3472 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3473 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3475 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3476 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3477 dev_priv->rps.cur_freq,
3478 vlv_gpu_freq(dev_priv, val), val);
3480 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3481 "Odd GPU freq value\n"))
3484 if (val != dev_priv->rps.cur_freq)
3485 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3487 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3489 dev_priv->rps.cur_freq = val;
3490 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3493 static void gen8_disable_rps_interrupts(struct drm_device *dev)
3495 struct drm_i915_private *dev_priv = dev->dev_private;
3496 if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo) {
3497 if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
3498 del_timer(&dev_priv->rps.sw_turbo.flip_timer);
3499 dev_priv->rps.is_bdw_sw_turbo = false;
3501 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3502 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3503 ~dev_priv->pm_rps_events);
3504 /* Complete PM interrupt masking here doesn't race with the rps work
3505 * item again unmasking PM interrupts because that is using a different
3506 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
3507 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
3508 * gen8_enable_rps will clean up. */
3510 spin_lock_irq(&dev_priv->irq_lock);
3511 dev_priv->rps.pm_iir = 0;
3512 spin_unlock_irq(&dev_priv->irq_lock);
3514 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3518 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3522 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3523 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3524 ~dev_priv->pm_rps_events);
3525 /* Complete PM interrupt masking here doesn't race with the rps work
3526 * item again unmasking PM interrupts because that is using a different
3527 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3528 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3530 spin_lock_irq(&dev_priv->irq_lock);
3531 dev_priv->rps.pm_iir = 0;
3532 spin_unlock_irq(&dev_priv->irq_lock);
3534 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3537 static void gen6_disable_rps(struct drm_device *dev)
3539 struct drm_i915_private *dev_priv = dev->dev_private;
3541 I915_WRITE(GEN6_RC_CONTROL, 0);
3542 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3544 if (IS_BROADWELL(dev))
3545 gen8_disable_rps_interrupts(dev);
3547 gen6_disable_rps_interrupts(dev);
3550 static void cherryview_disable_rps(struct drm_device *dev)
3552 struct drm_i915_private *dev_priv = dev->dev_private;
3554 I915_WRITE(GEN6_RC_CONTROL, 0);
3556 gen8_disable_rps_interrupts(dev);
3559 static void valleyview_disable_rps(struct drm_device *dev)
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3563 /* We're taking forcewake before disabling RC6;
3564 * this is what the BIOS expects when going into suspend */
3565 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3567 I915_WRITE(GEN6_RC_CONTROL, 0);
3569 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3571 gen6_disable_rps_interrupts(dev);
3574 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3576 if (IS_VALLEYVIEW(dev)) {
3577 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3578 mode = GEN6_RC_CTL_RC6_ENABLE;
3582 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3583 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3584 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3585 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3588 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3590 /* No RC6 before Ironlake */
3591 if (INTEL_INFO(dev)->gen < 5)
3592 return 0;
3594 /* RC6 is only on Ironlake mobile not on desktop */
3595 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3596 return 0;
3598 /* Respect the kernel parameter if it is set */
3599 if (enable_rc6 >= 0) {
3602 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3603 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3606 mask = INTEL_RC6_ENABLE;
3608 if ((enable_rc6 & mask) != enable_rc6)
3609 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3610 enable_rc6 & mask, enable_rc6, mask);
3612 return enable_rc6 & mask;
3615 /* Disable RC6 on Ironlake */
3616 if (INTEL_INFO(dev)->gen == 5)
3617 return 0;
3619 if (IS_IVYBRIDGE(dev))
3620 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3622 return INTEL_RC6_ENABLE;
3625 int intel_enable_rc6(const struct drm_device *dev)
3627 return i915.enable_rc6;
3630 static void gen8_enable_rps_interrupts(struct drm_device *dev)
3632 struct drm_i915_private *dev_priv = dev->dev_private;
3634 spin_lock_irq(&dev_priv->irq_lock);
3635 WARN_ON(dev_priv->rps.pm_iir);
3636 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3637 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3638 spin_unlock_irq(&dev_priv->irq_lock);
3641 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3643 struct drm_i915_private *dev_priv = dev->dev_private;
3645 spin_lock_irq(&dev_priv->irq_lock);
3646 WARN_ON(dev_priv->rps.pm_iir);
3647 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3648 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3649 spin_unlock_irq(&dev_priv->irq_lock);
3652 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3654 /* All of these values are in units of 50MHz */
3655 dev_priv->rps.cur_freq = 0;
3656 /* static values from HW: RP0 > RPe > RP1 > RPn (min_freq) */
3657 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3658 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3659 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3660 /* XXX: only BYT has a special efficient freq */
3661 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3662 /* hw_max = RP0 until we check for overclocking */
3663 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3665 /* Preserve min/max settings in case of re-init */
3666 if (dev_priv->rps.max_freq_softlimit == 0)
3667 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3669 if (dev_priv->rps.min_freq_softlimit == 0)
3670 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
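/*
 * Illustrative decode of a made-up rp_state_cap value 0x00071016,
 * using the shifts above (all fields in 50 MHz units):
 *
 *	RP0 = (cap >>  0) & 0xff = 0x16 -> 1100 MHz
 *	RP1 = (cap >>  8) & 0xff = 0x10 ->  800 MHz
 *	RPn = (cap >> 16) & 0xff = 0x07 ->  350 MHz
 */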
3673 static void bdw_sw_calculate_freq(struct drm_device *dev,
3674 struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
3676 struct drm_i915_private *dev_priv = dev->dev_private;
3678 u32 busyness_pct = 0;
3679 u32 elapsed_time = 0;
3682 if (!c || !cur_time || !c0)
3685 if (0 == c->last_c0)
3688 /* Check Evaluation interval */
3689 elapsed_time = *cur_time - c->last_ts;
3690 if (elapsed_time < c->eval_interval)
3693 mutex_lock(&dev_priv->rps.hw_lock);
3696 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
3697 * Whole busyness_pct calculation should be
3698 * busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
3699 * busyness_pct = (u32)(busy * 100 / elapsed_time);
3700 * The final formula is to simplify CPU calculation
3702 busy = (u64)(*c0 - c->last_c0) << 12;
3703 do_div(busy, elapsed_time);
3704 busyness_pct = (u32)busy;
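/*
 * Where the << 12 comes from: one C0 residency count is 32 * 1.28 us
 * = 40.96 us, and the result should be a percentage, so
 *
 *	busy% = delta_c0 * 40.96 * 100 / elapsed_us
 *	      = delta_c0 * 4096 / elapsed_us
 *	      = (delta_c0 << 12) / elapsed_us
 */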
3706 if (c->is_up && busyness_pct >= c->it_threshold_pct)
3707 new_freq = (u16)dev_priv->rps.cur_freq + 3;
3708 if (!c->is_up && busyness_pct <= c->it_threshold_pct)
3709 new_freq = (u16)dev_priv->rps.cur_freq - 1;
3711 /* Adjust to new frequency busyness and compare with threshold */
3712 if (0 != new_freq) {
3713 if (new_freq > dev_priv->rps.max_freq_softlimit)
3714 new_freq = dev_priv->rps.max_freq_softlimit;
3715 else if (new_freq < dev_priv->rps.min_freq_softlimit)
3716 new_freq = dev_priv->rps.min_freq_softlimit;
3718 gen6_set_rps(dev, new_freq);
3721 mutex_unlock(&dev_priv->rps.hw_lock);
3725 c->last_ts = *cur_time;
3728 static void gen8_set_frequency_RP0(struct work_struct *work)
3730 struct intel_rps_bdw_turbo *p_bdw_turbo =
3731 container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
3732 struct intel_gen6_power_mgmt *p_power_mgmt =
3733 container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
3734 struct drm_i915_private *dev_priv =
3735 container_of(p_power_mgmt, struct drm_i915_private, rps);
3737 mutex_lock(&dev_priv->rps.hw_lock);
3738 gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
3739 mutex_unlock(&dev_priv->rps.hw_lock);
3742 static void flip_active_timeout_handler(unsigned long var)
3744 struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
3746 del_timer(&dev_priv->rps.sw_turbo.flip_timer);
3747 atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
3749 queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
3752 void bdw_software_turbo(struct drm_device *dev)
3754 struct drm_i915_private *dev_priv = dev->dev_private;
3756 u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
3757 u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
3759 bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
3760 &current_time, &current_c0);
3761 bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
3762 &current_time, &current_c0);
3765 static void gen8_enable_rps(struct drm_device *dev)
3767 struct drm_i915_private *dev_priv = dev->dev_private;
3768 struct intel_engine_cs *ring;
3769 uint32_t rc6_mask = 0, rp_state_cap;
3770 uint32_t threshold_up_pct, threshold_down_pct;
3771 uint32_t ei_up, ei_down; /* up and down evaluation interval */
3775 /* Use software Turbo for BDW */
3776 dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
3778 /* 1a: Software RC state - RC0 */
3779 I915_WRITE(GEN6_RC_STATE, 0);
3781 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3782 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3783 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3785 /* 2a: Disable RC states. */
3786 I915_WRITE(GEN6_RC_CONTROL, 0);
3788 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3789 parse_rp_state_cap(dev_priv, rp_state_cap);
3791 /* 2b: Program RC6 thresholds.*/
3792 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3793 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3794 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3795 for_each_ring(ring, dev_priv, unused)
3796 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3797 I915_WRITE(GEN6_RC_SLEEP, 0);
3798 if (IS_BROADWELL(dev))
3799 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3801 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3804 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3805 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3806 intel_print_rc6_info(dev, rc6_mask);
3807 if (IS_BROADWELL(dev))
3808 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3809 GEN7_RC_CTL_TO_MODE |
3812 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3813 GEN6_RC_CTL_EI_MODE(1) |
3816 /* 4 Program defaults and thresholds for RPS*/
3817 I915_WRITE(GEN6_RPNSWREQ,
3818 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3819 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3820 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3821 ei_up = 84480; /* 84.48ms */
3823 threshold_up_pct = 90; /* x percent busy */
3824 threshold_down_pct = 70;
3826 if (dev_priv->rps.is_bdw_sw_turbo) {
3827 dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
3828 dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
3829 dev_priv->rps.sw_turbo.up.is_up = true;
3830 dev_priv->rps.sw_turbo.up.last_ts = 0;
3831 dev_priv->rps.sw_turbo.up.last_c0 = 0;
3833 dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
3834 dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
3835 dev_priv->rps.sw_turbo.down.is_up = false;
3836 dev_priv->rps.sw_turbo.down.last_ts = 0;
3837 dev_priv->rps.sw_turbo.down.last_c0 = 0;
3839 /* Start the timer to track if flip comes */
3840 dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
3842 init_timer(&dev_priv->rps.sw_turbo.flip_timer);
3843 dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
3844 dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
3845 dev_priv->rps.sw_turbo.flip_timer.expires =
3846 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
3847 add_timer(&dev_priv->rps.sw_turbo.flip_timer);
3848 INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
3850 atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
3852 /* NB: Docs say 1s, and 1000000 - which aren't equivalent
3853 * 1 second timeout */
3854 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
3856 /* Docs recommend 900MHz, and 300 MHz respectively */
3857 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3858 dev_priv->rps.max_freq_softlimit << 24 |
3859 dev_priv->rps.min_freq_softlimit << 16);
3861 I915_WRITE(GEN6_RP_UP_THRESHOLD,
3862 FREQ_1_28_US(ei_up * threshold_up_pct / 100));
3863 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
3864 FREQ_1_28_US(ei_down * threshold_down_pct / 100));
3865 I915_WRITE(GEN6_RP_UP_EI,
3866 FREQ_1_28_US(ei_up));
3867 I915_WRITE(GEN6_RP_DOWN_EI,
3868 FREQ_1_28_US(ei_down));
3870 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3874 rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
3875 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3876 GEN6_RP_MEDIA_IS_GFX |
3877 GEN6_RP_UP_BUSY_AVG |
3878 GEN6_RP_DOWN_IDLE_AVG;
3879 if (!dev_priv->rps.is_bdw_sw_turbo)
3880 rp_ctl_flag |= GEN6_RP_ENABLE;
3882 I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);
3884 /* 6: Ring frequency + overclocking
3885 * (our driver does this later) */
3886 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3887 if (!dev_priv->rps.is_bdw_sw_turbo)
3888 gen8_enable_rps_interrupts(dev);
3890 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3893 static void gen6_enable_rps(struct drm_device *dev)
3895 struct drm_i915_private *dev_priv = dev->dev_private;
3896 struct intel_engine_cs *ring;
3898 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3903 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3905 /* Here begins a magic sequence of register writes to enable
3906 * auto-downclocking.
3908 * Perhaps there might be some value in exposing these to
3909 * userspace...
3910 */
3911 I915_WRITE(GEN6_RC_STATE, 0);
3913 /* Clear the DBG now so we don't confuse earlier errors */
3914 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3915 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3916 I915_WRITE(GTFIFODBG, gtfifodbg);
3919 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3921 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3923 parse_rp_state_cap(dev_priv, rp_state_cap);
3925 /* disable the counters and set deterministic thresholds */
3926 I915_WRITE(GEN6_RC_CONTROL, 0);
3928 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3929 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3930 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3931 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3932 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3934 for_each_ring(ring, dev_priv, i)
3935 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3937 I915_WRITE(GEN6_RC_SLEEP, 0);
3938 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3939 if (IS_IVYBRIDGE(dev))
3940 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3942 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3943 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3944 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3946 /* Check if we are enabling RC6 */
3947 rc6_mode = intel_enable_rc6(dev_priv->dev);
3948 if (rc6_mode & INTEL_RC6_ENABLE)
3949 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3951 /* We don't use those on Haswell */
3952 if (!IS_HASWELL(dev)) {
3953 if (rc6_mode & INTEL_RC6p_ENABLE)
3954 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3956 if (rc6_mode & INTEL_RC6pp_ENABLE)
3957 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3960 intel_print_rc6_info(dev, rc6_mask);
3962 I915_WRITE(GEN6_RC_CONTROL,
3964 GEN6_RC_CTL_EI_MODE(1) |
3965 GEN6_RC_CTL_HW_ENABLE);
3967 /* Power down if completely idle for over 50ms */
3968 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3969 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3971 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3973 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3975 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3976 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3977 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3978 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3979 (pcu_mbox & 0xff) * 50);
3980 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3983 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3984 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3986 gen6_enable_rps_interrupts(dev);
3988 rc6vids = 0;
3989 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3990 if (IS_GEN6(dev) && ret) {
3991 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3992 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3993 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3994 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3995 rc6vids &= 0xffff00;
3996 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3997 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3998 if (ret)
3999 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4002 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4005 static void __gen6_update_ring_freq(struct drm_device *dev)
4007 struct drm_i915_private *dev_priv = dev->dev_private;
4008 int min_freq = 15;
4009 unsigned int gpu_freq;
4010 unsigned int max_ia_freq, min_ring_freq;
4011 int scaling_factor = 180;
4012 struct cpufreq_policy *policy;
4014 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4016 policy = cpufreq_cpu_get(0);
4017 if (policy) {
4018 max_ia_freq = policy->cpuinfo.max_freq;
4019 cpufreq_cpu_put(policy);
4020 } else {
4022 * Default to measured freq if none found, PCU will ensure we
4023 * don't go over
4025 max_ia_freq = tsc_khz;
4028 /* Convert from kHz to MHz */
4029 max_ia_freq /= 1000;
4031 min_ring_freq = I915_READ(DCLK) & 0xf;
4032 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4033 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
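/*
 * Worked example (illustrative): mult_frac(x, 8, 3) rescales the DCLK
 * field from 266.6 MHz units into 100 MHz units, since 266.6 / 100 ==
 * 8 / 3. A raw field of 3 (3 x 266.6 MHz = 800 MHz DDR) becomes
 * mult_frac(3, 8, 3) = 8, i.e. a minimum ring request of 8 x 100 MHz.
 * The unit interpretation is an assumption; the 8/3 arithmetic is not.
 */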
4036 * For each potential GPU frequency, load a ring frequency we'd like
4037 * to use for memory access. We do this by specifying the IA frequency
4038 * the PCU should use as a reference to determine the ring frequency.
4040 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
4041 gpu_freq--) {
4042 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
4043 unsigned int ia_freq = 0, ring_freq = 0;
4045 if (INTEL_INFO(dev)->gen >= 8) {
4046 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4047 ring_freq = max(min_ring_freq, gpu_freq);
4048 } else if (IS_HASWELL(dev)) {
4049 ring_freq = mult_frac(gpu_freq, 5, 4);
4050 ring_freq = max(min_ring_freq, ring_freq);
4051 /* leave ia_freq as the default, chosen by cpufreq */
4052 } else {
4053 /* On older processors, there is no separate ring
4054 * clock domain, so in order to boost the bandwidth
4055 * of the ring, we need to upclock the CPU (ia_freq).
4057 * For GPU frequencies less than 750MHz,
4058 * just use the lowest ring freq.
4060 if (gpu_freq < min_freq)
4061 ia_freq = 800;
4062 else
4063 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
4064 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
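/*
 * Worked example (illustrative, hypothetical numbers): with
 * scaling_factor = 180, max_ia_freq = 3400 MHz and a GPU bin 10 steps
 * below the soft maximum (diff = 10), the request is
 * ia_freq = 3400 - (10 * 180) / 2 = 2500 MHz, which
 * DIV_ROUND_CLOSEST(2500, 100) encodes as 25 for the PCU.
 */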
4067 sandybridge_pcode_write(dev_priv,
4068 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
4069 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
4070 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
4071 gpu_freq);
4075 void gen6_update_ring_freq(struct drm_device *dev)
4077 struct drm_i915_private *dev_priv = dev->dev_private;
4079 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
4080 return;
4082 mutex_lock(&dev_priv->rps.hw_lock);
4083 __gen6_update_ring_freq(dev);
4084 mutex_unlock(&dev_priv->rps.hw_lock);
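/*
 * Self-contained userspace sketch (illustrative; not part of the driver)
 * of the pre-HSW branch of the table walk above. The soft limits and the
 * 3400 MHz max IA frequency are made-up example inputs.
 */
#include <stdio.h>

static unsigned int example_ia_ratio(unsigned int gpu_freq,
				     unsigned int max_gpu,
				     unsigned int max_ia_mhz)
{
	const int scaling_factor = 180, min_freq = 15;
	int diff = max_gpu - gpu_freq;
	unsigned int ia_freq;

	if (gpu_freq < min_freq)
		ia_freq = 800;	/* just use the lowest ring freq */
	else
		ia_freq = max_ia_mhz - ((diff * scaling_factor) / 2);
	return (ia_freq + 50) / 100;	/* DIV_ROUND_CLOSEST(ia_freq, 100) */
}

int main(void)
{
	unsigned int gpu_freq;

	for (gpu_freq = 22; gpu_freq >= 11; gpu_freq--)
		printf("gpu bin %u -> IA ratio %u\n", gpu_freq,
		       example_ia_ratio(gpu_freq, 22, 3400));
	return 0;
}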
4087 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
4089 u32 val, rp0;
4091 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4092 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4094 return rp0;
4097 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4099 u32 val, rpe;
4101 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
4102 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
4104 return rpe;
4107 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
4109 u32 val, rp1;
4111 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4112 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4114 return rp1;
4117 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
4119 u32 val, rpn;
4121 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4122 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
4124 return rpn;
4126 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4128 u32 val, rp1;
4130 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4132 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4134 return rp1;
4137 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4139 u32 val, rp0;
4141 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4143 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4144 /* Clamp to max */
4145 rp0 = min_t(u32, rp0, 0xea);
4147 return rp0;
4150 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4152 u32 val, rpe;
4154 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4155 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
4156 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4157 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
4159 return rpe;
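/*
 * Worked example (illustrative): the RPe fuse just read is split across
 * two registers, a low field plus a high field shifted left by 5, so a
 * low field of 0x15 and a high field of 0x2 combine to
 * rpe = 0x15 | (0x2 << 5) = 0x55. The exact field widths beyond the
 * visible "<< 5" are an assumption.
 */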
4162 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4164 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4167 /* Check that the pctx buffer wasn't moved under us. */
4168 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4170 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4172 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4173 dev_priv->vlv_pctx->stolen->start);
4177 /* Check that the pcbr address is not empty. */
4178 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4180 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4182 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4185 static void cherryview_setup_pctx(struct drm_device *dev)
4187 struct drm_i915_private *dev_priv = dev->dev_private;
4188 unsigned long pctx_paddr, paddr;
4189 struct i915_gtt *gtt = &dev_priv->gtt;
4190 u32 pcbr;
4191 int pctx_size = 32*1024;
4193 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4195 pcbr = I915_READ(VLV_PCBR);
4196 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
4197 paddr = (dev_priv->mm.stolen_base +
4198 (gtt->stolen_size - pctx_size));
4200 pctx_paddr = (paddr & (~4095));
4201 I915_WRITE(VLV_PCBR, pctx_paddr);
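/*
 * Worked example (illustrative, hypothetical addresses): with a stolen
 * base of 0x7b000000 and 64 MiB of stolen memory, the 32 KiB power
 * context is carved from the very top of stolen space:
 * paddr = 0x7b000000 + 0x4000000 - 0x8000 = 0x7eff8000, which is
 * already 4 KiB aligned, so PCBR is written with 0x7eff8000.
 */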
4205 static void valleyview_setup_pctx(struct drm_device *dev)
4207 struct drm_i915_private *dev_priv = dev->dev_private;
4208 struct drm_i915_gem_object *pctx;
4209 unsigned long pctx_paddr;
4210 u32 pcbr;
4211 int pctx_size = 24*1024;
4213 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4215 pcbr = I915_READ(VLV_PCBR);
4216 if (pcbr) {
4217 /* BIOS set it up already, grab the pre-alloc'd space */
4218 int pcbr_offset;
4220 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
4221 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4222 pcbr_offset,
4223 I915_GTT_OFFSET_NONE,
4224 pctx_size);
4225 goto out;
4229 * From the Gunit register HAS:
4230 * The Gfx driver is expected to program this register and ensure
4231 * proper allocation within Gfx stolen memory. For example, this
4232 * register should be programmed such that the PCBR range does not
4233 * overlap with other ranges, such as the frame buffer, protected
4234 * memory, or any other relevant ranges.
4236 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4237 if (!pctx) {
4238 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4239 return;
4242 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4243 I915_WRITE(VLV_PCBR, pctx_paddr);
4245 out:
4246 dev_priv->vlv_pctx = pctx;
4249 static void valleyview_cleanup_pctx(struct drm_device *dev)
4251 struct drm_i915_private *dev_priv = dev->dev_private;
4253 if (WARN_ON(!dev_priv->vlv_pctx))
4254 return;
4256 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4257 dev_priv->vlv_pctx = NULL;
4260 static void valleyview_init_gt_powersave(struct drm_device *dev)
4262 struct drm_i915_private *dev_priv = dev->dev_private;
4263 u32 val;
4265 valleyview_setup_pctx(dev);
4267 mutex_lock(&dev_priv->rps.hw_lock);
4269 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4270 switch ((val >> 6) & 3) {
4271 case 0:
4272 case 1:
4273 dev_priv->mem_freq = 800;
4274 break;
4275 case 2:
4276 dev_priv->mem_freq = 1066;
4277 break;
4278 case 3:
4279 dev_priv->mem_freq = 1333;
4280 break;
4282 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4284 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4285 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4286 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4287 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4288 dev_priv->rps.max_freq);
4290 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4291 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4292 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4293 dev_priv->rps.efficient_freq);
4295 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4296 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4297 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4298 dev_priv->rps.rp1_freq);
4300 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4301 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4302 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4303 dev_priv->rps.min_freq);
4305 /* Preserve min/max settings in case of re-init */
4306 if (dev_priv->rps.max_freq_softlimit == 0)
4307 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4309 if (dev_priv->rps.min_freq_softlimit == 0)
4310 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4312 mutex_unlock(&dev_priv->rps.hw_lock);
4315 static void cherryview_init_gt_powersave(struct drm_device *dev)
4317 struct drm_i915_private *dev_priv = dev->dev_private;
4318 u32 val;
4320 cherryview_setup_pctx(dev);
4322 mutex_lock(&dev_priv->rps.hw_lock);
4324 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
4325 switch ((val >> 2) & 0x7) {
4326 case 0:
4327 case 1:
4328 dev_priv->rps.cz_freq = 200;
4329 dev_priv->mem_freq = 1600;
4330 break;
4331 case 2:
4332 dev_priv->rps.cz_freq = 267;
4333 dev_priv->mem_freq = 1600;
4334 break;
4335 case 3:
4336 dev_priv->rps.cz_freq = 333;
4337 dev_priv->mem_freq = 2000;
4338 break;
4339 case 4:
4340 dev_priv->rps.cz_freq = 320;
4341 dev_priv->mem_freq = 1600;
4342 break;
4343 case 5:
4344 dev_priv->rps.cz_freq = 400;
4345 dev_priv->mem_freq = 1600;
4346 break;
4348 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4350 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4351 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4352 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4353 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4354 dev_priv->rps.max_freq);
4356 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4357 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4358 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4359 dev_priv->rps.efficient_freq);
4361 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4362 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4363 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4364 dev_priv->rps.rp1_freq);
4366 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4367 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4368 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4369 dev_priv->rps.min_freq);
4371 WARN_ONCE((dev_priv->rps.max_freq |
4372 dev_priv->rps.efficient_freq |
4373 dev_priv->rps.rp1_freq |
4374 dev_priv->rps.min_freq) & 1,
4375 "Odd GPU freq values\n");
4377 /* Preserve min/max settings in case of re-init */
4378 if (dev_priv->rps.max_freq_softlimit == 0)
4379 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4381 if (dev_priv->rps.min_freq_softlimit == 0)
4382 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4384 mutex_unlock(&dev_priv->rps.hw_lock);
4387 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
4389 valleyview_cleanup_pctx(dev);
4392 static void cherryview_enable_rps(struct drm_device *dev)
4394 struct drm_i915_private *dev_priv = dev->dev_private;
4395 struct intel_engine_cs *ring;
4396 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4397 int i;
4399 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4401 gtfifodbg = I915_READ(GTFIFODBG);
4402 if (gtfifodbg) {
4403 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4404 gtfifodbg);
4405 I915_WRITE(GTFIFODBG, gtfifodbg);
4408 cherryview_check_pctx(dev_priv);
4410 /* 1a & 1b: Get forcewake during program sequence. Although the driver
4411 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4412 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4414 /* 2a: Program RC6 thresholds.*/
4415 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4416 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns */
4417 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4419 for_each_ring(ring, dev_priv, i)
4420 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4421 I915_WRITE(GEN6_RC_SLEEP, 0);
4423 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4425 /* allows RC6 residency counter to work */
4426 I915_WRITE(VLV_COUNTER_CONTROL,
4427 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4428 VLV_MEDIA_RC6_COUNT_EN |
4429 VLV_RENDER_RC6_COUNT_EN));
4431 /* For now we assume BIOS is allocating and populating the PCBR */
4432 pcbr = I915_READ(VLV_PCBR);
4434 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4436 /* 3: Enable RC6 */
4437 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4438 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4439 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
4441 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4443 /* 4: Program defaults and thresholds for RPS */
4444 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4445 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4446 I915_WRITE(GEN6_RP_UP_EI, 66000);
4447 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4449 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4451 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
4452 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4453 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4455 /* 5: Enable RPS */
4456 I915_WRITE(GEN6_RP_CONTROL,
4457 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4458 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
4459 GEN6_RP_ENABLE |
4460 GEN6_RP_UP_BUSY_AVG |
4461 GEN6_RP_DOWN_IDLE_AVG);
4463 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4465 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4466 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4468 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4469 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4470 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4471 dev_priv->rps.cur_freq);
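/*
 * Worked example (illustrative, hypothetical value): a status word of
 * 0x00001a50 gives cur_freq = (0x1a50 >> 8) & 0xff = 0x1a (26), and
 * bit 4 (0x10) being set is what the "GPLL enabled?" message above
 * reports; vlv_gpu_freq() then translates the raw 26 into MHz.
 */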
4473 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4474 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4475 dev_priv->rps.efficient_freq);
4477 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4479 gen8_enable_rps_interrupts(dev);
4481 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4484 static void valleyview_enable_rps(struct drm_device *dev)
4486 struct drm_i915_private *dev_priv = dev->dev_private;
4487 struct intel_engine_cs *ring;
4488 u32 gtfifodbg, val, rc6_mode = 0;
4489 int i;
4491 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4493 valleyview_check_pctx(dev_priv);
4495 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4496 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4497 gtfifodbg);
4498 I915_WRITE(GTFIFODBG, gtfifodbg);
4501 /* If VLV, Forcewake all wells, else re-direct to regular path */
4502 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4504 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4505 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4506 I915_WRITE(GEN6_RP_UP_EI, 66000);
4507 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4509 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4510 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4512 I915_WRITE(GEN6_RP_CONTROL,
4513 GEN6_RP_MEDIA_TURBO |
4514 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4515 GEN6_RP_MEDIA_IS_GFX |
4516 GEN6_RP_ENABLE |
4517 GEN6_RP_UP_BUSY_AVG |
4518 GEN6_RP_DOWN_IDLE_CONT);
4520 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4521 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4522 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4524 for_each_ring(ring, dev_priv, i)
4525 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4527 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4529 /* allows RC6 residency counter to work */
4530 I915_WRITE(VLV_COUNTER_CONTROL,
4531 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4532 VLV_RENDER_RC0_COUNT_EN |
4533 VLV_MEDIA_RC6_COUNT_EN |
4534 VLV_RENDER_RC6_COUNT_EN));
4536 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4537 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4539 intel_print_rc6_info(dev, rc6_mode);
4541 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4543 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4545 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4546 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4548 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4549 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4550 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4551 dev_priv->rps.cur_freq);
4553 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4554 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4555 dev_priv->rps.efficient_freq);
4557 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4559 gen6_enable_rps_interrupts(dev);
4561 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4564 void ironlake_teardown_rc6(struct drm_device *dev)
4566 struct drm_i915_private *dev_priv = dev->dev_private;
4568 if (dev_priv->ips.renderctx) {
4569 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4570 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4571 dev_priv->ips.renderctx = NULL;
4574 if (dev_priv->ips.pwrctx) {
4575 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4576 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4577 dev_priv->ips.pwrctx = NULL;
4581 static void ironlake_disable_rc6(struct drm_device *dev)
4583 struct drm_i915_private *dev_priv = dev->dev_private;
4585 if (I915_READ(PWRCTXA)) {
4586 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4587 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4588 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4589 50);
4591 I915_WRITE(PWRCTXA, 0);
4592 POSTING_READ(PWRCTXA);
4594 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4595 POSTING_READ(RSTDBYCTL);
4599 static int ironlake_setup_rc6(struct drm_device *dev)
4601 struct drm_i915_private *dev_priv = dev->dev_private;
4603 if (dev_priv->ips.renderctx == NULL)
4604 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4605 if (!dev_priv->ips.renderctx)
4606 return -ENOMEM;
4608 if (dev_priv->ips.pwrctx == NULL)
4609 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4610 if (!dev_priv->ips.pwrctx) {
4611 ironlake_teardown_rc6(dev);
4612 return -ENOMEM;
4615 return 0;
4618 static void ironlake_enable_rc6(struct drm_device *dev)
4620 struct drm_i915_private *dev_priv = dev->dev_private;
4621 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4622 bool was_interruptible;
4623 int ret;
4625 /* rc6 disabled by default due to repeated reports of hanging during
4626 * boot and resume.
4628 if (!intel_enable_rc6(dev))
4629 return;
4631 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4633 ret = ironlake_setup_rc6(dev);
4634 if (ret)
4635 return;
4637 was_interruptible = dev_priv->mm.interruptible;
4638 dev_priv->mm.interruptible = false;
4641 * GPU can automatically power down the render unit if given a page
4642 * to save state.
4644 ret = intel_ring_begin(ring, 6);
4645 if (ret) {
4646 ironlake_teardown_rc6(dev);
4647 dev_priv->mm.interruptible = was_interruptible;
4648 return;
4651 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4652 intel_ring_emit(ring, MI_SET_CONTEXT);
4653 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4654 MI_MM_SPACE_GTT |
4655 MI_SAVE_EXT_STATE_EN |
4656 MI_RESTORE_EXT_STATE_EN |
4657 MI_RESTORE_INHIBIT);
4658 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4659 intel_ring_emit(ring, MI_NOOP);
4660 intel_ring_emit(ring, MI_FLUSH);
4661 intel_ring_advance(ring);
4664 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4665 * does an implicit flush; combined with the MI_FLUSH above, it should be
4666 * safe to assume that renderctx is valid
4668 ret = intel_ring_idle(ring);
4669 dev_priv->mm.interruptible = was_interruptible;
4670 if (ret) {
4671 DRM_ERROR("failed to enable ironlake power savings\n");
4672 ironlake_teardown_rc6(dev);
4673 return;
4676 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4677 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4679 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
4682 static unsigned long intel_pxfreq(u32 vidfreq)
4684 unsigned long freq;
4685 int div = (vidfreq & 0x3f0000) >> 16;
4686 int post = (vidfreq & 0x3000) >> 12;
4687 int pre = (vidfreq & 0x7);
4689 if (!pre)
4690 return 0;
4692 freq = ((div * 133333) / ((1<<post) * pre));
4694 return freq;
4697 static const struct cparams {
4698 u16 i;
4699 u16 t;
4700 u16 m;
4701 u16 c;
4702 } cparams[] = {
4703 { 1, 1333, 301, 28664 },
4704 { 1, 1066, 294, 24460 },
4705 { 1, 800, 294, 25192 },
4706 { 0, 1333, 276, 27605 },
4707 { 0, 1066, 276, 27605 },
4708 { 0, 800, 231, 23784 },
4711 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4713 u64 total_count, diff, ret;
4714 u32 count1, count2, count3, m = 0, c = 0;
4715 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4716 int i;
4718 assert_spin_locked(&mchdev_lock);
4720 diff1 = now - dev_priv->ips.last_time1;
4722 /* Prevent division-by-zero if we are asking too fast.
4723 * Also, we don't get interesting results if we are polling
4724 * faster than once in 10ms, so just return the saved value
4725 * in such cases.
4727 if (diff1 <= 10)
4728 return dev_priv->ips.chipset_power;
4730 count1 = I915_READ(DMIEC);
4731 count2 = I915_READ(DDREC);
4732 count3 = I915_READ(CSIEC);
4734 total_count = count1 + count2 + count3;
4736 /* FIXME: handle per-counter overflow */
4737 if (total_count < dev_priv->ips.last_count1) {
4738 diff = ~0UL - dev_priv->ips.last_count1;
4739 diff += total_count;
4741 diff = total_count - dev_priv->ips.last_count1;
4744 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4745 if (cparams[i].i == dev_priv->ips.c_m &&
4746 cparams[i].t == dev_priv->ips.r_t) {
4747 m = cparams[i].m;
4748 c = cparams[i].c;
4749 break;
4753 diff = div_u64(diff, diff1);
4754 ret = ((m * diff) + c);
4755 ret = div_u64(ret, 10);
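/*
 * Worked example (illustrative, hypothetical counts): using the 1333 MHz
 * cparams row above (m = 301, c = 28664), 1000 energy counts accumulated
 * over diff1 = 100 ms give diff = 1000 / 100 = 10, so
 * ret = ((301 * 10) + 28664) / 10 = 3167; the units are whatever the
 * EMON weights were programmed in (presumably on the order of mW).
 */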
4757 dev_priv->ips.last_count1 = total_count;
4758 dev_priv->ips.last_time1 = now;
4760 dev_priv->ips.chipset_power = ret;
4762 return ret;
4765 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4767 struct drm_device *dev = dev_priv->dev;
4768 unsigned long val;
4770 if (INTEL_INFO(dev)->gen != 5)
4771 return 0;
4773 spin_lock_irq(&mchdev_lock);
4775 val = __i915_chipset_val(dev_priv);
4777 spin_unlock_irq(&mchdev_lock);
4779 return val;
4782 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4784 unsigned long m, x, b;
4785 u32 tsfs;
4787 tsfs = I915_READ(TSFS);
4789 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4790 x = I915_READ8(TR1);
4792 b = tsfs & TSFS_INTR_MASK;
4794 return ((m * x) / 127) - b;
4797 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4799 struct drm_device *dev = dev_priv->dev;
4800 static const struct v_table {
4801 u16 vd; /* in .1 mil */
4802 u16 vm; /* in .1 mil */
4803 } v_table[] = {
4804 /* the ~128 PXVID-to-voltage entries are elided from this listing */
4931 };
4933 if (INTEL_INFO(dev)->is_mobile)
4934 return v_table[pxvid].vm;
4936 return v_table[pxvid].vd;
4939 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4941 u64 now, diff, diffms;
4942 u32 count;
4944 assert_spin_locked(&mchdev_lock);
4946 now = ktime_get_raw_ns();
4947 diffms = now - dev_priv->ips.last_time2;
4948 do_div(diffms, NSEC_PER_MSEC);
4950 /* Don't divide by 0 */
4951 if (!diffms)
4952 return;
4954 count = I915_READ(GFXEC);
4956 if (count < dev_priv->ips.last_count2) {
4957 diff = ~0UL - dev_priv->ips.last_count2;
4958 diff += count;
4959 } else {
4960 diff = count - dev_priv->ips.last_count2;
4963 dev_priv->ips.last_count2 = count;
4964 dev_priv->ips.last_time2 = now;
4966 /* More magic constants... */
4967 diff = diff * 1181;
4968 diff = div_u64(diff, diffms * 10);
4969 dev_priv->ips.gfx_power = diff;
4972 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4974 struct drm_device *dev = dev_priv->dev;
4976 if (INTEL_INFO(dev)->gen != 5)
4977 return;
4979 spin_lock_irq(&mchdev_lock);
4981 __i915_update_gfx_val(dev_priv);
4983 spin_unlock_irq(&mchdev_lock);
4986 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4988 unsigned long t, corr, state1, corr2, state2;
4989 u32 pxvid, ext_v;
4991 assert_spin_locked(&mchdev_lock);
4993 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4994 pxvid = (pxvid >> 24) & 0x7f;
4995 ext_v = pvid_to_extvid(dev_priv, pxvid);
4997 state1 = ext_v;
4999 t = i915_mch_val(dev_priv);
5001 /* Revel in the empirically derived constants */
5003 /* Correction factor in 1/100000 units */
5004 if (t > 80)
5005 corr = ((t * 2349) + 135940);
5006 else if (t >= 50)
5007 corr = ((t * 964) + 29317);
5008 else /* < 50 */
5009 corr = ((t * 301) + 1004);
5011 corr = corr * ((150142 * state1) / 10000 - 78642);
5012 corr /= 100000;
5013 corr2 = (corr * dev_priv->ips.corr);
5015 state2 = (corr2 * state1) / 10000;
5016 state2 /= 100; /* convert to mW */
5018 __i915_update_gfx_val(dev_priv);
5020 return dev_priv->ips.gfx_power + state2;
5023 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5025 struct drm_device *dev = dev_priv->dev;
5026 unsigned long val;
5028 if (INTEL_INFO(dev)->gen != 5)
5029 return 0;
5031 spin_lock_irq(&mchdev_lock);
5033 val = __i915_gfx_val(dev_priv);
5035 spin_unlock_irq(&mchdev_lock);
5037 return val;
5041 * i915_read_mch_val - return value for IPS use
5043 * Calculate and return a value for the IPS driver to use when deciding whether
5044 * we have thermal and power headroom to increase CPU or GPU power budget.
5046 unsigned long i915_read_mch_val(void)
5048 struct drm_i915_private *dev_priv;
5049 unsigned long chipset_val, graphics_val, ret = 0;
5051 spin_lock_irq(&mchdev_lock);
5052 if (!i915_mch_dev)
5053 goto out_unlock;
5054 dev_priv = i915_mch_dev;
5056 chipset_val = __i915_chipset_val(dev_priv);
5057 graphics_val = __i915_gfx_val(dev_priv);
5059 ret = chipset_val + graphics_val;
5061 out_unlock:
5062 spin_unlock_irq(&mchdev_lock);
5064 return ret;
5066 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5069 * i915_gpu_raise - raise GPU frequency limit
5071 * Raise the limit; IPS indicates we have thermal headroom.
5073 bool i915_gpu_raise(void)
5075 struct drm_i915_private *dev_priv;
5076 bool ret = true;
5078 spin_lock_irq(&mchdev_lock);
5079 if (!i915_mch_dev) {
5080 ret = false;
5081 goto out_unlock;
5083 dev_priv = i915_mch_dev;
5085 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5086 dev_priv->ips.max_delay--;
5088 out_unlock:
5089 spin_unlock_irq(&mchdev_lock);
5091 return ret;
5093 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5096 * i915_gpu_lower - lower GPU frequency limit
5098 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5099 * frequency maximum.
5101 bool i915_gpu_lower(void)
5103 struct drm_i915_private *dev_priv;
5104 bool ret = true;
5106 spin_lock_irq(&mchdev_lock);
5107 if (!i915_mch_dev) {
5108 ret = false;
5109 goto out_unlock;
5111 dev_priv = i915_mch_dev;
5113 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5114 dev_priv->ips.max_delay++;
5116 out_unlock:
5117 spin_unlock_irq(&mchdev_lock);
5119 return ret;
5121 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5124 * i915_gpu_busy - indicate GPU business to IPS
5126 * Tell the IPS driver whether or not the GPU is busy.
5128 bool i915_gpu_busy(void)
5130 struct drm_i915_private *dev_priv;
5131 struct intel_engine_cs *ring;
5132 bool ret = false;
5133 int i;
5135 spin_lock_irq(&mchdev_lock);
5136 if (!i915_mch_dev)
5137 goto out_unlock;
5138 dev_priv = i915_mch_dev;
5140 for_each_ring(ring, dev_priv, i)
5141 ret |= !list_empty(&ring->request_list);
5143 out_unlock:
5144 spin_unlock_irq(&mchdev_lock);
5146 return ret;
5148 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5151 * i915_gpu_turbo_disable - disable graphics turbo
5153 * Disable graphics turbo by resetting the max frequency and setting the
5154 * current frequency to the default.
5156 bool i915_gpu_turbo_disable(void)
5158 struct drm_i915_private *dev_priv;
5159 bool ret = true;
5161 spin_lock_irq(&mchdev_lock);
5162 if (!i915_mch_dev) {
5163 ret = false;
5164 goto out_unlock;
5166 dev_priv = i915_mch_dev;
5168 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5170 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5171 ret = false;
5173 out_unlock:
5174 spin_unlock_irq(&mchdev_lock);
5176 return ret;
5178 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5181 * Tells the intel_ips driver that the i915 driver is now loaded, if
5182 * IPS got loaded first.
5184 * This awkward dance is so that neither module has to depend on the
5185 * other in order for IPS to do the appropriate communication of
5186 * GPU turbo limits to i915.
5189 ips_ping_for_i915_load(void)
5191 void (*link)(void);
5193 link = symbol_get(ips_link_to_i915_driver);
5194 if (link) {
5195 link();
5196 symbol_put(ips_link_to_i915_driver);
5200 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5202 /* We only register the i915 ips part with intel-ips once everything is
5203 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5204 spin_lock_irq(&mchdev_lock);
5205 i915_mch_dev = dev_priv;
5206 spin_unlock_irq(&mchdev_lock);
5208 ips_ping_for_i915_load();
5211 void intel_gpu_ips_teardown(void)
5213 spin_lock_irq(&mchdev_lock);
5214 i915_mch_dev = NULL;
5215 spin_unlock_irq(&mchdev_lock);
5218 static void intel_init_emon(struct drm_device *dev)
5220 struct drm_i915_private *dev_priv = dev->dev_private;
5221 u32 lcfuse;
5222 u8 pxw[16];
5223 int i;
5225 /* Disable to program */
5226 I915_WRITE(ECR, 0);
5227 POSTING_READ(ECR);
5229 /* Program energy weights for various events */
5230 I915_WRITE(SDEW, 0x15040d00);
5231 I915_WRITE(CSIEW0, 0x007f0000);
5232 I915_WRITE(CSIEW1, 0x1e220004);
5233 I915_WRITE(CSIEW2, 0x04000004);
5235 for (i = 0; i < 5; i++)
5236 I915_WRITE(PEW + (i * 4), 0);
5237 for (i = 0; i < 3; i++)
5238 I915_WRITE(DEW + (i * 4), 0);
5240 /* Program P-state weights to account for frequency power adjustment */
5241 for (i = 0; i < 16; i++) {
5242 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5243 unsigned long freq = intel_pxfreq(pxvidfreq);
5244 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5245 PXVFREQ_PX_SHIFT;
5246 unsigned long val;
5248 val = vid * vid;
5249 val *= (freq / 1000);
5250 val *= 255;
5251 val /= (127*127*900);
5252 if (val > 0xff)
5253 DRM_ERROR("bad pxval: %ld\n", val);
5254 pxw[i] = val;
5256 /* Render standby states get 0 weight */
5257 pxw[14] = 0;
5258 pxw[15] = 0;
5260 for (i = 0; i < 4; i++) {
5261 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5262 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5263 I915_WRITE(PXW + (i * 4), val);
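/*
 * Worked example (illustrative): the sixteen per-state weights are
 * packed four bytes per register, most significant byte first, so with
 * pxw[0..3] = 0x40, 0x38, 0x30, 0x28 the first write above is
 * PXW + 0 = (0x40 << 24) | (0x38 << 16) | (0x30 << 8) | 0x28
 *         = 0x40383028.
 */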
5266 /* Adjust magic regs to magic values (more experimental results) */
5267 I915_WRITE(OGW0, 0);
5268 I915_WRITE(OGW1, 0);
5269 I915_WRITE(EG0, 0x00007f00);
5270 I915_WRITE(EG1, 0x0000000e);
5271 I915_WRITE(EG2, 0x000e0000);
5272 I915_WRITE(EG3, 0x68000300);
5273 I915_WRITE(EG4, 0x42000000);
5274 I915_WRITE(EG5, 0x00140031);
5275 I915_WRITE(EG6, 0);
5276 I915_WRITE(EG7, 0);
5278 for (i = 0; i < 8; i++)
5279 I915_WRITE(PXWL + (i * 4), 0);
5281 /* Enable PMON + select events */
5282 I915_WRITE(ECR, 0x80000019);
5284 lcfuse = I915_READ(LCFUSE02);
5286 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
5289 void intel_init_gt_powersave(struct drm_device *dev)
5291 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
5293 if (IS_CHERRYVIEW(dev))
5294 cherryview_init_gt_powersave(dev);
5295 else if (IS_VALLEYVIEW(dev))
5296 valleyview_init_gt_powersave(dev);
5299 void intel_cleanup_gt_powersave(struct drm_device *dev)
5301 if (IS_CHERRYVIEW(dev))
5302 return;
5303 else if (IS_VALLEYVIEW(dev))
5304 valleyview_cleanup_gt_powersave(dev);
5308 * intel_suspend_gt_powersave - suspend PM work and helper threads
5311 * We don't want to disable RC6 or other features here, we just want
5312 * to make sure any work we've queued has finished and won't bother
5313 * us while we're suspended.
5315 void intel_suspend_gt_powersave(struct drm_device *dev)
5317 struct drm_i915_private *dev_priv = dev->dev_private;
5319 /* Interrupts should be disabled already to avoid re-arming. */
5320 WARN_ON(intel_irqs_enabled(dev_priv));
5322 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5324 cancel_work_sync(&dev_priv->rps.work);
5326 /* Force GPU to min freq during suspend */
5327 gen6_rps_idle(dev_priv);
5330 void intel_disable_gt_powersave(struct drm_device *dev)
5332 struct drm_i915_private *dev_priv = dev->dev_private;
5334 /* Interrupts should be disabled already to avoid re-arming. */
5335 WARN_ON(intel_irqs_enabled(dev_priv));
5337 if (IS_IRONLAKE_M(dev)) {
5338 ironlake_disable_drps(dev);
5339 ironlake_disable_rc6(dev);
5340 } else if (INTEL_INFO(dev)->gen >= 6) {
5341 intel_suspend_gt_powersave(dev);
5343 mutex_lock(&dev_priv->rps.hw_lock);
5344 if (IS_CHERRYVIEW(dev))
5345 cherryview_disable_rps(dev);
5346 else if (IS_VALLEYVIEW(dev))
5347 valleyview_disable_rps(dev);
5348 else
5349 gen6_disable_rps(dev);
5350 dev_priv->rps.enabled = false;
5351 mutex_unlock(&dev_priv->rps.hw_lock);
5355 static void intel_gen6_powersave_work(struct work_struct *work)
5357 struct drm_i915_private *dev_priv =
5358 container_of(work, struct drm_i915_private,
5359 rps.delayed_resume_work.work);
5360 struct drm_device *dev = dev_priv->dev;
5362 dev_priv->rps.is_bdw_sw_turbo = false;
5364 mutex_lock(&dev_priv->rps.hw_lock);
5366 if (IS_CHERRYVIEW(dev)) {
5367 cherryview_enable_rps(dev);
5368 } else if (IS_VALLEYVIEW(dev)) {
5369 valleyview_enable_rps(dev);
5370 } else if (IS_BROADWELL(dev)) {
5371 gen8_enable_rps(dev);
5372 __gen6_update_ring_freq(dev);
5373 } else {
5374 gen6_enable_rps(dev);
5375 __gen6_update_ring_freq(dev);
5377 dev_priv->rps.enabled = true;
5378 mutex_unlock(&dev_priv->rps.hw_lock);
5380 intel_runtime_pm_put(dev_priv);
5383 void intel_enable_gt_powersave(struct drm_device *dev)
5385 struct drm_i915_private *dev_priv = dev->dev_private;
5387 if (IS_IRONLAKE_M(dev)) {
5388 mutex_lock(&dev->struct_mutex);
5389 ironlake_enable_drps(dev);
5390 ironlake_enable_rc6(dev);
5391 intel_init_emon(dev);
5392 mutex_unlock(&dev->struct_mutex);
5393 } else if (INTEL_INFO(dev)->gen >= 6) {
5395 * PCU communication is slow and this doesn't need to be
5396 * done at any specific time, so do this out of our fast path
5397 * to make resume and init faster.
5399 * We depend on the HW RC6 power context save/restore
5400 * mechanism when entering D3 through runtime PM suspend. So
5401 * disable RPM until RPS/RC6 is properly setup. We can only
5402 * get here via the driver load/system resume/runtime resume
5403 * paths, so the _noresume version is enough (and in case of
5404 * runtime resume it's necessary).
5406 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5407 round_jiffies_up_relative(HZ)))
5408 intel_runtime_pm_get_noresume(dev_priv);
5412 void intel_reset_gt_powersave(struct drm_device *dev)
5414 struct drm_i915_private *dev_priv = dev->dev_private;
5416 dev_priv->rps.enabled = false;
5417 intel_enable_gt_powersave(dev);
5420 static void ibx_init_clock_gating(struct drm_device *dev)
5422 struct drm_i915_private *dev_priv = dev->dev_private;
5425 * On Ibex Peak and Cougar Point, we need to disable clock
5426 * gating for the panel power sequencer or it will fail to
5427 * start up when no ports are active.
5429 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5432 static void g4x_disable_trickle_feed(struct drm_device *dev)
5434 struct drm_i915_private *dev_priv = dev->dev_private;
5435 enum pipe pipe;
5437 for_each_pipe(dev_priv, pipe) {
5438 I915_WRITE(DSPCNTR(pipe),
5439 I915_READ(DSPCNTR(pipe)) |
5440 DISPPLANE_TRICKLE_FEED_DISABLE);
5441 intel_flush_primary_plane(dev_priv, pipe);
5445 static void ilk_init_lp_watermarks(struct drm_device *dev)
5447 struct drm_i915_private *dev_priv = dev->dev_private;
5449 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5450 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5451 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5454 * Don't touch WM1S_LP_EN here.
5455 * Doing so could cause underruns.
5459 static void ironlake_init_clock_gating(struct drm_device *dev)
5461 struct drm_i915_private *dev_priv = dev->dev_private;
5462 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5465 * Required for FBC
5466 * WaFbcDisableDpfcClockGating:ilk
5468 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5469 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5470 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5472 I915_WRITE(PCH_3DCGDIS0,
5473 MARIUNIT_CLOCK_GATE_DISABLE |
5474 SVSMUNIT_CLOCK_GATE_DISABLE);
5475 I915_WRITE(PCH_3DCGDIS1,
5476 VFMUNIT_CLOCK_GATE_DISABLE);
5479 * According to the spec the following bits should be set in
5480 * order to enable memory self-refresh
5481 * The bit 22/21 of 0x42004
5482 * The bit 5 of 0x42020
5483 * The bit 15 of 0x45000
5485 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5486 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5487 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5488 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5489 I915_WRITE(DISP_ARB_CTL,
5490 (I915_READ(DISP_ARB_CTL) |
5491 DISP_FBC_WM_DIS));
5493 ilk_init_lp_watermarks(dev);
5496 * Based on the document from hardware guys the following bits
5497 * should be set unconditionally in order to enable FBC.
5498 * The bit 22 of 0x42000
5499 * The bit 22 of 0x42004
5500 * The bit 7,8,9 of 0x42020.
5502 if (IS_IRONLAKE_M(dev)) {
5503 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5504 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5505 I915_READ(ILK_DISPLAY_CHICKEN1) |
5506 ILK_FBCQ_DIS);
5507 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5508 I915_READ(ILK_DISPLAY_CHICKEN2) |
5509 ILK_DPARB_GATE);
5512 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5514 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5515 I915_READ(ILK_DISPLAY_CHICKEN2) |
5516 ILK_ELPIN_409_SELECT);
5517 I915_WRITE(_3D_CHICKEN2,
5518 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5519 _3D_CHICKEN2_WM_READ_PIPELINED);
5521 /* WaDisableRenderCachePipelinedFlush:ilk */
5522 I915_WRITE(CACHE_MODE_0,
5523 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5525 /* WaDisable_RenderCache_OperationalFlush:ilk */
5526 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5528 g4x_disable_trickle_feed(dev);
5530 ibx_init_clock_gating(dev);
5533 static void cpt_init_clock_gating(struct drm_device *dev)
5535 struct drm_i915_private *dev_priv = dev->dev_private;
5536 int pipe;
5537 uint32_t val;
5540 * On Ibex Peak and Cougar Point, we need to disable clock
5541 * gating for the panel power sequencer or it will fail to
5542 * start up when no ports are active.
5544 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5545 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5546 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5547 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5548 DPLS_EDP_PPS_FIX_DIS);
5549 /* The below fixes the weird display corruption, a few pixels shifted
5550 * downward, on (only) LVDS of some HP laptops with IVY.
5552 for_each_pipe(dev_priv, pipe) {
5553 val = I915_READ(TRANS_CHICKEN2(pipe));
5554 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5555 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5556 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5557 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5558 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5559 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5560 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5561 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5563 /* WADP0ClockGatingDisable */
5564 for_each_pipe(dev_priv, pipe) {
5565 I915_WRITE(TRANS_CHICKEN1(pipe),
5566 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5570 static void gen6_check_mch_setup(struct drm_device *dev)
5572 struct drm_i915_private *dev_priv = dev->dev_private;
5573 uint32_t tmp;
5575 tmp = I915_READ(MCH_SSKPD);
5576 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
5577 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
5578 tmp);
5581 static void gen6_init_clock_gating(struct drm_device *dev)
5583 struct drm_i915_private *dev_priv = dev->dev_private;
5584 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5586 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5588 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5589 I915_READ(ILK_DISPLAY_CHICKEN2) |
5590 ILK_ELPIN_409_SELECT);
5592 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5593 I915_WRITE(_3D_CHICKEN,
5594 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5596 /* WaSetupGtModeTdRowDispatch:snb */
5597 if (IS_SNB_GT1(dev))
5598 I915_WRITE(GEN6_GT_MODE,
5599 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5601 /* WaDisable_RenderCache_OperationalFlush:snb */
5602 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5605 * BSpec recommends 8x4 when MSAA is used,
5606 * however in practice 16x4 seems fastest.
5608 * Note that PS/WM thread counts depend on the WIZ hashing
5609 * disable bit, which we don't touch here, but it's good
5610 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5612 I915_WRITE(GEN6_GT_MODE,
5613 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5615 ilk_init_lp_watermarks(dev);
5617 I915_WRITE(CACHE_MODE_0,
5618 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5620 I915_WRITE(GEN6_UCGCTL1,
5621 I915_READ(GEN6_UCGCTL1) |
5622 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5623 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5625 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5626 * gating disable must be set. Failure to set it results in
5627 * flickering pixels due to Z write ordering failures after
5628 * some amount of runtime in the Mesa "fire" demo, and Unigine
5629 * Sanctuary and Tropics, and apparently anything else with
5630 * alpha test or pixel discard.
5632 * According to the spec, bit 11 (RCCUNIT) must also be set,
5633 * but we didn't debug actual testcases to find it out.
5635 * WaDisableRCCUnitClockGating:snb
5636 * WaDisableRCPBUnitClockGating:snb
5638 I915_WRITE(GEN6_UCGCTL2,
5639 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5640 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5642 /* WaStripsFansDisableFastClipPerformanceFix:snb */
5643 I915_WRITE(_3D_CHICKEN3,
5644 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5647 * Bspec says:
5648 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
5649 * 3DSTATE_SF number of SF output attributes is more than 16."
5651 I915_WRITE(_3D_CHICKEN3,
5652 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5655 * According to the spec the following bits should be
5656 * set in order to enable memory self-refresh and fbc:
5657 * The bit21 and bit22 of 0x42000
5658 * The bit21 and bit22 of 0x42004
5659 * The bit5 and bit7 of 0x42020
5660 * The bit14 of 0x70180
5661 * The bit14 of 0x71180
5663 * WaFbcAsynchFlipDisableFbcQueue:snb
5665 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5666 I915_READ(ILK_DISPLAY_CHICKEN1) |
5667 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5668 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5669 I915_READ(ILK_DISPLAY_CHICKEN2) |
5670 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5671 I915_WRITE(ILK_DSPCLK_GATE_D,
5672 I915_READ(ILK_DSPCLK_GATE_D) |
5673 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5674 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5676 g4x_disable_trickle_feed(dev);
5678 cpt_init_clock_gating(dev);
5680 gen6_check_mch_setup(dev);
5683 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5685 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5688 * WaVSThreadDispatchOverride:ivb,vlv
5690 * This actually overrides the dispatch
5691 * mode for all thread types.
5693 reg &= ~GEN7_FF_SCHED_MASK;
5694 reg |= GEN7_FF_TS_SCHED_HW;
5695 reg |= GEN7_FF_VS_SCHED_HW;
5696 reg |= GEN7_FF_DS_SCHED_HW;
5698 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5701 static void lpt_init_clock_gating(struct drm_device *dev)
5703 struct drm_i915_private *dev_priv = dev->dev_private;
5706 * TODO: this bit should only be enabled when really needed, then
5707 * disabled when not needed anymore in order to save power.
5709 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5710 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5711 I915_READ(SOUTH_DSPCLK_GATE_D) |
5712 PCH_LP_PARTITION_LEVEL_DISABLE);
5714 /* WADPOClockGatingDisable:hsw */
5715 I915_WRITE(_TRANSA_CHICKEN1,
5716 I915_READ(_TRANSA_CHICKEN1) |
5717 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5720 static void lpt_suspend_hw(struct drm_device *dev)
5722 struct drm_i915_private *dev_priv = dev->dev_private;
5724 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5725 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5727 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5728 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5732 static void broadwell_init_clock_gating(struct drm_device *dev)
5734 struct drm_i915_private *dev_priv = dev->dev_private;
5735 enum pipe pipe;
5737 I915_WRITE(WM3_LP_ILK, 0);
5738 I915_WRITE(WM2_LP_ILK, 0);
5739 I915_WRITE(WM1_LP_ILK, 0);
5741 /* FIXME(BDW): Check all the w/a, some might only apply to
5742 * pre-production hw. */
5745 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5747 I915_WRITE(_3D_CHICKEN3,
5748 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5751 /* WaSwitchSolVfFArbitrationPriority:bdw */
5752 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5754 /* WaPsrDPAMaskVBlankInSRD:bdw */
5755 I915_WRITE(CHICKEN_PAR1_1,
5756 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5758 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5759 for_each_pipe(dev_priv, pipe) {
5760 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5761 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5762 BDW_DPRS_MASK_VBLANK_SRD);
5765 /* WaVSRefCountFullforceMissDisable:bdw */
5766 /* WaDSRefCountFullforceMissDisable:bdw */
5767 I915_WRITE(GEN7_FF_THREAD_MODE,
5768 I915_READ(GEN7_FF_THREAD_MODE) &
5769 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5771 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5772 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5774 /* WaDisableSDEUnitClockGating:bdw */
5775 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5776 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5778 lpt_init_clock_gating(dev);
5781 static void haswell_init_clock_gating(struct drm_device *dev)
5783 struct drm_i915_private *dev_priv = dev->dev_private;
5785 ilk_init_lp_watermarks(dev);
5787 /* L3 caching of data atomics doesn't work -- disable it. */
5788 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5789 I915_WRITE(HSW_ROW_CHICKEN3,
5790 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5792 /* This is required by WaCatErrorRejectionIssue:hsw */
5793 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5794 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5795 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5797 /* WaVSRefCountFullforceMissDisable:hsw */
5798 I915_WRITE(GEN7_FF_THREAD_MODE,
5799 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5801 /* WaDisable_RenderCache_OperationalFlush:hsw */
5802 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5804 /* enable HiZ Raw Stall Optimization */
5805 I915_WRITE(CACHE_MODE_0_GEN7,
5806 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5808 /* WaDisable4x2SubspanOptimization:hsw */
5809 I915_WRITE(CACHE_MODE_1,
5810 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5813 * BSpec recommends 8x4 when MSAA is used,
5814 * however in practice 16x4 seems fastest.
5816 * Note that PS/WM thread counts depend on the WIZ hashing
5817 * disable bit, which we don't touch here, but it's good
5818 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5820 I915_WRITE(GEN7_GT_MODE,
5821 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5823 /* WaSwitchSolVfFArbitrationPriority:hsw */
5824 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5826 /* WaRsPkgCStateDisplayPMReq:hsw */
5827 I915_WRITE(CHICKEN_PAR1_1,
5828 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5830 lpt_init_clock_gating(dev);
5833 static void ivybridge_init_clock_gating(struct drm_device *dev)
5835 struct drm_i915_private *dev_priv = dev->dev_private;
5836 uint32_t snpcr;
5838 ilk_init_lp_watermarks(dev);
5840 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5842 /* WaDisableEarlyCull:ivb */
5843 I915_WRITE(_3D_CHICKEN3,
5844 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5846 /* WaDisableBackToBackFlipFix:ivb */
5847 I915_WRITE(IVB_CHICKEN3,
5848 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5849 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5851 /* WaDisablePSDDualDispatchEnable:ivb */
5852 if (IS_IVB_GT1(dev))
5853 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5854 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5856 /* WaDisable_RenderCache_OperationalFlush:ivb */
5857 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5859 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5860 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5861 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5863 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5864 I915_WRITE(GEN7_L3CNTLREG1,
5865 GEN7_WA_FOR_GEN7_L3_CONTROL);
5866 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5867 GEN7_WA_L3_CHICKEN_MODE);
5868 if (IS_IVB_GT1(dev))
5869 I915_WRITE(GEN7_ROW_CHICKEN2,
5870 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5871 else {
5872 /* must write both registers */
5873 I915_WRITE(GEN7_ROW_CHICKEN2,
5874 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5875 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5876 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5879 /* WaForceL3Serialization:ivb */
5880 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5881 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5884 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5885 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5887 I915_WRITE(GEN6_UCGCTL2,
5888 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5890 /* This is required by WaCatErrorRejectionIssue:ivb */
5891 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5892 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5893 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5895 g4x_disable_trickle_feed(dev);
5897 gen7_setup_fixed_func_scheduler(dev_priv);
5899 if (0) { /* causes HiZ corruption on ivb:gt1 */
5900 /* enable HiZ Raw Stall Optimization */
5901 I915_WRITE(CACHE_MODE_0_GEN7,
5902 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5905 /* WaDisable4x2SubspanOptimization:ivb */
5906 I915_WRITE(CACHE_MODE_1,
5907 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5910 * BSpec recommends 8x4 when MSAA is used,
5911 * however in practice 16x4 seems fastest.
5913 * Note that PS/WM thread counts depend on the WIZ hashing
5914 * disable bit, which we don't touch here, but it's good
5915 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5917 I915_WRITE(GEN7_GT_MODE,
5918 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5920 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5921 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5922 snpcr |= GEN6_MBC_SNPCR_MED;
5923 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5925 if (!HAS_PCH_NOP(dev))
5926 cpt_init_clock_gating(dev);
5928 gen6_check_mch_setup(dev);
5931 static void valleyview_init_clock_gating(struct drm_device *dev)
5933 struct drm_i915_private *dev_priv = dev->dev_private;
5935 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5937 /* WaDisableEarlyCull:vlv */
5938 I915_WRITE(_3D_CHICKEN3,
5939 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5941 /* WaDisableBackToBackFlipFix:vlv */
5942 I915_WRITE(IVB_CHICKEN3,
5943 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5944 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5946 /* WaPsdDispatchEnable:vlv */
5947 /* WaDisablePSDDualDispatchEnable:vlv */
5948 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5949 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5950 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5952 /* WaDisable_RenderCache_OperationalFlush:vlv */
5953 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5955 /* WaForceL3Serialization:vlv */
5956 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5957 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5959 /* WaDisableDopClockGating:vlv */
5960 I915_WRITE(GEN7_ROW_CHICKEN2,
5961 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5963 /* This is required by WaCatErrorRejectionIssue:vlv */
5964 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5965 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5966 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5968 gen7_setup_fixed_func_scheduler(dev_priv);
5971 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5972 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5974 I915_WRITE(GEN6_UCGCTL2,
5975 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5977 /* WaDisableL3Bank2xClockGate:vlv
5978 * Disabling L3 clock gating - MMIO 940c[25] = 1
5979 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
5980 I915_WRITE(GEN7_UCGCTL4,
5981 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5983 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5986 * BSpec says this must be set, even though
5987 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5989 I915_WRITE(CACHE_MODE_1,
5990 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5993 * WaIncreaseL3CreditsForVLVB0:vlv
5994 * This is the hardware default actually.
5996 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5999 * WaDisableVLVClockGating_VBIIssue:vlv
6000 * Disable clock gating on the GCFG unit to prevent a delay
6001 * in the reporting of vblank events.
6003 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
6006 static void cherryview_init_clock_gating(struct drm_device *dev)
6008 struct drm_i915_private *dev_priv = dev->dev_private;
6010 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6012 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6014 /* WaVSRefCountFullforceMissDisable:chv */
6015 /* WaDSRefCountFullforceMissDisable:chv */
6016 I915_WRITE(GEN7_FF_THREAD_MODE,
6017 I915_READ(GEN7_FF_THREAD_MODE) &
6018 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6020 /* WaDisableSemaphoreAndSyncFlipWait:chv */
6021 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6022 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6024 /* WaDisableCSUnitClockGating:chv */
6025 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6026 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6028 /* WaDisableSDEUnitClockGating:chv */
6029 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6030 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6032 /* WaDisableGunitClockGating:chv (pre-production hw) */
6033 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
6034 GINT_DIS);
6036 /* WaDisableFfDopClockGating:chv (pre-production hw) */
6037 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6038 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
6040 /* WaDisableDopClockGating:chv (pre-production hw) */
6041 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6042 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
6045 static void g4x_init_clock_gating(struct drm_device *dev)
6047 struct drm_i915_private *dev_priv = dev->dev_private;
6048 uint32_t dspclk_gate;
6050 I915_WRITE(RENCLK_GATE_D1, 0);
6051 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
6052 GS_UNIT_CLOCK_GATE_DISABLE |
6053 CL_UNIT_CLOCK_GATE_DISABLE);
6054 I915_WRITE(RAMCLK_GATE_D, 0);
6055 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
6056 OVRUNIT_CLOCK_GATE_DISABLE |
6057 OVCUNIT_CLOCK_GATE_DISABLE;
6058 if (IS_GM45(dev))
6059 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
6060 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
6062 /* WaDisableRenderCachePipelinedFlush */
6063 I915_WRITE(CACHE_MODE_0,
6064 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6066 /* WaDisable_RenderCache_OperationalFlush:g4x */
6067 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6069 g4x_disable_trickle_feed(dev);
6072 static void crestline_init_clock_gating(struct drm_device *dev)
6074 struct drm_i915_private *dev_priv = dev->dev_private;
6076 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
6077 I915_WRITE(RENCLK_GATE_D2, 0);
6078 I915_WRITE(DSPCLK_GATE_D, 0);
6079 I915_WRITE(RAMCLK_GATE_D, 0);
6080 I915_WRITE16(DEUC, 0);
6081 I915_WRITE(MI_ARB_STATE,
6082 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6084 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6085 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6088 static void broadwater_init_clock_gating(struct drm_device *dev)
6090 struct drm_i915_private *dev_priv = dev->dev_private;
6092 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
6093 I965_RCC_CLOCK_GATE_DISABLE |
6094 I965_RCPB_CLOCK_GATE_DISABLE |
6095 I965_ISC_CLOCK_GATE_DISABLE |
6096 I965_FBC_CLOCK_GATE_DISABLE);
6097 I915_WRITE(RENCLK_GATE_D2, 0);
6098 I915_WRITE(MI_ARB_STATE,
6099 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6101 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6102 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6105 static void gen3_init_clock_gating(struct drm_device *dev)
6107 struct drm_i915_private *dev_priv = dev->dev_private;
6108 u32 dstate = I915_READ(D_STATE);
6110 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
6111 DSTATE_DOT_CLOCK_GATING;
6112 I915_WRITE(D_STATE, dstate);
6114 if (IS_PINEVIEW(dev))
6115 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
6117 /* IIR "flip pending" means done if this bit is set */
6118 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
6120 /* interrupts should cause a wake up from C3 */
6121 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
6123 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
6124 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6127 static void i85x_init_clock_gating(struct drm_device *dev)
6129 struct drm_i915_private *dev_priv = dev->dev_private;
6131 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
6133 /* interrupts should cause a wake up from C3 */
6134 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
6135 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6138 static void i830_init_clock_gating(struct drm_device *dev)
6140 struct drm_i915_private *dev_priv = dev->dev_private;
6142 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6145 void intel_init_clock_gating(struct drm_device *dev)
6147 struct drm_i915_private *dev_priv = dev->dev_private;
6149 dev_priv->display.init_clock_gating(dev);
6152 void intel_suspend_hw(struct drm_device *dev)
6154 if (HAS_PCH_LPT(dev))
6155 lpt_suspend_hw(dev);
6158 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
6159 for (i = 0; \
6160 i < (power_domains)->power_well_count && \
6161 ((power_well) = &(power_domains)->power_wells[i]); \
6162 i++) \
6163 if ((power_well)->domains & (domain_mask))
6165 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6166 for (i = (power_domains)->power_well_count - 1; \
6167 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6168 i--) \
6169 if ((power_well)->domains & (domain_mask))
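/*
 * Usage sketch (illustrative; assumes the wells carry a ->name field,
 * as they do elsewhere in i915): list every well feeding the VGA
 * domain, starting from the last one to be powered off.
 *
 *	int i;
 *	struct i915_power_well *power_well;
 *
 *	for_each_power_well_rev(i, power_well, BIT(POWER_DOMAIN_VGA),
 *				&dev_priv->power_domains)
 *		DRM_DEBUG_KMS("%s feeds VGA\n", power_well->name);
 */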
6172 * We should only use the power well if we explicitly asked the hardware to
6173 * enable it, so check if it's enabled and also check if we've requested it to
6176 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6177 struct i915_power_well *power_well)
6179 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6180 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
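/*
 * For reference (following the i915_reg.h naming): the four request
 * registers are HSW_PWR_WELL_BIOS, HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR
 * and HSW_PWR_WELL_DEBUG. hsw_set_power_well() below only ever toggles the
 * driver's own request register, and hsw_power_well_sync_hw() clears the
 * BIOS request once the driver has taken over.
 */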
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

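/*
 * A note on the COND idiom above: wait_for() re-evaluates its condition
 * until it holds or the timeout (in ms) expires, so the Punit read must
 * stay inside the macro rather than being hoisted into a local. A minimal
 * sketch of the same pattern, with hypothetical register/bit names:
 *
 *	#define COND (vlv_punit_read(dev_priv, MY_STATUS_REG) & MY_READY_BIT)
 *	if (wait_for(COND, 100))
 *		DRM_ERROR("timeout waiting for MY_READY_BIT\n");
 *	#undef COND
 */
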
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}

void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

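/*
 * Illustrative usage (a sketch, not from this file): callers bracket any
 * hardware access that needs a domain powered with a get/put pair; the
 * reference counting above keeps the underlying wells up for as long as at
 * least one user remains:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	... access registers that require the audio domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 */
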
static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);

/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making any CDCLK accesses.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);

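/*
 * Illustrative usage from the audio driver's side (a sketch; the actual
 * snd_hda consumer lives outside this file):
 *
 *	if (i915_request_power_well() == 0) {
 *		int cdclk = i915_get_cdclk_freq();
 *		... program the HDA controller using cdclk ...
 *		i915_release_power_well();
 *	}
 */
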
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}

void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

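/*
 * Illustrative pattern (a sketch): any code path that touches the hardware
 * must hold a runtime PM reference for the duration of the access, mirroring
 * the display power domain get/put pairs above:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... access registers ...
 *	intel_runtime_pm_put(dev_priv);
 */
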
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}

void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

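/*
 * With the vtable filled in, the rest of the driver stays platform-agnostic
 * by calling through dev_priv->display. An illustrative sketch, assuming the
 * update_wm hook takes the CRTC as set up above:
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 */
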
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

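/*
 * Illustrative usage (a sketch): the mailbox is shared state, so callers
 * must hold rps.hw_lock around the request/poll protocol. "mbox" stands in
 * for one of the GEN6_PCODE_* command encodings defined elsewhere:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, mbox, &val))
 *		DRM_DEBUG_DRIVER("pcode read of %d failed\n", mbox);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */
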
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}

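/*
 * Worked example of the conversion above (assuming mem_freq == 1066 and
 * hence div/mul == 12): opcode 0xc3 maps to
 * DIV_ROUND_CLOSEST(1066 * (0xc3 + 6 - 0xbd), 48) = DIV_ROUND_CLOSEST(12792,
 * 48) = 267, and byt_freq_opcode() inverts this exactly:
 * DIV_ROUND_CLOSEST(4 * 12 * 267, 1066) + 0xbd - 6 = 12 + 0xb7 = 0xc3.
 */
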
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 320000:
		div = 8;
		break;
	case 333333:
		div = 8;
		break;
	case 400000:
		div = 10;
		break;
	default:
		return -1;
	}

	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);

	return freq;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 320000:
		mul = 8;
		break;
	case 333333:
		mul = 8;
		break;
	case 400000:
		mul = 10;
		break;
	default:
		return -1;
	}

	/* CHV needs even values */
	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);

	return opcode;
}

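/*
 * Worked example of the CHV conversion (assuming cz_freq == 320000 and
 * hence div/mul == 8): opcode 20 maps to
 * DIV_ROUND_CLOSEST(320000 * 20, 16) / 2 = 400000 / 2 = 200000, and the
 * opposite direction returns DIV_ROUND_CLOSEST(200000 * 2 * 8, 320000) * 2 =
 * 10 * 2 = 20; the final multiply by 2 is the "CHV needs even values" rule.
 */
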
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm._irqs_disabled = false;
}