/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, drawing as little as 0V in this stage. The
 * stage is entered automatically when the GPU is idle (provided RC6 support
 * is enabled), and the GPU wakes up automatically as soon as a new workload
 * arrives.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the various states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Hardware support varies with the GPU, BIOS,
 * chipset and platform. RC6 is usually the safest mode and the one that
 * brings the most power savings; deeper states save more power, but cost
 * more latency to enter and wake up from.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
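/*
 * Illustrative example only (not actual driver policy): a platform that can
 * safely use RC6 and deep RC6 but not RC6pp would be described by the mask
 * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE, and an individual state can then be
 * tested with e.g. (mask & INTEL_RC6p_ENABLE).
 */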
static void gen9_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* WaEnableLbsSlaRetryTimerDecrement:skl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

        /* WaDisableKillLogic:bxt,skl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
}
static void skl_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        if (INTEL_REVID(dev) <= SKL_REVID_D0) {
                /* WaDisableHDCInvalidation:skl */
                I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                           BDW_DISABLE_HDC_INVALIDATION);

                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
        }

        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
        if (INTEL_REVID(dev) <= SKL_REVID_E0)
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);

        /* WaEnableGapsTsvCreditFix:skl */
        if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }
}
static void bxt_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /*
         * FIXME:
         * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
         */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
        if (INTEL_REVID(dev) == BXT_REVID_A0)
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

        /* WaSetClckGatingDisableMedia:bxt */
        if (INTEL_REVID(dev) == BXT_REVID_A0) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->ips.r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->ips.c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->ips.c_m = 1;
        } else {
                dev_priv->ips.c_m = 2;
        }
}
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
                                                         int is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}
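/*
 * Example lookup, with the table columns read in declaration order
 * (is_desktop, is_ddr3, fsb_freq, mem_freq, then the four latency fields):
 * a desktop DDR3 system with an 800 MHz FSB and 667 MHz memory,
 * intel_get_cxsr_latency(1, 1, 800, 667), matches the
 * {1, 1, 800, 667, 6420, 36420, 6873, 36873} entry, i.e. a display
 * self-refresh latency of 6420 ns.
 */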
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        mutex_lock(&dev_priv->rps.hw_lock);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        if (enable)
                val &= ~FORCE_DDR_HIGH_FREQ;
        else
                val |= FORCE_DDR_HIGH_FREQ;
        val &= ~FORCE_DDR_LOW_FREQ;
        val |= FORCE_DDR_FREQ_REQ_ACK;
        vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
                DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

        mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        mutex_lock(&dev_priv->rps.hw_lock);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        if (enable)
                val |= DSP_MAXFIFO_PM5_ENABLE;
        else
                val &= ~DSP_MAXFIFO_PM5_ENABLE;
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

        mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
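/*
 * For reference, FW_WM(wm, SR) expands to
 * ((wm << DSPFW_SR_SHIFT) & DSPFW_SR_MASK): it shifts a watermark value
 * into the SR field of a DSPFW register and masks off any bits that would
 * spill outside that field.
 */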
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        struct drm_device *dev = dev_priv->dev;
        u32 val;

        if (IS_VALLEYVIEW(dev)) {
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
                POSTING_READ(FW_BLC_SELF_VLV);
                dev_priv->wm.vlv.cxsr = enable;
        } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_PINEVIEW(dev)) {
                val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
                val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
                I915_WRITE(DSPFW3, val);
                POSTING_READ(DSPFW3);
        } else if (IS_I945G(dev) || IS_I945GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
                POSTING_READ(INSTPM);
        } else {
                return;
        }

        DRM_DEBUG_KMS("memory self-refresh is %s\n",
                      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
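/*
 * Rough feel for the number (illustrative, not from bspec): with a
 * 148500 kHz pixel clock and 4 bytes per pixel the FIFO drains at about
 * 594 bytes/us, so a 5 us latency means roughly 594 * 5 = 2970 bytes,
 * i.e. ~47 64-byte FIFO lines, must still be queued in the FIFO at the
 * moment a fetch is issued.
 */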
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
        ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
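/*
 * Example: VLV_FIFO_START(dsparb, dsparb2, 8, 4) reconstructs a 9-bit FIFO
 * start point by taking bits 15:8 of DSPARB as the low byte and bit 4 of
 * DSPARB2 as bit 8, so e.g. dsparb = 0x1200 and dsparb2 = 0x10 yield
 * 0x112 (274).
 */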
static int vlv_get_fifo_size(struct drm_device *dev,
                             enum pipe pipe, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int sprite0_start, sprite1_start, size;

        switch (pipe) {
                uint32_t dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
                break;
        case PIPE_C:
                dsparb2 = I915_READ(DSPARB2);
                dsparb3 = I915_READ(DSPARB3);
                sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
                sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
                break;
        default:
                return 0;
        }

        switch (plane) {
        case 0:
                size = sprite0_start;
                break;
        case 1:
                size = sprite1_start - sprite0_start;
                break;
        case 2:
                size = 512 - 1 - sprite1_start;
                break;
        default:
                return 0;
        }

        DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
                      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
                      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
                      size);

        return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (plane)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
        .fifo_size = G4X_FIFO_SIZE,
        .max_wm = G4X_MAX_WM,
        .default_wm = G4X_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
        .fifo_size = VALLEYVIEW_FIFO_SIZE,
        .max_wm = VALLEYVIEW_MAX_WM,
        .default_wm = VALLEYVIEW_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        .fifo_size = I945_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
        .fifo_size = I915_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM/2,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
        .fifo_size = I830_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
                                        int fifo_size,
                                        int pixel_size,
                                        unsigned long latency_ns)
{
        long entries_required, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

        DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

        wm_size = fifo_size - (entries_required + wm->guard_size);

        DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;

        /*
         * Bspec seems to indicate that the value shouldn't be lower than
         * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
         * Lets go for 8 which is the burst size since certain platforms
         * already use a hardcoded 8 (which is what the spec says should be
         * done).
         */
        if (wm_size <= 8)
                wm_size = 8;

        return wm_size;
}
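/*
 * Worked example with made-up numbers: clock_in_khz = 100000 (100 MHz),
 * pixel_size = 4 and latency_ns = 5000 give
 * entries_required = (100 * 4 * 5000) / 1000 = 2000 bytes, i.e.
 * DIV_ROUND_UP(2000, 64) = 32 cachelines on a 64-byte FIFO line. With a
 * hypothetical 96-entry FIFO and a guard of 2 the watermark would then be
 * programmed to 96 - (32 + 2) = 62.
 */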
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
        struct drm_crtc *crtc, *enabled = NULL;

        for_each_crtc(dev, crtc) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                intel_set_memory_cxsr(dev_priv, false);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
                int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
                int clock = adjusted_mode->crtc_clock;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= FW_WM(wm, SR);
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= FW_WM(wm, CURSOR_SR);
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= FW_WM(wm, HPLL_SR);
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= FW_WM(wm, HPLL_CURSOR);
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                intel_set_memory_cxsr(dev_priv, true);
        } else {
                intel_set_memory_cxsr(dev_priv, false);
        }
}
static bool g4x_compute_wm0(struct drm_device *dev,
                            int plane,
                            const struct intel_watermark_params *display,
                            int display_latency_ns,
                            const struct intel_watermark_params *cursor,
                            int cursor_latency_ns,
                            int *plane_wm,
                            int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
        }

        adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
        pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

        /* Use the small buffer method to calculate plane watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, display->cacheline_size);
        *plane_wm = entries + display->guard_size;
        if (*plane_wm > (int)display->max_wm)
                *plane_wm = display->max_wm;

        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
        if (*cursor_wm > (int)cursor->max_wm)
                *cursor_wm = (int)cursor->max_wm;

        return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
                             int plane,
                             int latency_ns,
                             const struct intel_watermark_params *display,
                             const struct intel_watermark_params *cursor,
                             int *display_wm, int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
        int entries;

        if (!latency_ns) {
                *display_wm = *cursor_wm = 0;
                return false;
        }

        crtc = intel_get_crtc_for_plane(dev, plane);
        adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
        pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;

        /* Use the minimum of the small and large buffer method for primary */
        small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        large = line_count * line_size;

        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;

        /* calculate the self-refresh watermark for display cursor */
        entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;

        return g4x_check_srwm(dev,
                              *display_wm, *cursor_wm,
                              display, cursor);
}
#define FW_WM_VLV(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
                                const struct vlv_wm_values *wm)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(VLV_DDL(pipe),
                   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
                   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
                   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
                   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
                   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
                   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
        I915_WRITE(DSPFW2,
                   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
                   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
        I915_WRITE(DSPFW3,
                   FW_WM(wm->sr.cursor, CURSOR_SR));

        if (IS_CHERRYVIEW(dev_priv)) {
                I915_WRITE(DSPFW7_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
                I915_WRITE(DSPFW8_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
                           FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
                I915_WRITE(DSPFW9_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
                           FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
                           FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
                           FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
        } else {
                I915_WRITE(DSPFW7,
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
        }

        /* zero (unused) WM1 watermarks */
        I915_WRITE(DSPFW4, 0);
        I915_WRITE(DSPFW5, 0);
        I915_WRITE(DSPFW6, 0);
        I915_WRITE(DSPHOWM1, 0);

        POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
        VLV_WM_LEVEL_PM2,
        VLV_WM_LEVEL_PM5,
        VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
                                   unsigned int pipe_htotal,
                                   unsigned int horiz_pixels,
                                   unsigned int bytes_per_pixel,
                                   unsigned int latency)
{
        unsigned int ret;

        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
        ret = DIV_ROUND_UP(ret, 64);

        return ret;
}
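/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, bytes_per_pixel = 4 and a
 * 30 us latency (latency = 300 in 0.1us units) give
 * ret = (300 * 148500) / (2200 * 10000) = 2 full lines, then
 * (2 + 1) * 1920 * 4 = 23040 bytes, i.e. DIV_ROUND_UP(23040, 64) = 360
 * FIFO lines.
 */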
static void vlv_setup_wm_latency(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

        dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

                dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
        }
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
                                     struct intel_crtc *crtc,
                                     const struct intel_plane_state *state,
                                     int level)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        int clock, htotal, pixel_size, width, wm;

        if (dev_priv->wm.pri_latency[level] == 0)
                return USHRT_MAX;

        if (!state->visible)
                return 0;

        pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
        clock = crtc->config->base.adjusted_mode.crtc_clock;
        htotal = crtc->config->base.adjusted_mode.crtc_htotal;
        width = crtc->config->pipe_src_w;
        if (WARN_ON(htotal == 0))
                htotal = 1;

        if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                /*
                 * FIXME the formula gives values that are
                 * too big for the cursor FIFO, and hence we
                 * would never be able to use cursors. For
                 * now just hardcode the watermark.
                 */
                wm = 63;
        } else {
                wm = vlv_wm_method2(clock, htotal, width, pixel_size,
                                    dev_priv->wm.pri_latency[level] * 10);
        }

        return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        struct intel_plane *plane;
        unsigned int total_rate = 0;
        const int fifo_size = 512 - 1;
        int fifo_extra, fifo_left = fifo_size;

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (state->visible) {
                        wm_state->num_active_planes++;
                        total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
                }
        }

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);
                unsigned int rate;

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                        plane->wm.fifo_size = 63;
                        continue;
                }

                if (!state->visible) {
                        plane->wm.fifo_size = 0;
                        continue;
                }

                rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
                plane->wm.fifo_size = fifo_size * rate / total_rate;
                fifo_left -= plane->wm.fifo_size;
        }

        fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

        /* spread the remainder evenly */
        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                int plane_extra;

                if (fifo_left == 0)
                        break;

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                /* give it all to the first plane if none are active */
                if (plane->wm.fifo_size == 0 &&
                    wm_state->num_active_planes)
                        continue;

                plane_extra = min(fifo_extra, fifo_left);
                plane->wm.fifo_size += plane_extra;
                fifo_left -= plane_extra;
        }

        WARN_ON(fifo_left != 0);
}
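/*
 * Example split (hypothetical): with a visible 4 byte/px primary plane and
 * a visible 2 byte/px sprite, total_rate = 6, so the primary gets
 * 511 * 4 / 6 = 340 FIFO entries and the sprite 511 * 2 / 6 = 170; the
 * single leftover entry (511 - 340 - 170 = 1) is handed out by the
 * remainder loop above.
 */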
static void vlv_invert_wms(struct intel_crtc *crtc)
{
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        int level;

        for (level = 0; level < wm_state->num_levels; level++) {
                struct drm_device *dev = crtc->base.dev;
                const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
                struct intel_plane *plane;

                wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
                wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        switch (plane->base.type) {
                                int sprite;
                        case DRM_PLANE_TYPE_CURSOR:
                                wm_state->wm[level].cursor = plane->wm.fifo_size -
                                        wm_state->wm[level].cursor;
                                break;
                        case DRM_PLANE_TYPE_PRIMARY:
                                wm_state->wm[level].primary = plane->wm.fifo_size -
                                        wm_state->wm[level].primary;
                                break;
                        case DRM_PLANE_TYPE_OVERLAY:
                                sprite = plane->plane;
                                wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
                                        wm_state->wm[level].sprite[sprite];
                                break;
                        }
                }
        }
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        struct intel_plane *plane;
        int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
        int level;

        memset(wm_state, 0, sizeof(*wm_state));

        wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
        wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

        wm_state->num_active_planes = 0;

        vlv_compute_fifo(crtc);

        if (wm_state->num_active_planes != 1)
                wm_state->cxsr = false;

        if (wm_state->cxsr) {
                for (level = 0; level < wm_state->num_levels; level++) {
                        wm_state->sr[level].plane = sr_fifo_size;
                        wm_state->sr[level].cursor = 63;
                }
        }

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);

                if (!state->visible)
                        continue;

                /* normal watermarks */
                for (level = 0; level < wm_state->num_levels; level++) {
                        int wm = vlv_compute_wm_level(plane, crtc, state, level);
                        int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

                        /* hack */
                        if (WARN_ON(level == 0 && wm > max_wm))
                                wm = max_wm;

                        if (wm > plane->wm.fifo_size)
                                break;

                        switch (plane->base.type) {
                                int sprite;
                        case DRM_PLANE_TYPE_CURSOR:
                                wm_state->wm[level].cursor = wm;
                                break;
                        case DRM_PLANE_TYPE_PRIMARY:
                                wm_state->wm[level].primary = wm;
                                break;
                        case DRM_PLANE_TYPE_OVERLAY:
                                sprite = plane->plane;
                                wm_state->wm[level].sprite[sprite] = wm;
                                break;
                        }
                }

                wm_state->num_levels = level;

                if (!wm_state->cxsr)
                        continue;

                /* maxfifo watermarks */
                switch (plane->base.type) {
                        int sprite;
                case DRM_PLANE_TYPE_CURSOR:
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].cursor =
                                        wm_state->wm[level].cursor;
                        break;
                case DRM_PLANE_TYPE_PRIMARY:
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].plane =
                                        min(wm_state->sr[level].plane,
                                            wm_state->wm[level].primary);
                        break;
                case DRM_PLANE_TYPE_OVERLAY:
                        sprite = plane->plane;
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].plane =
                                        min(wm_state->sr[level].plane,
                                            wm_state->wm[level].sprite[sprite]);
                        break;
                }
        }

        /* clear any (partially) filled invalid levels */
        for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
                memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
                memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
        }

        vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
        (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane;
        int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                        WARN_ON(plane->wm.fifo_size != 63);
                        continue;
                }

                if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
                        sprite0_start = plane->wm.fifo_size;
                else if (plane->plane == 0)
                        sprite1_start = sprite0_start + plane->wm.fifo_size;
                else
                        fifo_size = sprite1_start + plane->wm.fifo_size;
        }

        WARN_ON(fifo_size != 512 - 1);

        DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
                      pipe_name(crtc->pipe), sprite0_start,
                      sprite1_start, fifo_size);

        switch (crtc->pipe) {
                uint32_t dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);

                dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
                            VLV_FIFO(SPRITEB, 0xff));
                dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
                           VLV_FIFO(SPRITEB, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
                             VLV_FIFO(SPRITEB_HI, 0x1));
                dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB, dsparb);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);

                dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
                            VLV_FIFO(SPRITED, 0xff));
                dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
                           VLV_FIFO(SPRITED, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
                             VLV_FIFO(SPRITED_HI, 0xff));
                dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB, dsparb);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        case PIPE_C:
                dsparb3 = I915_READ(DSPARB3);
                dsparb2 = I915_READ(DSPARB2);

                dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
                             VLV_FIFO(SPRITEF, 0xff));
                dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
                            VLV_FIFO(SPRITEF, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
                             VLV_FIFO(SPRITEF_HI, 0xff));
                dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB3, dsparb3);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        default:
                break;
        }
}
static void vlv_merge_wm(struct drm_device *dev,
                         struct vlv_wm_values *wm)
{
        struct intel_crtc *crtc;
        int num_active_crtcs = 0;

        wm->level = to_i915(dev)->wm.max_level;
        wm->cxsr = true;

        for_each_intel_crtc(dev, crtc) {
                const struct vlv_wm_state *wm_state = &crtc->wm_state;

                if (!crtc->active)
                        continue;

                if (!wm_state->cxsr)
                        wm->cxsr = false;

                num_active_crtcs++;
                wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
        }

        if (num_active_crtcs != 1)
                wm->cxsr = false;

        if (num_active_crtcs > 1)
                wm->level = VLV_WM_LEVEL_PM2;

        for_each_intel_crtc(dev, crtc) {
                struct vlv_wm_state *wm_state = &crtc->wm_state;
                enum pipe pipe = crtc->pipe;

                if (!crtc->active)
                        continue;

                wm->pipe[pipe] = wm_state->wm[wm->level];
                if (wm->cxsr)
                        wm->sr = wm_state->sr[wm->level];

                wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
        }
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct vlv_wm_values wm = {};

        vlv_compute_wm(intel_crtc);
        vlv_merge_wm(dev, &wm);

        if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
                /* FIXME should be part of crtc atomic commit */
                vlv_pipe_set_fifo_size(intel_crtc);
                return;
        }

        if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
            dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
                chv_set_memory_dvfs(dev_priv, false);

        if (wm.level < VLV_WM_LEVEL_PM5 &&
            dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
                chv_set_memory_pm5(dev_priv, false);

        if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, false);

        /* FIXME should be part of crtc atomic commit */
        vlv_pipe_set_fifo_size(intel_crtc);

        vlv_write_wm_values(intel_crtc, &wm);

        DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
                      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
                      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
                      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
                      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

        if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, true);

        if (wm.level >= VLV_WM_LEVEL_PM5 &&
            dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
                chv_set_memory_pm5(dev_priv, true);

        if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
            dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
                chv_set_memory_dvfs(dev_priv, true);

        dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
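/*
 * is_power_of_2() is true exactly when one bit is set, so e.g.
 * enabled == BIT(PIPE_A) or BIT(PIPE_B) qualifies, while
 * (BIT(PIPE_A) | BIT(PIPE_B)) == 0x3 and 0 do not.
 */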
static void g4x_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
        bool cxsr_enabled;

        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
                      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   FW_WM(plane_sr, SR) |
                   FW_WM(cursorb_wm, CURSORB) |
                   FW_WM(planeb_wm, PLANEB) |
                   FW_WM(planea_wm, PLANEA));
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   FW_WM(cursora_wm, CURSORA));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   FW_WM(cursor_sr, CURSOR_SR));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
        bool cxsr_enabled;

        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
        if (crtc) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 12000;
                const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
                int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = max(htotal * 1000 / clock, 1);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
                srwm = I965_FIFO_SIZE - entries;
                if (srwm < 0)
                        srwm = 1;
                srwm &= 0x1ff;
                DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
                              entries, srwm);

                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * crtc->cursor->state->crtc_w;
                entries = DIV_ROUND_UP(entries,
                                       i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
                        (entries + i965_cursor_wm_info.guard_size);

                if (cursor_sr > i965_cursor_wm_info.max_wm)
                        cursor_sr = i965_cursor_wm_info.max_wm;

                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);

                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                /* Turn off self refresh if both pipes are enabled */
                intel_set_memory_cxsr(dev_priv, false);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
                      srwm);

        /* 965 has limitations... */
        I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
                   FW_WM(8, CURSORB) |
                   FW_WM(8, PLANEB) |
                   FW_WM(8, PLANEA));
        I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
                   FW_WM(8, PLANEC_OLD));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
        int cwm, srwm = 1;
        int fifo_size;
        int planea_wm, planeb_wm;
        struct drm_crtc *crtc, *enabled = NULL;

        if (IS_I945GM(dev))
                wm_info = &i945_wm_info;
        else if (!IS_GEN2(dev))
                wm_info = &i915_wm_info;
        else
                wm_info = &i830_a_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;

                adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
                planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
                enabled = crtc;
        } else {
                planea_wm = fifo_size - wm_info->guard_size;
                if (planea_wm > (long)wm_info->max_wm)
                        planea_wm = wm_info->max_wm;
        }

        if (IS_GEN2(dev))
                wm_info = &i830_bc_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;

                adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
                planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
                if (enabled == NULL)
                        enabled = crtc;
                else
                        enabled = NULL;
        } else {
                planeb_wm = fifo_size - wm_info->guard_size;
                if (planeb_wm > (long)wm_info->max_wm)
                        planeb_wm = wm_info->max_wm;
        }

        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

        if (IS_I915GM(dev) && enabled) {
                struct drm_i915_gem_object *obj;

                obj = intel_fb_obj(enabled->primary->state->fb);

                /* self-refresh seems busted with untiled */
                if (obj->tiling_mode == I915_TILING_NONE)
                        enabled = NULL;
        }

        /*
         * Overlay gets an aggressive default since video jitter is bad.
         */
        cwm = 2;

        /* Play safe and disable self-refresh before adjusting watermarks. */
        intel_set_memory_cxsr(dev_priv, false);

        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 6000;
                const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
                int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = max(htotal * 1000 / clock, 1);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
                srwm = wm_info->fifo_size - entries;
                if (srwm < 0)
                        srwm = 1;

                if (IS_I945G(dev) || IS_I945GM(dev))
                        I915_WRITE(FW_BLC_SELF,
                                   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
                else if (IS_I915GM(dev))
                        I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
                      planea_wm, planeb_wm, cwm, srwm);

        fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        fwater_hi = (cwm & 0x1f);

        /* Set request length to 8 cachelines per fetch */
        fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
        fwater_hi = fwater_hi | (1 << 8);

        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);

        if (enabled)
                intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        uint32_t fwater_lo;
        int planea_wm;

        crtc = single_enabled_crtc(dev);
        if (crtc == NULL)
                return;

        adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
        planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                       &i845_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       4, pessimal_latency_ns);
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
        fwater_lo |= (3<<8) | planea_wm;

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

        I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
        uint32_t pixel_rate;

        pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */

        if (pipe_config->pch_pfit.enabled) {
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
                uint32_t pfit_size = pipe_config->pch_pfit.size;

                pipe_w = pipe_config->pipe_src_w;
                pipe_h = pipe_config->pipe_src_h;

                pfit_w = (pfit_size >> 16) & 0xFFFF;
                pfit_h = pfit_size & 0xFFFF;
                if (pipe_w < pfit_w)
                        pipe_w = pfit_w;
                if (pipe_h < pfit_h)
                        pipe_h = pfit_h;

                pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
                                     pfit_w * pfit_h);
        }

        return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
                               uint32_t latency)
{
        uint64_t ret;

        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;

        ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
        ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

        return ret;
}
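/*
 * Worked example (illustrative numbers): pixel_rate = 148500 kHz,
 * bytes_per_pixel = 4 and a 10 us latency (latency = 100 in 0.1us units)
 * give ret = DIV_ROUND_UP_ULL(148500 * 4 * 100, 64 * 10000) + 2
 *          = DIV_ROUND_UP_ULL(59400000, 640000) + 2 = 93 + 2 = 95 lines.
 */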
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                               uint32_t horiz_pixels, uint8_t bytes_per_pixel,
                               uint32_t latency)
{
        uint32_t ret;

        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;

        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
        ret = DIV_ROUND_UP(ret, 64) + 2;

        return ret;
}
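/*
 * Worked example (illustrative numbers): with pixel_rate = 148500,
 * pipe_htotal = 2200, horiz_pixels = 1920, bytes_per_pixel = 4 and
 * latency = 100 (10 us), (100 * 148500) / (2200 * 10000) = 0 full lines,
 * so (0 + 1) * 1920 * 4 = 7680 bytes and
 * DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122.
 */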
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
                           uint8_t bytes_per_pixel)
{
        return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
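/*
 * Example: a primary watermark of pri_val = 122 on a 1920 pixel wide,
 * 4 byte/px plane yields DIV_ROUND_UP(122 * 64, 1920 * 4) + 2
 * = DIV_ROUND_UP(7808, 7680) + 2 = 2 + 2 = 4 FBC lines.
 */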
struct ilk_wm_maximums {
        uint16_t pri;
        uint16_t spr;
        uint16_t cur;
        uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
        unsigned int num_pipes_active;
        bool sprites_enabled;
        bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value,
                                   bool is_lp)
{
        int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
        uint32_t method1, method2;

        if (!cstate->base.active || !pstate->visible)
                return 0;

        method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);

        if (!is_lp)
                return method1;

        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 drm_rect_width(&pstate->dst),
                                 bpp,
                                 mem_value);

        return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value)
{
        int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
        uint32_t method1, method2;

        if (!cstate->base.active || !pstate->visible)
                return 0;

        method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 drm_rect_width(&pstate->dst),
                                 bpp,
                                 mem_value);

        return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value)
{
        int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;

        if (!cstate->base.active || !pstate->visible)
                return 0;

        return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                              cstate->base.adjusted_mode.crtc_htotal,
                              drm_rect_width(&pstate->dst),
                              bpp,
                              mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t pri_val)
{
        int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;

        if (!cstate->base.active || !pstate->visible)
                return 0;

        return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 8)
                return 3072;
        else if (INTEL_INFO(dev)->gen >= 7)
                return 768;
        else
                return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
                                         int level, bool is_sprite)
{
        if (INTEL_INFO(dev)->gen >= 8)
                /* BDW primary/sprite plane watermarks */
                return level == 0 ? 255 : 2047;
        else if (INTEL_INFO(dev)->gen >= 7)
                /* IVB/HSW primary/sprite plane watermarks */
                return level == 0 ? 127 : 1023;
        else if (!is_sprite)
                /* ILK/SNB primary plane watermarks */
                return level == 0 ? 127 : 511;
        else
                /* ILK/SNB sprite plane watermarks */
                return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
                                          int level)
{
        if (INTEL_INFO(dev)->gen >= 7)
                return level == 0 ? 63 : 255;
        else
                return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 8)
                return 31;
        else
                return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
                                     int level,
                                     const struct intel_wm_config *config,
                                     enum intel_ddb_partitioning ddb_partitioning,
                                     bool is_sprite)
{
        unsigned int fifo_size = ilk_display_fifo_size(dev);

        /* if sprites aren't enabled, sprites get nothing */
        if (is_sprite && !config->sprites_enabled)
                return 0;

        /* HSW allows LP1+ watermarks even with multiple pipes */
        if (level == 0 || config->num_pipes_active > 1) {
                fifo_size /= INTEL_INFO(dev)->num_pipes;

                /*
                 * For some reason the non self refresh
                 * FIFO size is only half of the self
                 * refresh FIFO size on ILK/SNB.
                 */
                if (INTEL_INFO(dev)->gen <= 6)
                        fifo_size /= 2;
        }

        if (config->sprites_enabled) {
                /* level 0 is always calculated with 1:1 split */
                if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
                        if (is_sprite)
                                fifo_size *= 5;
                        fifo_size /= 6;
                } else {
                        fifo_size /= 2;
                }
        }

        /* clamp to max that the registers can hold */
        return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
                                      int level,
                                      const struct intel_wm_config *config)
{
        /* HSW LP1+ watermarks w/ multiple pipes */
        if (level > 0 && config->num_pipes_active > 1)
                return 64;

        /* otherwise just report max that registers can hold */
        return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
                                    int level,
                                    const struct intel_wm_config *config,
                                    enum intel_ddb_partitioning ddb_partitioning,
                                    struct ilk_wm_maximums *max)
{
        max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
        max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
        max->cur = ilk_cursor_wm_max(dev, level, config);
        max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
                                        int level,
                                        struct ilk_wm_maximums *max)
{
        max->pri = ilk_plane_wm_reg_max(dev, level, false);
        max->spr = ilk_plane_wm_reg_max(dev, level, true);
        max->cur = ilk_cursor_wm_reg_max(dev, level);
        max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
                                  const struct ilk_wm_maximums *max,
                                  struct intel_wm_level *result)
{
        bool ret;

        /* already determined to be invalid? */
        if (!result->enable)
                return false;

        result->enable = result->pri_val <= max->pri &&
                         result->spr_val <= max->spr &&
                         result->cur_val <= max->cur;

        ret = result->enable;

        /*
         * HACK until we can pre-compute everything,
         * and thus fail gracefully if LP0 watermarks
         * are exceeded...
         */
        if (level == 0 && !result->enable) {
                if (result->pri_val > max->pri)
                        DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
                                      level, result->pri_val, max->pri);
                if (result->spr_val > max->spr)
                        DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
                                      level, result->spr_val, max->spr);
                if (result->cur_val > max->cur)
                        DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
                                      level, result->cur_val, max->cur);

                result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
                result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
                result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
                result->enable = true;
        }

        return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
                                 const struct intel_crtc *intel_crtc,
                                 int level,
                                 struct intel_crtc_state *cstate,
                                 struct intel_wm_level *result)
{
        struct intel_plane *intel_plane;
        uint16_t pri_latency = dev_priv->wm.pri_latency[level];
        uint16_t spr_latency = dev_priv->wm.spr_latency[level];
        uint16_t cur_latency = dev_priv->wm.cur_latency[level];

        /* WM1+ latency values stored in 0.5us units */
        if (level > 0) {
                pri_latency *= 5;
                spr_latency *= 5;
                cur_latency *= 5;
        }

        for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
                struct intel_plane_state *pstate =
                        to_intel_plane_state(intel_plane->base.state);

                switch (intel_plane->base.type) {
                case DRM_PLANE_TYPE_PRIMARY:
                        result->pri_val = ilk_compute_pri_wm(cstate, pstate,
                                                             pri_latency,
                                                             level);
                        result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
                                                             result->pri_val);
                        break;
                case DRM_PLANE_TYPE_OVERLAY:
                        result->spr_val = ilk_compute_spr_wm(cstate, pstate,
                                                             spr_latency);
                        break;
                case DRM_PLANE_TYPE_CURSOR:
                        result->cur_val = ilk_compute_cur_wm(cstate, pstate,
                                                             cur_latency);
                        break;
                }
        }

        result->enable = true;
}
2073 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2075 struct drm_i915_private *dev_priv = dev->dev_private;
2076 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2077 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
2078 u32 linetime, ips_linetime;
2080 if (!intel_crtc->active)
2081 return 0;
2083 /* The watermarks are computed based on how long it takes to fill a single
2084 * row at the given clock rate, multiplied by 8.
2085 */
2086 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2087 adjusted_mode->crtc_clock);
2088 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2089 dev_priv->cdclk_freq);
2091 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2092 PIPE_WM_LINETIME_TIME(linetime);
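/*
 * A worked instance of the linetime math above, with illustrative numbers
 * rather than anything from a real mode: for crtc_htotal = 2200 and
 * crtc_clock = 148500 (kHz),
 *
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. roughly 14.8us per scanout line, carried in 1/8 us units. The IPS
 * value is the same computation done against cdclk instead of the pixel
 * clock.
 */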
2095 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2097 struct drm_i915_private *dev_priv = dev->dev_private;
2099 if (IS_GEN9(dev)) {
2100 uint32_t val;
2101 int ret, i;
2102 int level, max_level = ilk_wm_max_level(dev);
2104 /* read the first set of memory latencies[0:3] */
2105 val = 0; /* data0 to be programmed to 0 for first set */
2106 mutex_lock(&dev_priv->rps.hw_lock);
2107 ret = sandybridge_pcode_read(dev_priv,
2108 GEN9_PCODE_READ_MEM_LATENCY,
2109 &val);
2110 mutex_unlock(&dev_priv->rps.hw_lock);
2112 if (ret) {
2113 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2114 return;
2115 }
2117 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2118 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2119 GEN9_MEM_LATENCY_LEVEL_MASK;
2120 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2121 GEN9_MEM_LATENCY_LEVEL_MASK;
2122 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2123 GEN9_MEM_LATENCY_LEVEL_MASK;
2125 /* read the second set of memory latencies[4:7] */
2126 val = 1; /* data0 to be programmed to 1 for second set */
2127 mutex_lock(&dev_priv->rps.hw_lock);
2128 ret = sandybridge_pcode_read(dev_priv,
2129 GEN9_PCODE_READ_MEM_LATENCY,
2130 &val);
2131 mutex_unlock(&dev_priv->rps.hw_lock);
2132 if (ret) {
2133 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2134 return;
2135 }
2137 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2138 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2139 GEN9_MEM_LATENCY_LEVEL_MASK;
2140 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2141 GEN9_MEM_LATENCY_LEVEL_MASK;
2142 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2143 GEN9_MEM_LATENCY_LEVEL_MASK;
2145 /*
2146 * WaWmMemoryReadLatency:skl
2147 *
2148 * punit doesn't take into account the read latency so we need
2149 * to add 2us to the various latency levels we retrieve from
2150 * the punit.
2151 * - WM0 is a bit special in that it's the only level that
2152 * can't be disabled if we want to have display working, so
2153 * we always add 2us there.
2154 * - For levels >=1, punit returns 0us latency when they are
2155 * disabled, so we respect that and don't add 2us then
2157 * Additionally, if a level n (n > 1) has a 0us latency, all
2158 * levels m (m >= n) need to be disabled. We make sure to
2159 * sanitize the values out of the punit to satisfy this
2160 * requirement.
2161 */
2162 wm[0] += 2;
2163 for (level = 1; level <= max_level; level++)
2164 if (wm[level] != 0)
2165 wm[level] += 2;
2166 else {
2167 for (i = level + 1; i <= max_level; i++)
2168 wm[i] = 0;
2170 break;
2171 }
2172 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2173 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2175 wm[0] = (sskpd >> 56) & 0xFF;
2176 if (wm[0] == 0)
2177 wm[0] = sskpd & 0xF;
2178 wm[1] = (sskpd >> 4) & 0xFF;
2179 wm[2] = (sskpd >> 12) & 0xFF;
2180 wm[3] = (sskpd >> 20) & 0x1FF;
2181 wm[4] = (sskpd >> 32) & 0x1FF;
2182 } else if (INTEL_INFO(dev)->gen >= 6) {
2183 uint32_t sskpd = I915_READ(MCH_SSKPD);
2185 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2186 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2187 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2188 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2189 } else if (INTEL_INFO(dev)->gen >= 5) {
2190 uint32_t mltr = I915_READ(MLTR_ILK);
2192 /* ILK primary LP0 latency is 700 ns */
2193 wm[0] = 7;
2194 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2195 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2199 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2201 /* ILK sprite LP0 latency is 1300 ns */
2202 if (INTEL_INFO(dev)->gen == 5)
2203 wm[0] = 13;
2206 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2208 /* ILK cursor LP0 latency is 1300 ns */
2209 if (INTEL_INFO(dev)->gen == 5)
2210 wm[0] = 13;
2212 /* WaDoubleCursorLP3Latency:ivb */
2213 if (IS_IVYBRIDGE(dev))
2214 wm[3] *= 2;
2217 int ilk_wm_max_level(const struct drm_device *dev)
2219 /* how many WM levels are we expecting */
2220 if (INTEL_INFO(dev)->gen >= 9)
2221 return 7;
2222 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2223 return 4;
2224 else if (INTEL_INFO(dev)->gen >= 6)
2225 return 3;
2226 else
2227 return 2;
2230 static void intel_print_wm_latency(struct drm_device *dev,
2231 const char *name,
2232 const uint16_t wm[8])
2234 int level, max_level = ilk_wm_max_level(dev);
2236 for (level = 0; level <= max_level; level++) {
2237 unsigned int latency = wm[level];
2239 if (latency == 0) {
2240 DRM_ERROR("%s WM%d latency not provided\n",
2241 name, level);
2242 continue;
2243 }
2245 /*
2246 * - latencies are in us on gen9.
2247 * - before then, WM1+ latency values are in 0.5us units
2248 */
2249 if (IS_GEN9(dev))
2250 latency *= 10;
2251 else if (level > 0)
2252 latency *= 5;
2254 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2255 name, level, wm[level],
2256 latency / 10, latency % 10);
2260 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2261 uint16_t wm[5], uint16_t min)
2263 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2265 if (wm[0] >= min)
2266 return false;
2268 wm[0] = max(wm[0], min);
2269 for (level = 1; level <= max_level; level++)
2270 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2272 return true;
2273 }
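/*
 * A small worked example of the unit handling above (the numbers are
 * illustrative): WM0 latency is kept in 0.1us units while WM1+ levels are
 * in 0.5us units, so enforcing a 1.2us floor (min == 12) yields
 *
 *   wm[0]:  max(wm[0], 12)                      -> at least 12 * 0.1us = 1.2us
 *   wm[1+]: max(wm[level], DIV_ROUND_UP(12, 5)) -> at least 3 * 0.5us = 1.5us
 *
 * rounding up so the applied floor is never below the requested one.
 */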
2275 static void snb_wm_latency_quirk(struct drm_device *dev)
2277 struct drm_i915_private *dev_priv = dev->dev_private;
2281 * The BIOS provided WM memory latency values are often
2282 * inadequate for high resolution displays. Adjust them.
2284 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2285 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2286 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2288 if (!changed)
2289 return;
2291 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2292 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2293 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2294 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2297 static void ilk_setup_wm_latency(struct drm_device *dev)
2299 struct drm_i915_private *dev_priv = dev->dev_private;
2301 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2303 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2304 sizeof(dev_priv->wm.pri_latency));
2305 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2306 sizeof(dev_priv->wm.pri_latency));
2308 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2309 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2311 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2312 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2313 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2315 if (IS_GEN6(dev))
2316 snb_wm_latency_quirk(dev);
2319 static void skl_setup_wm_latency(struct drm_device *dev)
2321 struct drm_i915_private *dev_priv = dev->dev_private;
2323 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2324 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2327 static void ilk_compute_wm_config(struct drm_device *dev,
2328 struct intel_wm_config *config)
2330 struct intel_crtc *intel_crtc;
2332 /* Compute the currently _active_ config */
2333 for_each_intel_crtc(dev, intel_crtc) {
2334 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2336 if (!wm->pipe_enabled)
2337 continue;
2339 config->sprites_enabled |= wm->sprites_enabled;
2340 config->sprites_scaled |= wm->sprites_scaled;
2341 config->num_pipes_active++;
2345 /* Compute new watermarks for the pipe */
2346 static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2347 struct intel_pipe_wm *pipe_wm)
2349 struct drm_crtc *crtc = cstate->base.crtc;
2350 struct drm_device *dev = crtc->dev;
2351 const struct drm_i915_private *dev_priv = dev->dev_private;
2352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2353 struct intel_plane *intel_plane;
2354 struct intel_plane_state *sprstate = NULL;
2355 int level, max_level = ilk_wm_max_level(dev);
2356 /* LP0 watermark maximums depend on this pipe alone */
2357 struct intel_wm_config config = {
2358 .num_pipes_active = 1,
2360 struct ilk_wm_maximums max;
2362 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2363 if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
2364 sprstate = to_intel_plane_state(intel_plane->base.state);
2365 break;
2369 config.sprites_enabled = sprstate->visible;
2370 config.sprites_scaled = sprstate->visible &&
2371 (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
2372 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2374 pipe_wm->pipe_enabled = cstate->base.active;
2375 pipe_wm->sprites_enabled = sprstate->visible;
2376 pipe_wm->sprites_scaled = config.sprites_scaled;
2378 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2379 if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
2380 max_level = 1;
2382 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2383 if (config.sprites_scaled)
2384 max_level = 0;
2386 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
2388 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2389 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2391 /* LP0 watermarks always use 1/2 DDB partitioning */
2392 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2394 /* At least LP0 must be valid */
2395 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2396 return false;
2398 ilk_compute_wm_reg_maximums(dev, 1, &max);
2400 for (level = 1; level <= max_level; level++) {
2401 struct intel_wm_level wm = {};
2403 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
2405 /*
2406 * Disable any watermark level that exceeds the
2407 * register maximums since such watermarks are
2408 * always invalid.
2409 */
2410 if (!ilk_validate_wm_level(level, &max, &wm))
2411 break;
2413 pipe_wm->wm[level] = wm;
2414 }
2416 return true;
2417 }
2419 /*
2420 * Merge the watermarks from all active pipes for a specific level.
2421 */
2422 static void ilk_merge_wm_level(struct drm_device *dev,
2423 int level,
2424 struct intel_wm_level *ret_wm)
2426 const struct intel_crtc *intel_crtc;
2428 ret_wm->enable = true;
2430 for_each_intel_crtc(dev, intel_crtc) {
2431 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2432 const struct intel_wm_level *wm = &active->wm[level];
2434 if (!active->pipe_enabled)
2435 continue;
2437 /*
2438 * The watermark values may have been used in the past,
2439 * so we must maintain them in the registers for some
2440 * time even if the level is now disabled.
2441 */
2442 if (!wm->enable)
2443 ret_wm->enable = false;
2445 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2446 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2447 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2448 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2452 /*
2453 * Merge all low power watermarks for all active pipes.
2454 */
2455 static void ilk_wm_merge(struct drm_device *dev,
2456 const struct intel_wm_config *config,
2457 const struct ilk_wm_maximums *max,
2458 struct intel_pipe_wm *merged)
2460 struct drm_i915_private *dev_priv = dev->dev_private;
2461 int level, max_level = ilk_wm_max_level(dev);
2462 int last_enabled_level = max_level;
2464 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2465 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2466 config->num_pipes_active > 1)
2467 last_enabled_level = 0;
2469 /* ILK: FBC WM must be disabled always */
2470 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2472 /* merge each WM1+ level */
2473 for (level = 1; level <= max_level; level++) {
2474 struct intel_wm_level *wm = &merged->wm[level];
2476 ilk_merge_wm_level(dev, level, wm);
2478 if (level > last_enabled_level)
2479 wm->enable = false;
2480 else if (!ilk_validate_wm_level(level, max, wm))
2481 /* make sure all following levels get disabled */
2482 last_enabled_level = level - 1;
2484 /*
2485 * The spec says it is preferred to disable
2486 * FBC WMs instead of disabling a WM level.
2487 */
2488 if (wm->fbc_val > max->fbc) {
2489 if (wm->enable)
2490 merged->fbc_wm_enabled = false;
2491 wm->fbc_val = 0;
2495 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2496 /*
2497 * FIXME this is racy. FBC might get enabled later.
2498 * What we should check here is whether FBC can be
2499 * enabled sometime later.
2500 */
2501 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2502 intel_fbc_enabled(dev_priv)) {
2503 for (level = 2; level <= max_level; level++) {
2504 struct intel_wm_level *wm = &merged->wm[level];
2506 wm->enable = false;
2511 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2513 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2514 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
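/*
 * Concretely: LP1/LP2/LP3 map to levels 1/2/3 when wm[4] is disabled, and
 * to 1/3/4 when it is enabled (the "wm_lp >= 2" term adds one for LP2 and
 * LP3 in that case, skipping level 2).
 */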
2517 /* The value we need to program into the WM_LPx latency field */
2518 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2520 struct drm_i915_private *dev_priv = dev->dev_private;
2522 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2523 return 2 * level;
2524 else
2525 return dev_priv->wm.pri_latency[level];
2528 static void ilk_compute_wm_results(struct drm_device *dev,
2529 const struct intel_pipe_wm *merged,
2530 enum intel_ddb_partitioning partitioning,
2531 struct ilk_wm_values *results)
2533 struct intel_crtc *intel_crtc;
2534 int level, wm_lp;
2536 results->enable_fbc_wm = merged->fbc_wm_enabled;
2537 results->partitioning = partitioning;
2539 /* LP1+ register values */
2540 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2541 const struct intel_wm_level *r;
2543 level = ilk_wm_lp_to_level(wm_lp, merged);
2545 r = &merged->wm[level];
2547 /*
2548 * Maintain the watermark values even if the level is
2549 * disabled. Doing otherwise could cause underruns.
2550 */
2551 results->wm_lp[wm_lp - 1] =
2552 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2553 (r->pri_val << WM1_LP_SR_SHIFT) |
2554 r->cur_val;
2556 if (r->enable)
2557 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2559 if (INTEL_INFO(dev)->gen >= 8)
2560 results->wm_lp[wm_lp - 1] |=
2561 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2562 else
2563 results->wm_lp[wm_lp - 1] |=
2564 r->fbc_val << WM1_LP_FBC_SHIFT;
2566 /*
2567 * Always set WM1S_LP_EN when spr_val != 0, even if the
2568 * level is disabled. Doing otherwise could cause underruns.
2569 */
2570 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2571 WARN_ON(wm_lp != 1);
2572 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2573 } else
2574 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2577 /* LP0 register values */
2578 for_each_intel_crtc(dev, intel_crtc) {
2579 enum pipe pipe = intel_crtc->pipe;
2580 const struct intel_wm_level *r =
2581 &intel_crtc->wm.active.wm[0];
2583 if (WARN_ON(!r->enable))
2584 continue;
2586 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2588 results->wm_pipe[pipe] =
2589 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2590 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2591 r->cur_val;
2595 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2596 * case both are at the same level. Prefer r1 in case they're the same. */
2597 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2598 struct intel_pipe_wm *r1,
2599 struct intel_pipe_wm *r2)
2601 int level, max_level = ilk_wm_max_level(dev);
2602 int level1 = 0, level2 = 0;
2604 for (level = 1; level <= max_level; level++) {
2605 if (r1->wm[level].enable)
2606 level1 = level;
2607 if (r2->wm[level].enable)
2608 level2 = level;
2611 if (level1 == level2) {
2612 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2613 return r2;
2614 else
2615 return r1;
2616 } else if (level1 > level2) {
2617 return r1;
2618 } else {
2619 return r2;
2620 }
2623 /* dirty bits used to track which watermarks need changes */
2624 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2625 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2626 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2627 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2628 #define WM_DIRTY_FBC (1 << 24)
2629 #define WM_DIRTY_DDB (1 << 25)
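/*
 * A minimal sketch (not part of the original code) of how these dirty bits
 * compose; the pipe choice is illustrative. Marking a pipe's WM0 or
 * linetime dirty always drags WM_DIRTY_LP_ALL along, as done in
 * ilk_compute_wm_dirty() below.
 */
static inline unsigned int ilk_wm_dirty_example(void)
{
	/* pipe B WM0 (bit 1) + its linetime (bit 9) + LP1-LP3 (bits 16-18) */
	return WM_DIRTY_PIPE(PIPE_B) | WM_DIRTY_LINETIME(PIPE_B) |
	       WM_DIRTY_LP_ALL;
}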
2631 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2632 const struct ilk_wm_values *old,
2633 const struct ilk_wm_values *new)
2635 unsigned int dirty = 0;
2636 enum pipe pipe;
2637 int wm_lp;
2639 for_each_pipe(dev_priv, pipe) {
2640 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2641 dirty |= WM_DIRTY_LINETIME(pipe);
2642 /* Must disable LP1+ watermarks too */
2643 dirty |= WM_DIRTY_LP_ALL;
2646 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2647 dirty |= WM_DIRTY_PIPE(pipe);
2648 /* Must disable LP1+ watermarks too */
2649 dirty |= WM_DIRTY_LP_ALL;
2653 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2654 dirty |= WM_DIRTY_FBC;
2655 /* Must disable LP1+ watermarks too */
2656 dirty |= WM_DIRTY_LP_ALL;
2659 if (old->partitioning != new->partitioning) {
2660 dirty |= WM_DIRTY_DDB;
2661 /* Must disable LP1+ watermarks too */
2662 dirty |= WM_DIRTY_LP_ALL;
2665 /* LP1+ watermarks already deemed dirty, no need to continue */
2666 if (dirty & WM_DIRTY_LP_ALL)
2667 return dirty;
2669 /* Find the lowest numbered LP1+ watermark in need of an update... */
2670 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2671 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2672 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2673 break;
2674 }
2676 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2677 for (; wm_lp <= 3; wm_lp++)
2678 dirty |= WM_DIRTY_LP(wm_lp);
2680 return dirty;
2681 }
2683 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2684 unsigned int dirty)
2686 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2687 bool changed = false;
2689 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2690 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2691 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2692 changed = true;
2693 }
2694 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2695 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2696 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2697 changed = true;
2698 }
2699 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2700 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2701 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2702 changed = true;
2703 }
2705 /*
2706 * Don't touch WM1S_LP_EN here.
2707 * Doing so could cause underruns.
2708 */
2710 return changed;
2711 }
2713 /*
2714 * The spec says we shouldn't write when we don't need to, because every
2715 * write causes WMs to be re-evaluated, expending some power.
2716 */
2717 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2718 struct ilk_wm_values *results)
2720 struct drm_device *dev = dev_priv->dev;
2721 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2722 unsigned int dirty;
2723 uint32_t val;
2725 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2726 if (!dirty)
2727 return;
2729 _ilk_disable_lp_wm(dev_priv, dirty);
2731 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2732 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2733 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2734 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2735 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2736 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2738 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2739 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2740 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2741 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2742 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2743 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2745 if (dirty & WM_DIRTY_DDB) {
2746 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2747 val = I915_READ(WM_MISC);
2748 if (results->partitioning == INTEL_DDB_PART_1_2)
2749 val &= ~WM_MISC_DATA_PARTITION_5_6;
2750 else
2751 val |= WM_MISC_DATA_PARTITION_5_6;
2752 I915_WRITE(WM_MISC, val);
2753 } else {
2754 val = I915_READ(DISP_ARB_CTL2);
2755 if (results->partitioning == INTEL_DDB_PART_1_2)
2756 val &= ~DISP_DATA_PARTITION_5_6;
2757 else
2758 val |= DISP_DATA_PARTITION_5_6;
2759 I915_WRITE(DISP_ARB_CTL2, val);
2763 if (dirty & WM_DIRTY_FBC) {
2764 val = I915_READ(DISP_ARB_CTL);
2765 if (results->enable_fbc_wm)
2766 val &= ~DISP_FBC_WM_DIS;
2767 else
2768 val |= DISP_FBC_WM_DIS;
2769 I915_WRITE(DISP_ARB_CTL, val);
2772 if (dirty & WM_DIRTY_LP(1) &&
2773 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2774 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2776 if (INTEL_INFO(dev)->gen >= 7) {
2777 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2778 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2779 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2780 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2783 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2784 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2785 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2786 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2787 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2788 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2790 dev_priv->wm.hw = *results;
2793 static bool ilk_disable_lp_wm(struct drm_device *dev)
2795 struct drm_i915_private *dev_priv = dev->dev_private;
2797 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2800 /*
2801 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2802 * different active planes.
2803 */
2805 #define SKL_DDB_SIZE 896 /* in blocks */
2806 #define BXT_DDB_SIZE 512
2808 /*
2809 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2810 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2811 * other universal planes are in indices 1..n. Note that this may leave unused
2812 * indices between the top "sprite" plane and the cursor.
2813 */
2814 static unsigned int
2815 skl_wm_plane_id(const struct intel_plane *plane)
2817 switch (plane->base.type) {
2818 case DRM_PLANE_TYPE_PRIMARY:
2819 return 0;
2820 case DRM_PLANE_TYPE_CURSOR:
2821 return PLANE_CURSOR;
2822 case DRM_PLANE_TYPE_OVERLAY:
2823 return plane->plane + 1;
2824 default:
2825 MISSING_CASE(plane->base.type);
2826 return plane->plane;
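/*
 * For example (slot numbers only): primary -> 0, sprite 0 -> 1,
 * sprite 1 -> 2, cursor -> I915_MAX_PLANES - 1, possibly leaving unused
 * slots between the last sprite and the cursor, as noted above.
 */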
2830 static void
2831 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2832 const struct intel_crtc_state *cstate,
2833 const struct intel_wm_config *config,
2834 struct skl_ddb_entry *alloc /* out */)
2836 struct drm_crtc *for_crtc = cstate->base.crtc;
2837 struct drm_crtc *crtc;
2838 unsigned int pipe_size, ddb_size;
2839 int nth_active_pipe;
2841 if (!cstate->base.active) {
2842 alloc->start = 0;
2843 alloc->end = 0;
2844 return;
2845 }
2847 if (IS_BROXTON(dev))
2848 ddb_size = BXT_DDB_SIZE;
2850 ddb_size = SKL_DDB_SIZE;
2852 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2854 nth_active_pipe = 0;
2855 for_each_crtc(dev, crtc) {
2856 if (!to_intel_crtc(crtc)->active)
2857 continue;
2859 if (crtc == for_crtc)
2860 break;
2862 nth_active_pipe++;
2863 }
2865 pipe_size = ddb_size / config->num_pipes_active;
2866 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2867 alloc->end = alloc->start + pipe_size;
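/*
 * A worked example of the split above (illustrative): on SKL with two
 * active pipes, ddb_size = 896 - 4 = 892 blocks and pipe_size = 446, so
 * the first active pipe gets [0, 446) and the second [446, 892). With
 * three active pipes the integer division can leave a few trailing blocks
 * unallocated.
 */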
2870 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2872 if (config->num_pipes_active == 1)
2873 return 32;
2875 return 8;
2878 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2880 entry->start = reg & 0x3ff;
2881 entry->end = (reg >> 16) & 0x3ff;
2883 if (entry->end)
2884 entry->end += 1;
2885 }
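/*
 * The hardware field keeps an inclusive end block while the driver uses
 * an exclusive one, hence the +1 here and the matching "end - 1" in
 * skl_ddb_entry_write() below. E.g. a PLANE_BUF_CFG value of 0x00df0000
 * decodes to start = 0, end = 224, i.e. a 224-block allocation.
 */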
2886 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2887 struct skl_ddb_allocation *ddb /* out */)
2888 {
2889 enum pipe pipe;
2890 int plane;
2891 u32 val;
2893 for_each_pipe(dev_priv, pipe) {
2894 for_each_plane(dev_priv, pipe, plane) {
2895 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2896 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2897 val);
2898 }
2900 val = I915_READ(CUR_BUF_CFG(pipe));
2901 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2902 val);
2903 }
2904 }
2906 static unsigned int
2907 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2908 const struct drm_plane_state *pstate,
2909 int y)
2910 {
2911 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2912 struct drm_framebuffer *fb = pstate->fb;
2914 /* for planar format */
2915 if (fb->pixel_format == DRM_FORMAT_NV12) {
2916 if (y) /* y-plane data rate */
2917 return intel_crtc->config->pipe_src_w *
2918 intel_crtc->config->pipe_src_h *
2919 drm_format_plane_cpp(fb->pixel_format, 0);
2920 else /* uv-plane data rate */
2921 return (intel_crtc->config->pipe_src_w/2) *
2922 (intel_crtc->config->pipe_src_h/2) *
2923 drm_format_plane_cpp(fb->pixel_format, 1);
2926 /* for packed formats */
2927 return intel_crtc->config->pipe_src_w *
2928 intel_crtc->config->pipe_src_h *
2929 drm_format_plane_cpp(fb->pixel_format, 0);
2932 /*
2933 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
2934 * an 8192x4096@32bpp framebuffer:
2935 * 3 * 4096 * 8192 * 4 < 2^32
2936 */
2937 static unsigned int
2938 skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
2940 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2941 struct drm_device *dev = intel_crtc->base.dev;
2942 const struct intel_plane *intel_plane;
2943 unsigned int total_data_rate = 0;
2945 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2946 const struct drm_plane_state *pstate = intel_plane->base.state;
2948 if (pstate->fb == NULL)
2949 continue;
2951 /* packed/uv */
2952 total_data_rate += skl_plane_relative_data_rate(cstate,
2953 pstate,
2954 0);
2956 if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
2957 /* y-plane */
2958 total_data_rate += skl_plane_relative_data_rate(cstate,
2959 pstate,
2960 1);
2961 }
2963 return total_data_rate;
2966 static void
2967 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
2968 const struct intel_wm_config *config,
2969 struct skl_ddb_allocation *ddb /* out */)
2971 struct drm_crtc *crtc = cstate->base.crtc;
2972 struct drm_device *dev = crtc->dev;
2973 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2974 struct intel_plane *intel_plane;
2975 enum pipe pipe = intel_crtc->pipe;
2976 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2977 uint16_t alloc_size, start, cursor_blocks;
2978 uint16_t minimum[I915_MAX_PLANES];
2979 uint16_t y_minimum[I915_MAX_PLANES];
2980 unsigned int total_data_rate;
2982 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
2983 alloc_size = skl_ddb_entry_size(alloc);
2984 if (alloc_size == 0) {
2985 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2986 memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
2987 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
2988 return;
2989 }
2991 cursor_blocks = skl_cursor_allocation(config);
2992 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
2993 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
2995 alloc_size -= cursor_blocks;
2996 alloc->end -= cursor_blocks;
2998 /* 1. Allocate the mininum required blocks for each active plane */
2999 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3000 struct drm_plane *plane = &intel_plane->base;
3001 struct drm_framebuffer *fb = plane->fb;
3002 int id = skl_wm_plane_id(intel_plane);
3004 if (fb == NULL)
3005 continue;
3006 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3007 continue;
3009 minimum[id] = 8;
3010 alloc_size -= minimum[id];
3011 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
3012 alloc_size -= y_minimum[id];
3015 /*
3016 * 2. Distribute the remaining space in proportion to the amount of
3017 * data each plane needs to fetch from memory.
3018 *
3019 * FIXME: we may not allocate every single block here.
3020 */
3021 total_data_rate = skl_get_total_relative_data_rate(cstate);
3023 start = alloc->start;
3024 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3025 struct drm_plane *plane = &intel_plane->base;
3026 struct drm_plane_state *pstate = intel_plane->base.state;
3027 unsigned int data_rate, y_data_rate;
3028 uint16_t plane_blocks, y_plane_blocks = 0;
3029 int id = skl_wm_plane_id(intel_plane);
3031 if (pstate->fb == NULL)
3032 continue;
3033 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3034 continue;
3036 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
3038 /*
3039 * allocation for (packed formats) or (uv-plane part of planar format):
3040 * promote the expression to 64 bits to avoid overflowing, the
3041 * result is < available as data_rate / total_data_rate < 1
3042 */
3043 plane_blocks = minimum[id];
3044 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3045 total_data_rate);
3047 ddb->plane[pipe][id].start = start;
3048 ddb->plane[pipe][id].end = start + plane_blocks;
3050 start += plane_blocks;
3052 /*
3053 * allocation for y_plane part of planar format:
3054 */
3055 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
3056 y_data_rate = skl_plane_relative_data_rate(cstate,
3057 pstate,
3058 1);
3059 y_plane_blocks = y_minimum[id];
3060 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3061 total_data_rate);
3063 ddb->y_plane[pipe][id].start = start;
3064 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3066 start += y_plane_blocks;
3073 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
3075 /* TODO: Take into account the scalers once we support them */
3076 return config->base.adjusted_mode.crtc_clock;
3079 /*
3080 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3081 * for the read latency) and bytes_per_pixel should always be <= 8, so that
3082 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3083 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3084 */
3085 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
3086 uint32_t latency)
3087 {
3088 uint32_t wm_intermediate_val, ret;
3090 if (latency == 0)
3091 return UINT_MAX;
3093 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
3094 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3096 return ret;
3097 }
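/*
 * A worked example for method 1 (illustrative values): pixel_rate =
 * 148500 kHz, 4 bytes per pixel and latency = 15us gives
 *
 *   15 * 148500 * 4 / 512 = 17402;  DIV_ROUND_UP(17402, 1000) = 18
 *
 * i.e. about 18 512-byte blocks are fetched during the latency window.
 */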
3099 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3100 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
3101 uint64_t tiling, uint32_t latency)
3103 uint32_t ret;
3104 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3105 uint32_t wm_intermediate_val;
3107 if (latency == 0)
3108 return UINT_MAX;
3110 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
3112 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3113 tiling == I915_FORMAT_MOD_Yf_TILED) {
3114 plane_bytes_per_line *= 4;
3115 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3116 plane_blocks_per_line /= 4;
3118 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3121 wm_intermediate_val = latency * pixel_rate;
3122 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3123 plane_blocks_per_line;
3125 return ret;
3126 }
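/*
 * A worked example for method 2 (illustrative values): a linear
 * 1920-pixel-wide plane at 4 bytes per pixel, htotal = 2200, pixel_rate =
 * 148500 kHz and latency = 15us gives
 *
 *   lines  = DIV_ROUND_UP(15 * 148500, 2200 * 1000) = 2
 *   blocks = DIV_ROUND_UP(1920 * 4, 512) = 15, so the result is 2 * 15 = 30
 *
 * For Y/Yf tiling the per-line block count is instead rounded at 4-line
 * granularity, as in the code above.
 */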
3128 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3129 const struct intel_crtc *intel_crtc)
3131 struct drm_device *dev = intel_crtc->base.dev;
3132 struct drm_i915_private *dev_priv = dev->dev_private;
3133 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3134 enum pipe pipe = intel_crtc->pipe;
3136 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
3137 sizeof(new_ddb->plane[pipe])))
3138 return true;
3140 if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
3141 sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
3142 return true;
3144 return false;
3145 }
3147 static void skl_compute_wm_global_parameters(struct drm_device *dev,
3148 struct intel_wm_config *config)
3150 struct drm_crtc *crtc;
3152 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3153 config->num_pipes_active += to_intel_crtc(crtc)->active;
3156 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3157 struct intel_crtc_state *cstate,
3158 struct intel_plane *intel_plane,
3159 uint16_t ddb_allocation,
3160 int level,
3161 uint16_t *out_blocks, /* out */
3162 uint8_t *out_lines /* out */)
3164 struct drm_plane *plane = &intel_plane->base;
3165 struct drm_framebuffer *fb = plane->state->fb;
3166 uint32_t latency = dev_priv->wm.skl_latency[level];
3167 uint32_t method1, method2;
3168 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3169 uint32_t res_blocks, res_lines;
3170 uint32_t selected_result;
3171 uint8_t bytes_per_pixel;
3173 if (latency == 0 || !cstate->base.active || !fb)
3174 return false;
3176 bytes_per_pixel = (fb->pixel_format == DRM_FORMAT_NV12) ?
3177 drm_format_plane_cpp(DRM_FORMAT_NV12, 0) :
3178 drm_format_plane_cpp(fb->pixel_format, 0);
3179 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
3180 bytes_per_pixel,
3181 latency);
3182 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
3183 cstate->base.adjusted_mode.crtc_htotal,
3184 cstate->pipe_src_w,
3185 bytes_per_pixel,
3186 fb->modifier[0],
3187 latency);
3189 plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
3190 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3192 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3193 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3194 uint32_t min_scanlines = 4;
3195 uint32_t y_tile_minimum;
3196 if (intel_rotation_90_or_270(plane->state->rotation)) {
3197 int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3198 drm_format_plane_cpp(fb->pixel_format, 1) :
3199 drm_format_plane_cpp(fb->pixel_format, 0);
3201 switch (bpp) {
3202 case 1:
3203 min_scanlines = 16;
3204 break;
3205 case 2:
3206 min_scanlines = 8;
3207 break;
3208 case 8:
3209 WARN(1, "Unsupported pixel depth for rotation");
3210 }
3211 }
3212 y_tile_minimum = plane_blocks_per_line * min_scanlines;
3213 selected_result = max(method2, y_tile_minimum);
3214 } else {
3215 if ((ddb_allocation / plane_blocks_per_line) >= 1)
3216 selected_result = min(method1, method2);
3217 else
3218 selected_result = method1;
3219 }
3221 res_blocks = selected_result + 1;
3222 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3224 if (level >= 1 && level <= 7) {
3225 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3226 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
3227 res_lines += 4;
3228 else
3229 res_blocks++;
3230 }
3232 if (res_blocks >= ddb_allocation || res_lines > 31)
3233 return false;
3235 *out_blocks = res_blocks;
3236 *out_lines = res_lines;
3238 return true;
3239 }
3241 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3242 struct skl_ddb_allocation *ddb,
3243 struct intel_crtc_state *cstate,
3244 int level,
3245 struct skl_wm_level *result)
3247 struct drm_device *dev = dev_priv->dev;
3248 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3249 struct intel_plane *intel_plane;
3250 uint16_t ddb_blocks;
3251 enum pipe pipe = intel_crtc->pipe;
3253 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3254 int i = skl_wm_plane_id(intel_plane);
3256 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3258 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
3259 cstate,
3260 intel_plane,
3261 ddb_blocks,
3262 level,
3263 &result->plane_res_b[i],
3264 &result->plane_res_l[i]);
3268 static uint32_t
3269 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3270 {
3271 if (!cstate->base.active)
3272 return 0;
3274 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
3275 return 0;
3277 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3278 skl_pipe_pixel_rate(cstate));
3281 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3282 struct skl_wm_level *trans_wm /* out */)
3284 struct drm_crtc *crtc = cstate->base.crtc;
3285 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3286 struct intel_plane *intel_plane;
3288 if (!cstate->base.active)
3289 return;
3291 /* Until we know more, just disable transition WMs */
3292 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3293 int i = skl_wm_plane_id(intel_plane);
3295 trans_wm->plane_en[i] = false;
3299 static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
3300 struct skl_ddb_allocation *ddb,
3301 struct skl_pipe_wm *pipe_wm)
3303 struct drm_device *dev = cstate->base.crtc->dev;
3304 const struct drm_i915_private *dev_priv = dev->dev_private;
3305 int level, max_level = ilk_wm_max_level(dev);
3307 for (level = 0; level <= max_level; level++) {
3308 skl_compute_wm_level(dev_priv, ddb, cstate,
3309 level, &pipe_wm->wm[level]);
3311 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3313 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3316 static void skl_compute_wm_results(struct drm_device *dev,
3317 struct skl_pipe_wm *p_wm,
3318 struct skl_wm_values *r,
3319 struct intel_crtc *intel_crtc)
3321 int level, max_level = ilk_wm_max_level(dev);
3322 enum pipe pipe = intel_crtc->pipe;
3323 uint32_t temp;
3324 int i;
3326 for (level = 0; level <= max_level; level++) {
3327 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3328 temp = 0;
3330 temp |= p_wm->wm[level].plane_res_l[i] <<
3331 PLANE_WM_LINES_SHIFT;
3332 temp |= p_wm->wm[level].plane_res_b[i];
3333 if (p_wm->wm[level].plane_en[i])
3334 temp |= PLANE_WM_EN;
3336 r->plane[pipe][i][level] = temp;
3340 temp = 0;
3341 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3342 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3344 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3345 temp |= PLANE_WM_EN;
3347 r->plane[pipe][PLANE_CURSOR][level] = temp;
3351 /* transition WMs */
3352 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3353 temp = 0;
3354 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3355 temp |= p_wm->trans_wm.plane_res_b[i];
3356 if (p_wm->trans_wm.plane_en[i])
3357 temp |= PLANE_WM_EN;
3359 r->plane_trans[pipe][i] = temp;
3362 temp = 0;
3363 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3364 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3365 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3366 temp |= PLANE_WM_EN;
3368 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3370 r->wm_linetime[pipe] = p_wm->linetime;
3373 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3374 const struct skl_ddb_entry *entry)
3376 if (entry->end)
3377 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3378 else
3379 I915_WRITE(reg, 0);
3382 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3383 const struct skl_wm_values *new)
3385 struct drm_device *dev = dev_priv->dev;
3386 struct intel_crtc *crtc;
3388 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3389 int i, level, max_level = ilk_wm_max_level(dev);
3390 enum pipe pipe = crtc->pipe;
3392 if (!new->dirty[pipe])
3393 continue;
3395 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3397 for (level = 0; level <= max_level; level++) {
3398 for (i = 0; i < intel_num_planes(crtc); i++)
3399 I915_WRITE(PLANE_WM(pipe, i, level),
3400 new->plane[pipe][i][level]);
3401 I915_WRITE(CUR_WM(pipe, level),
3402 new->plane[pipe][PLANE_CURSOR][level]);
3404 for (i = 0; i < intel_num_planes(crtc); i++)
3405 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3406 new->plane_trans[pipe][i]);
3407 I915_WRITE(CUR_WM_TRANS(pipe),
3408 new->plane_trans[pipe][PLANE_CURSOR]);
3410 for (i = 0; i < intel_num_planes(crtc); i++) {
3411 skl_ddb_entry_write(dev_priv,
3412 PLANE_BUF_CFG(pipe, i),
3413 &new->ddb.plane[pipe][i]);
3414 skl_ddb_entry_write(dev_priv,
3415 PLANE_NV12_BUF_CFG(pipe, i),
3416 &new->ddb.y_plane[pipe][i]);
3419 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3420 &new->ddb.plane[pipe][PLANE_CURSOR]);
3424 /*
3425 * When setting up a new DDB allocation arrangement, we need to correctly
3426 * sequence the times at which the new allocations for the pipes are taken into
3427 * account or we'll have pipes fetching from space previously allocated to
3428 * another pipe.
3429 *
3430 * Roughly the sequence looks like:
3431 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3432 * overlapping with a previous light-up pipe (another way to put it is:
3433 * pipes with their new allocation strictly included into their old ones).
3434 * 2. re-allocate the other pipes that get their allocation reduced
3435 * 3. allocate the pipes having their allocation increased
3437 * Steps 1. and 2. are here to take care of the following case:
3438 * - Initially DDB looks like this:
3439 * |   B    |   C    |
3440 * - enable pipe A
3441 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3442 * allocation
3443 * |  A  |  B  |  C  |
3444 *
3445 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
3446 */
3448 static void
3449 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
3450 {
3451 int plane;
3453 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3455 for_each_plane(dev_priv, pipe, plane) {
3456 I915_WRITE(PLANE_SURF(pipe, plane),
3457 I915_READ(PLANE_SURF(pipe, plane)));
3459 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3462 static bool
3463 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3464 const struct skl_ddb_allocation *new,
3465 enum pipe pipe)
3466 {
3467 uint16_t old_size, new_size;
3469 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3470 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3472 return old_size != new_size &&
3473 new->pipe[pipe].start >= old->pipe[pipe].start &&
3474 new->pipe[pipe].end <= old->pipe[pipe].end;
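/*
 * "Included" here means strictly shrunk in place: an old allocation of
 * [100, 300) and a new one of [120, 260) qualifies, while keeping the
 * same [100, 300) (equal size) or moving to [80, 260) (new start below
 * the old one) does not.
 */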
3477 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3478 struct skl_wm_values *new_values)
3480 struct drm_device *dev = dev_priv->dev;
3481 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3482 bool reallocated[I915_MAX_PIPES] = {};
3483 struct intel_crtc *crtc;
3484 enum pipe pipe;
3486 new_ddb = &new_values->ddb;
3487 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3490 * First pass: flush the pipes with the new allocation contained into
3493 * We'll wait for the vblank on those pipes to ensure we can safely
3494 * re-allocate the freed space without this pipe fetching from it.
3496 for_each_intel_crtc(dev, crtc) {
3498 if (!crtc->active)
3499 continue;
3501 pipe = crtc->pipe;
3502 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3503 continue;
3505 skl_wm_flush_pipe(dev_priv, pipe, 1);
3506 intel_wait_for_vblank(dev, pipe);
3508 reallocated[pipe] = true;
3513 * Second pass: flush the pipes that are having their allocation
3514 * reduced, but overlapping with a previous allocation.
3516 * Here as well we need to wait for the vblank to make sure the freed
3517 * space is not used anymore.
3519 for_each_intel_crtc(dev, crtc) {
3521 if (!crtc->active)
3522 continue;
3524 pipe = crtc->pipe;
3525 if (reallocated[pipe])
3526 continue;
3528 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3529 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3530 skl_wm_flush_pipe(dev_priv, pipe, 2);
3531 intel_wait_for_vblank(dev, pipe);
3532 reallocated[pipe] = true;
3537 * Third pass: flush the pipes that got more space allocated.
3539 * We don't need to actively wait for the update here, next vblank
3540 * will just get more DDB space with the correct WM values.
3542 for_each_intel_crtc(dev, crtc) {
3544 if (!crtc->active)
3545 continue;
3547 pipe = crtc->pipe;
3549 /*
3550 * At this point, only the pipes with more space than before are
3551 * left to re-allocate.
3552 */
3553 if (reallocated[pipe])
3554 continue;
3555 skl_wm_flush_pipe(dev_priv, pipe, 3);
3559 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3560 struct intel_wm_config *config,
3561 struct skl_ddb_allocation *ddb, /* out */
3562 struct skl_pipe_wm *pipe_wm /* out */)
3564 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3565 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3567 skl_allocate_pipe_ddb(cstate, config, ddb);
3568 skl_compute_pipe_wm(cstate, ddb, pipe_wm);
3570 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3571 return false;
3573 intel_crtc->wm.skl_active = *pipe_wm;
3575 return true;
3576 }
3578 static void skl_update_other_pipe_wm(struct drm_device *dev,
3579 struct drm_crtc *crtc,
3580 struct intel_wm_config *config,
3581 struct skl_wm_values *r)
3583 struct intel_crtc *intel_crtc;
3584 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3586 /*
3587 * If the WM update hasn't changed the allocation for this_crtc (the
3588 * crtc we are currently computing the new WM values for), other
3589 * enabled crtcs will keep the same allocation and we don't need to
3590 * recompute anything for them.
3591 */
3592 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3593 return;
3595 /*
3596 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3597 * other active pipes need new DDB allocation and WM values.
3598 */
3599 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3600 base.head) {
3601 struct skl_pipe_wm pipe_wm = {};
3602 bool wm_changed;
3604 if (this_crtc->pipe == intel_crtc->pipe)
3605 continue;
3607 if (!intel_crtc->active)
3608 continue;
3610 wm_changed = skl_update_pipe_wm(&intel_crtc->base, config,
3611 &r->ddb, &pipe_wm);
3613 /*
3614 * If we end up re-computing the other pipe WM values, it's
3615 * because it was really needed, so we expect the WM values to
3616 * change.
3617 */
3618 WARN_ON(!wm_changed);
3620 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
3621 r->dirty[intel_crtc->pipe] = true;
3625 static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3627 watermarks->wm_linetime[pipe] = 0;
3628 memset(watermarks->plane[pipe], 0,
3629 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
3630 memset(watermarks->plane_trans[pipe],
3631 0, sizeof(uint32_t) * I915_MAX_PLANES);
3632 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
3634 /* Clear ddb entries for pipe */
3635 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3636 memset(&watermarks->ddb.plane[pipe], 0,
3637 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3638 memset(&watermarks->ddb.y_plane[pipe], 0,
3639 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3640 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3641 sizeof(struct skl_ddb_entry));
3645 static void skl_update_wm(struct drm_crtc *crtc)
3647 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3648 struct drm_device *dev = crtc->dev;
3649 struct drm_i915_private *dev_priv = dev->dev_private;
3650 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3651 struct skl_pipe_wm pipe_wm = {};
3652 struct intel_wm_config config = {};
3655 /* Clear all dirty flags */
3656 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3658 skl_clear_wm(results, intel_crtc->pipe);
3660 skl_compute_wm_global_parameters(dev, &config);
3662 if (!skl_update_pipe_wm(crtc, &config, &results->ddb, &pipe_wm))
3663 return;
3665 skl_compute_wm_results(dev, &pipe_wm, results, intel_crtc);
3666 results->dirty[intel_crtc->pipe] = true;
3668 skl_update_other_pipe_wm(dev, crtc, &config, results);
3669 skl_write_wm_values(dev_priv, results);
3670 skl_flush_wm_values(dev_priv, results);
3672 /* store the new configuration */
3673 dev_priv->wm.skl_hw = *results;
3676 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3678 struct drm_device *dev = dev_priv->dev;
3679 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3680 struct ilk_wm_maximums max;
3681 struct intel_wm_config config = {};
3682 struct ilk_wm_values results = {};
3683 enum intel_ddb_partitioning partitioning;
3685 ilk_compute_wm_config(dev, &config);
3687 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3688 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3690 /* 5/6 split only in single pipe config on IVB+ */
3691 if (INTEL_INFO(dev)->gen >= 7 &&
3692 config.num_pipes_active == 1 && config.sprites_enabled) {
3693 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3694 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3696 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3697 } else {
3698 best_lp_wm = &lp_wm_1_2;
3699 }
3701 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3702 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3704 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3706 ilk_write_wm_values(dev_priv, &results);
3709 static void ilk_update_wm(struct drm_crtc *crtc)
3711 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3712 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3713 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3714 struct intel_pipe_wm pipe_wm = {};
3716 WARN_ON(cstate->base.active != intel_crtc->active);
3718 /*
3719 * IVB workaround: must disable low power watermarks for at least
3720 * one frame before enabling scaling. LP watermarks can be re-enabled
3721 * when scaling is disabled.
3722 *
3723 * WaCxSRDisabledForSpriteScaling:ivb
3724 */
3725 if (cstate->disable_lp_wm) {
3726 ilk_disable_lp_wm(crtc->dev);
3727 intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
3730 intel_compute_pipe_wm(cstate, &pipe_wm);
3732 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3733 return;
3735 intel_crtc->wm.active = pipe_wm;
3737 ilk_program_watermarks(dev_priv);
3740 static void skl_pipe_wm_active_state(uint32_t val,
3741 struct skl_pipe_wm *active,
3742 bool is_transwm,
3743 bool is_cursor,
3744 int i,
3745 int level)
3746 {
3747 bool is_enabled = (val & PLANE_WM_EN) != 0;
3749 if (!is_transwm) {
3750 if (!is_cursor) {
3751 active->wm[level].plane_en[i] = is_enabled;
3752 active->wm[level].plane_res_b[i] =
3753 val & PLANE_WM_BLOCKS_MASK;
3754 active->wm[level].plane_res_l[i] =
3755 (val >> PLANE_WM_LINES_SHIFT) &
3756 PLANE_WM_LINES_MASK;
3757 } else {
3758 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
3759 active->wm[level].plane_res_b[PLANE_CURSOR] =
3760 val & PLANE_WM_BLOCKS_MASK;
3761 active->wm[level].plane_res_l[PLANE_CURSOR] =
3762 (val >> PLANE_WM_LINES_SHIFT) &
3763 PLANE_WM_LINES_MASK;
3764 }
3765 } else {
3766 if (!is_cursor) {
3767 active->trans_wm.plane_en[i] = is_enabled;
3768 active->trans_wm.plane_res_b[i] =
3769 val & PLANE_WM_BLOCKS_MASK;
3770 active->trans_wm.plane_res_l[i] =
3771 (val >> PLANE_WM_LINES_SHIFT) &
3772 PLANE_WM_LINES_MASK;
3773 } else {
3774 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
3775 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3776 val & PLANE_WM_BLOCKS_MASK;
3777 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3778 (val >> PLANE_WM_LINES_SHIFT) &
3779 PLANE_WM_LINES_MASK;
3784 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3786 struct drm_device *dev = crtc->dev;
3787 struct drm_i915_private *dev_priv = dev->dev_private;
3788 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3789 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3790 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3791 enum pipe pipe = intel_crtc->pipe;
3792 int level, i, max_level;
3793 uint32_t temp;
3795 max_level = ilk_wm_max_level(dev);
3797 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3799 for (level = 0; level <= max_level; level++) {
3800 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3801 hw->plane[pipe][i][level] =
3802 I915_READ(PLANE_WM(pipe, i, level));
3803 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3806 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3807 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3808 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
3810 if (!intel_crtc->active)
3811 return;
3813 hw->dirty[pipe] = true;
3815 active->linetime = hw->wm_linetime[pipe];
3817 for (level = 0; level <= max_level; level++) {
3818 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3819 temp = hw->plane[pipe][i][level];
3820 skl_pipe_wm_active_state(temp, active, false,
3821 false, i, level);
3823 temp = hw->plane[pipe][PLANE_CURSOR][level];
3824 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3827 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3828 temp = hw->plane_trans[pipe][i];
3829 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3832 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3833 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3836 void skl_wm_get_hw_state(struct drm_device *dev)
3838 struct drm_i915_private *dev_priv = dev->dev_private;
3839 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3840 struct drm_crtc *crtc;
3842 skl_ddb_get_hw_state(dev_priv, ddb);
3843 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3844 skl_pipe_wm_get_hw_state(crtc);
3847 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3849 struct drm_device *dev = crtc->dev;
3850 struct drm_i915_private *dev_priv = dev->dev_private;
3851 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3852 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3853 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3854 enum pipe pipe = intel_crtc->pipe;
3855 static const unsigned int wm0_pipe_reg[] = {
3856 [PIPE_A] = WM0_PIPEA_ILK,
3857 [PIPE_B] = WM0_PIPEB_ILK,
3858 [PIPE_C] = WM0_PIPEC_IVB,
3861 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3862 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3863 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3865 active->pipe_enabled = intel_crtc->active;
3867 if (active->pipe_enabled) {
3868 u32 tmp = hw->wm_pipe[pipe];
3870 /*
3871 * For active pipes LP0 watermark is marked as
3872 * enabled, and LP1+ watermarks as disabled since
3873 * we can't really reverse compute them in case
3874 * multiple pipes are active.
3875 */
3876 active->wm[0].enable = true;
3877 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3878 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3879 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3880 active->linetime = hw->wm_linetime[pipe];
3882 int level, max_level = ilk_wm_max_level(dev);
3884 /*
3885 * For inactive pipes, all watermark levels
3886 * should be marked as enabled but zeroed,
3887 * which is what we'd compute them to.
3888 */
3889 for (level = 0; level <= max_level; level++)
3890 active->wm[level].enable = true;
3894 #define _FW_WM(value, plane) \
3895 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3896 #define _FW_WM_VLV(value, plane) \
3897 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
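/*
 * These helpers just shift-and-mask one watermark field out of a DSPFW
 * register; e.g. _FW_WM(tmp, CURSORB) expands to
 *
 *   ((tmp & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT)
 *
 * and the _VLV variant only differs in using the wider _MASK_VLV masks.
 */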
3899 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3900 struct vlv_wm_values *wm)
3901 {
3902 enum pipe pipe;
3903 uint32_t tmp;
3905 for_each_pipe(dev_priv, pipe) {
3906 tmp = I915_READ(VLV_DDL(pipe));
3908 wm->ddl[pipe].primary =
3909 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3910 wm->ddl[pipe].cursor =
3911 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3912 wm->ddl[pipe].sprite[0] =
3913 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3914 wm->ddl[pipe].sprite[1] =
3915 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3918 tmp = I915_READ(DSPFW1);
3919 wm->sr.plane = _FW_WM(tmp, SR);
3920 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
3921 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
3922 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
3924 tmp = I915_READ(DSPFW2);
3925 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
3926 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
3927 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
3929 tmp = I915_READ(DSPFW3);
3930 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3932 if (IS_CHERRYVIEW(dev_priv)) {
3933 tmp = I915_READ(DSPFW7_CHV);
3934 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3935 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3937 tmp = I915_READ(DSPFW8_CHV);
3938 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
3939 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
3941 tmp = I915_READ(DSPFW9_CHV);
3942 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
3943 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
3945 tmp = I915_READ(DSPHOWM);
3946 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3947 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
3948 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
3949 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
3950 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3951 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3952 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3953 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3954 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3955 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3957 tmp = I915_READ(DSPFW7);
3958 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3959 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3961 tmp = I915_READ(DSPHOWM);
3962 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3963 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3964 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3965 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3966 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3967 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3968 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3975 void vlv_wm_get_hw_state(struct drm_device *dev)
3977 struct drm_i915_private *dev_priv = to_i915(dev);
3978 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
3979 struct intel_plane *plane;
3980 enum pipe pipe;
3981 u32 val;
3983 vlv_read_wm_values(dev_priv, wm);
3985 for_each_intel_plane(dev, plane) {
3986 switch (plane->base.type) {
3987 int sprite;
3988 case DRM_PLANE_TYPE_CURSOR:
3989 plane->wm.fifo_size = 63;
3990 break;
3991 case DRM_PLANE_TYPE_PRIMARY:
3992 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
3993 break;
3994 case DRM_PLANE_TYPE_OVERLAY:
3995 sprite = plane->plane;
3996 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
3997 break;
3998 }
3999 }
4001 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4002 wm->level = VLV_WM_LEVEL_PM2;
4004 if (IS_CHERRYVIEW(dev_priv)) {
4005 mutex_lock(&dev_priv->rps.hw_lock);
4007 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4008 if (val & DSP_MAXFIFO_PM5_ENABLE)
4009 wm->level = VLV_WM_LEVEL_PM5;
4011 /*
4012 * If DDR DVFS is disabled in the BIOS, Punit
4013 * will never ack the request. So if that happens
4014 * assume we don't have to enable/disable DDR DVFS
4015 * dynamically. To test that just set the REQ_ACK
4016 * bit to poke the Punit, but don't change the
4017 * HIGH/LOW bits so that we don't actually change
4018 * the current state.
4019 */
4020 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4021 val |= FORCE_DDR_FREQ_REQ_ACK;
4022 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4024 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4025 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4026 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4027 "assuming DDR DVFS is disabled\n");
4028 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4029 } else {
4030 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4031 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4032 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4035 mutex_unlock(&dev_priv->rps.hw_lock);
4038 for_each_pipe(dev_priv, pipe)
4039 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4040 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4041 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4043 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4044 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4047 void ilk_wm_get_hw_state(struct drm_device *dev)
4049 struct drm_i915_private *dev_priv = dev->dev_private;
4050 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4051 struct drm_crtc *crtc;
4053 for_each_crtc(dev, crtc)
4054 ilk_pipe_wm_get_hw_state(crtc);
4056 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4057 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4058 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4060 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4061 if (INTEL_INFO(dev)->gen >= 7) {
4062 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4063 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4066 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4067 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4068 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4069 else if (IS_IVYBRIDGE(dev))
4070 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4071 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4073 hw->enable_fbc_wm =
4074 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4077 /**
4078 * intel_update_watermarks - update FIFO watermark values based on current modes
4079 *
4080 * Calculate watermark values for the various WM regs based on current mode
4081 * and plane configuration.
4083 * There are several cases to deal with here:
4084 * - normal (i.e. non-self-refresh)
4085 * - self-refresh (SR) mode
4086 * - lines are large relative to FIFO size (buffer can hold up to 2)
4087 * - lines are small relative to FIFO size (buffer can hold more than 2
4088 * lines), so need to account for TLB latency
4090 * The normal calculation is:
4091 * watermark = dotclock * bytes per pixel * latency
4092 * where latency is platform & configuration dependent (we assume pessimal
4093 * values).
4095 * The SR calculation is:
4096 * watermark = (trunc(latency/line time)+1) * surface width *
4097 * bytes per pixel
4098 * where
4099 * line time = htotal / dotclock
4100 * surface width = hdisplay for normal plane and 64 for cursor
4101 * and latency is assumed to be high, as above.
4103 * The final value programmed to the register should always be rounded up,
4104 * and include an extra 2 entries to account for clock crossings.
4106 * We don't use the sprite, so we can ignore that. And on Crestline we have
4107 * to set the non-SR watermarks to 8.
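/*
 * Worked example of the SR formula above (illustrative numbers only,
 * not taken from any real mode): a 100 MHz dotclock with htotal = 1000
 * gives a line time of 10 us. Assuming a (pessimal) 20 us latency,
 * trunc(20 / 10) + 1 = 3 lines; with hdisplay = 800 and 4 bytes per
 * pixel that is 3 * 800 * 4 = 9600 bytes of FIFO, then rounded up and
 * padded by the 2 extra entries for clock crossings.
 */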
4109 void intel_update_watermarks(struct drm_crtc *crtc)
4111 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4113 if (dev_priv->display.update_wm)
4114 dev_priv->display.update_wm(crtc);
4118 * Lock protecting IPS related data structures
4120 DEFINE_SPINLOCK(mchdev_lock);
4122 /* Global for IPS driver to get at the current i915 device. Protected by
4123 * mchdev_lock. */
4124 static struct drm_i915_private *i915_mch_dev;
4126 bool ironlake_set_drps(struct drm_device *dev, u8 val)
4128 struct drm_i915_private *dev_priv = dev->dev_private;
4131 assert_spin_locked(&mchdev_lock);
4133 rgvswctl = I915_READ16(MEMSWCTL);
4134 if (rgvswctl & MEMCTL_CMD_STS) {
4135 DRM_DEBUG("gpu busy, RCS change rejected\n");
4136 return false; /* still busy with another command */
4139 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4140 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4141 I915_WRITE16(MEMSWCTL, rgvswctl);
4142 POSTING_READ16(MEMSWCTL);
4144 rgvswctl |= MEMCTL_CMD_STS;
4145 I915_WRITE16(MEMSWCTL, rgvswctl);
4150 static void ironlake_enable_drps(struct drm_device *dev)
4152 struct drm_i915_private *dev_priv = dev->dev_private;
4153 u32 rgvmodectl = I915_READ(MEMMODECTL);
4154 u8 fmax, fmin, fstart, vstart;
4156 spin_lock_irq(&mchdev_lock);
4158 /* Enable temp reporting */
4159 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4160 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4162 /* 100ms RC evaluation intervals */
4163 I915_WRITE(RCUPEI, 100000);
4164 I915_WRITE(RCDNEI, 100000);
4166 /* Set max/min thresholds to 90ms and 80ms respectively */
4167 I915_WRITE(RCBMAXAVG, 90000);
4168 I915_WRITE(RCBMINAVG, 80000);
4170 I915_WRITE(MEMIHYST, 1);
4172 /* Set up min, max, and cur for interrupt handling */
4173 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4174 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4175 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4176 MEMMODE_FSTART_SHIFT;
4178 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4179 PXVFREQ_PX_SHIFT;
4181 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4182 dev_priv->ips.fstart = fstart;
4184 dev_priv->ips.max_delay = fstart;
4185 dev_priv->ips.min_delay = fmin;
4186 dev_priv->ips.cur_delay = fstart;
4188 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4189 fmax, fmin, fstart);
4191 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4194 * Interrupts will be enabled in ironlake_irq_postinstall
4197 I915_WRITE(VIDSTART, vstart);
4198 POSTING_READ(VIDSTART);
4200 rgvmodectl |= MEMMODE_SWMODE_EN;
4201 I915_WRITE(MEMMODECTL, rgvmodectl);
4203 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4204 DRM_ERROR("stuck trying to change perf mode\n");
4207 ironlake_set_drps(dev, fstart);
4209 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4210 I915_READ(DDREC) + I915_READ(CSIEC);
4211 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4212 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4213 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4215 spin_unlock_irq(&mchdev_lock);
4218 static void ironlake_disable_drps(struct drm_device *dev)
4220 struct drm_i915_private *dev_priv = dev->dev_private;
4223 spin_lock_irq(&mchdev_lock);
4225 rgvswctl = I915_READ16(MEMSWCTL);
4227 /* Ack interrupts, disable EFC interrupt */
4228 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4229 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4230 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4231 I915_WRITE(DEIIR, DE_PCU_EVENT);
4232 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4234 /* Go back to the starting frequency */
4235 ironlake_set_drps(dev, dev_priv->ips.fstart);
4237 rgvswctl |= MEMCTL_CMD_STS;
4238 I915_WRITE(MEMSWCTL, rgvswctl);
4241 spin_unlock_irq(&mchdev_lock);
4244 /* There's a funny hw issue where the hw returns all 0 when reading from
4245 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4246 * ourselves, instead of doing an RMW cycle (which might result in us clearing
4247 * all limits and the gpu getting stuck at whatever frequency it currently runs at).
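/*
 * Layout sketch (derived from the code below, values illustrative):
 * on pre-gen9 the computed value packs the max soft limit into bits
 * 31:24 and, only when val is already at the floor, the min soft limit
 * into bits 23:16 -- e.g. max = 0x16 with min = 0x0b at the floor gives
 * (0x16 << 24) | (0x0b << 16) = 0x160b0000. Gen9 uses wider frequency
 * fields, hence the 23/14 shifts.
 */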
4249 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4253 /* Only set the down limit when we've reached the lowest level to avoid
4254 * getting more interrupts, otherwise leave this clear. This prevents a
4255 * race in the hw when coming out of rc6: There's a tiny window where
4256 * the hw runs at the minimal clock before selecting the desired
4257 * frequency, if the down threshold expires in that window we will not
4258 * receive a down interrupt. */
4259 if (IS_GEN9(dev_priv->dev)) {
4260 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4261 if (val <= dev_priv->rps.min_freq_softlimit)
4262 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4263 } else {
4264 limits = dev_priv->rps.max_freq_softlimit << 24;
4265 if (val <= dev_priv->rps.min_freq_softlimit)
4266 limits |= dev_priv->rps.min_freq_softlimit << 16;
4272 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4275 u32 threshold_up = 0, threshold_down = 0; /* in % */
4276 u32 ei_up = 0, ei_down = 0;
4278 new_power = dev_priv->rps.power;
4279 switch (dev_priv->rps.power) {
4280 case LOW_POWER:
4281 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
4282 new_power = BETWEEN;
4283 break;
4285 case BETWEEN:
4286 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
4287 new_power = LOW_POWER;
4288 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
4289 new_power = HIGH_POWER;
4290 break;
4292 case HIGH_POWER:
4293 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
4294 new_power = BETWEEN;
4295 break;
4296 }
4297 /* Max/min bins are special */
4298 if (val <= dev_priv->rps.min_freq_softlimit)
4299 new_power = LOW_POWER;
4300 if (val >= dev_priv->rps.max_freq_softlimit)
4301 new_power = HIGH_POWER;
4302 if (new_power == dev_priv->rps.power)
4303 return;
4305 /* Note the units here are not exactly 1us, but 1280ns. */
4306 switch (new_power) {
4307 case LOW_POWER:
4308 /* Upclock if more than 95% busy over 16ms */
4309 ei_up = 16000;
4310 threshold_up = 95;
4312 /* Downclock if less than 85% busy over 32ms */
4313 ei_down = 32000;
4314 threshold_down = 85;
4315 break;
4317 case BETWEEN:
4318 /* Upclock if more than 90% busy over 13ms */
4319 ei_up = 13000;
4320 threshold_up = 90;
4322 /* Downclock if less than 75% busy over 32ms */
4323 ei_down = 32000;
4324 threshold_down = 75;
4325 break;
4327 case HIGH_POWER:
4328 /* Upclock if more than 85% busy over 10ms */
4329 ei_up = 10000;
4330 threshold_up = 85;
4332 /* Downclock if less than 60% busy over 32ms */
4333 ei_down = 32000;
4334 threshold_down = 60;
4335 break;
4336 }
4338 I915_WRITE(GEN6_RP_UP_EI,
4339 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4340 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4341 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
4343 I915_WRITE(GEN6_RP_DOWN_EI,
4344 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4345 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4346 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
4348 I915_WRITE(GEN6_RP_CONTROL,
4349 GEN6_RP_MEDIA_TURBO |
4350 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4351 GEN6_RP_MEDIA_IS_GFX |
4353 GEN6_RP_UP_BUSY_AVG |
4354 GEN6_RP_DOWN_IDLE_AVG);
4356 dev_priv->rps.power = new_power;
4357 dev_priv->rps.up_threshold = threshold_up;
4358 dev_priv->rps.down_threshold = threshold_down;
4359 dev_priv->rps.last_adj = 0;
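/*
 * Unit sanity check (illustrative): GT_INTERVAL_FROM_US() converts
 * microseconds into the ~1.28 us evaluation-interval units noted
 * above, so the LOW_POWER case programs an up window of 16000 us and
 * an up threshold of 16000 * 95 / 100 = 15200 us of busyness, i.e.
 * "more than 95% busy over 16ms" as the comment says.
 */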
4362 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4366 if (val > dev_priv->rps.min_freq_softlimit)
4367 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4368 if (val < dev_priv->rps.max_freq_softlimit)
4369 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4371 mask &= dev_priv->pm_rps_events;
4373 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4376 /* gen6_set_rps is called to update the frequency request, but should also be
4377 * called when the range (min_delay and max_delay) is modified so that we can
4378 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4379 static void gen6_set_rps(struct drm_device *dev, u8 val)
4381 struct drm_i915_private *dev_priv = dev->dev_private;
4383 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4384 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
4385 return;
4387 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4388 WARN_ON(val > dev_priv->rps.max_freq);
4389 WARN_ON(val < dev_priv->rps.min_freq);
4391 /* min/max delay may still have been modified so be sure to
4392 * write the limits value.
4394 if (val != dev_priv->rps.cur_freq) {
4395 gen6_set_rps_thresholds(dev_priv, val);
4397 if (IS_GEN9(dev))
4398 I915_WRITE(GEN6_RPNSWREQ,
4399 GEN9_FREQUENCY(val));
4400 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4401 I915_WRITE(GEN6_RPNSWREQ,
4402 HSW_FREQUENCY(val));
4404 I915_WRITE(GEN6_RPNSWREQ,
4405 GEN6_FREQUENCY(val) |
4407 GEN6_AGGRESSIVE_TURBO);
4410 /* Make sure we continue to get interrupts
4411 * until we hit the minimum or maximum frequencies.
4413 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4414 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4416 POSTING_READ(GEN6_RPNSWREQ);
4418 dev_priv->rps.cur_freq = val;
4419 trace_intel_gpu_freq_change(val * 50);
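/*
 * NB (annotation): the RPS opcode here is in 50 MHz steps on most of
 * the gens this path serves, so val * 50 approximates the requested
 * MHz for tracing; SKL's 16.66 MHz units are rescaled elsewhere via
 * GEN9_FREQ_SCALER, and intel_gpu_freq() is the general conversion.
 */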
4422 static void valleyview_set_rps(struct drm_device *dev, u8 val)
4424 struct drm_i915_private *dev_priv = dev->dev_private;
4426 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4427 WARN_ON(val > dev_priv->rps.max_freq);
4428 WARN_ON(val < dev_priv->rps.min_freq);
4430 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4431 "Odd GPU freq value\n"))
4434 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4436 if (val != dev_priv->rps.cur_freq) {
4437 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4438 if (!IS_CHERRYVIEW(dev_priv))
4439 gen6_set_rps_thresholds(dev_priv, val);
4442 dev_priv->rps.cur_freq = val;
4443 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4446 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4448 * If Gfx is Idle, then
4449 * 1. Forcewake Media well.
4450 * 2. Request idle freq.
4451 * 3. Release Forcewake of Media well.
4453 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4455 u32 val = dev_priv->rps.idle_freq;
4457 if (dev_priv->rps.cur_freq <= val)
4458 return;
4460 /* Wake up the media well, as that takes a lot less
4461 * power than the Render well. */
4462 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4463 valleyview_set_rps(dev_priv->dev, val);
4464 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4467 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4469 mutex_lock(&dev_priv->rps.hw_lock);
4470 if (dev_priv->rps.enabled) {
4471 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4472 gen6_rps_reset_ei(dev_priv);
4473 I915_WRITE(GEN6_PMINTRMSK,
4474 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4476 mutex_unlock(&dev_priv->rps.hw_lock);
4479 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4481 struct drm_device *dev = dev_priv->dev;
4483 mutex_lock(&dev_priv->rps.hw_lock);
4484 if (dev_priv->rps.enabled) {
4485 if (IS_VALLEYVIEW(dev))
4486 vlv_set_rps_idle(dev_priv);
4487 else
4488 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4489 dev_priv->rps.last_adj = 0;
4490 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4492 mutex_unlock(&dev_priv->rps.hw_lock);
4494 spin_lock(&dev_priv->rps.client_lock);
4495 while (!list_empty(&dev_priv->rps.clients))
4496 list_del_init(dev_priv->rps.clients.next);
4497 spin_unlock(&dev_priv->rps.client_lock);
4500 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4501 struct intel_rps_client *rps,
4502 unsigned long submitted)
4504 /* This is intentionally racy! We peek at the state here, then
4505 * validate inside the RPS worker.
4507 if (!(dev_priv->mm.busy &&
4508 dev_priv->rps.enabled &&
4509 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4510 return;
4512 /* Force a RPS boost (and don't count it against the client) if
4513 * the GPU is severely congested.
4515 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
4516 rps = NULL;
4518 spin_lock(&dev_priv->rps.client_lock);
4519 if (rps == NULL || list_empty(&rps->link)) {
4520 spin_lock_irq(&dev_priv->irq_lock);
4521 if (dev_priv->rps.interrupts_enabled) {
4522 dev_priv->rps.client_boost = true;
4523 queue_work(dev_priv->wq, &dev_priv->rps.work);
4525 spin_unlock_irq(&dev_priv->irq_lock);
4527 if (rps != NULL) {
4528 list_add(&rps->link, &dev_priv->rps.clients);
4529 rps->boosts++;
4530 } else
4531 dev_priv->rps.boosts++;
4533 spin_unlock(&dev_priv->rps.client_lock);
4536 void intel_set_rps(struct drm_device *dev, u8 val)
4538 if (IS_VALLEYVIEW(dev))
4539 valleyview_set_rps(dev, val);
4540 else
4541 gen6_set_rps(dev, val);
4544 static void gen9_disable_rps(struct drm_device *dev)
4546 struct drm_i915_private *dev_priv = dev->dev_private;
4548 I915_WRITE(GEN6_RC_CONTROL, 0);
4549 I915_WRITE(GEN9_PG_ENABLE, 0);
4552 static void gen6_disable_rps(struct drm_device *dev)
4554 struct drm_i915_private *dev_priv = dev->dev_private;
4556 I915_WRITE(GEN6_RC_CONTROL, 0);
4557 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4560 static void cherryview_disable_rps(struct drm_device *dev)
4562 struct drm_i915_private *dev_priv = dev->dev_private;
4564 I915_WRITE(GEN6_RC_CONTROL, 0);
4567 static void valleyview_disable_rps(struct drm_device *dev)
4569 struct drm_i915_private *dev_priv = dev->dev_private;
4571 /* We're doing forcewake before disabling RC6,
4572 * which is what the BIOS expects when going into suspend */
4573 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4575 I915_WRITE(GEN6_RC_CONTROL, 0);
4577 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4580 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4582 if (IS_VALLEYVIEW(dev)) {
4583 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4584 mode = GEN6_RC_CTL_RC6_ENABLE;
4585 else
4586 mode = 0;
4587 }
4588 if (HAS_RC6p(dev))
4589 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4590 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4591 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4592 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4595 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4596 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
4599 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4601 /* No RC6 before Ironlake and code is gone for ilk. */
4602 if (INTEL_INFO(dev)->gen < 6)
4603 return 0;
4605 /* Respect the kernel parameter if it is set */
4606 if (enable_rc6 >= 0) {
4609 if (HAS_RC6p(dev))
4610 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4611 INTEL_RC6pp_ENABLE;
4612 else
4613 mask = INTEL_RC6_ENABLE;
4615 if ((enable_rc6 & mask) != enable_rc6)
4616 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4617 enable_rc6 & mask, enable_rc6, mask);
4619 return enable_rc6 & mask;
4622 if (IS_IVYBRIDGE(dev))
4623 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4625 return INTEL_RC6_ENABLE;
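/*
 * For example (illustrative): booting with i915.enable_rc6=7 on a part
 * whose valid mask is just INTEL_RC6_ENABLE yields 7 & 1 = 1, and the
 * adjustment is reported through the DRM_DEBUG_KMS message above.
 */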
4628 int intel_enable_rc6(const struct drm_device *dev)
4630 return i915.enable_rc6;
4633 static void gen6_init_rps_frequencies(struct drm_device *dev)
4635 struct drm_i915_private *dev_priv = dev->dev_private;
4636 uint32_t rp_state_cap;
4637 u32 ddcc_status = 0;
4640 /* All of these values are in units of 50MHz */
4641 dev_priv->rps.cur_freq = 0;
4642 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4643 if (IS_BROXTON(dev)) {
4644 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4645 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4646 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4647 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
4648 } else {
4649 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4650 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4651 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4652 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4655 /* hw_max = RP0 until we check for overclocking */
4656 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4658 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4659 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
4660 ret = sandybridge_pcode_read(dev_priv,
4661 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4662 &ddcc_status);
4663 if (0 == ret)
4664 dev_priv->rps.efficient_freq =
4665 clamp_t(u8,
4666 ((ddcc_status >> 8) & 0xff),
4667 dev_priv->rps.min_freq,
4668 dev_priv->rps.max_freq);
4671 if (IS_SKYLAKE(dev)) {
4672 /* Store the frequency values in 16.66 MHz units, which is
4673 the natural hardware unit for SKL */
4674 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4675 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4676 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4677 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
4678 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
4681 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4683 /* Preserve min/max settings in case of re-init */
4684 if (dev_priv->rps.max_freq_softlimit == 0)
4685 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4687 if (dev_priv->rps.min_freq_softlimit == 0) {
4688 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4689 dev_priv->rps.min_freq_softlimit =
4690 max_t(int, dev_priv->rps.efficient_freq,
4691 intel_freq_opcode(dev_priv, 450));
4693 dev_priv->rps.min_freq_softlimit =
4694 dev_priv->rps.min_freq;
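/*
 * Decode example (illustrative value): on the non-BXT layout above,
 * rp_state_cap = 0x00040b16 means RP0 = 0x16 (22 * 50 = 1100 MHz),
 * RP1 = 0x0b (550 MHz) and RPn = 0x04 (200 MHz), prior to any SKL
 * rescaling; BXT packs the same fields in the reverse order.
 */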
4698 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
4699 static void gen9_enable_rps(struct drm_device *dev)
4701 struct drm_i915_private *dev_priv = dev->dev_private;
4703 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4705 gen6_init_rps_frequencies(dev);
4707 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4708 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
4709 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4710 return;
4713 /* Program defaults and thresholds for RPS */
4714 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4715 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4717 /* 1 second timeout */
4718 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4719 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4721 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
4723 /* Leaning on the below call to gen6_set_rps to program/setup the
4724 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4725 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4726 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4727 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4729 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4732 static void gen9_enable_rc6(struct drm_device *dev)
4734 struct drm_i915_private *dev_priv = dev->dev_private;
4735 struct intel_engine_cs *ring;
4736 uint32_t rc6_mask = 0;
4739 /* 1a: Software RC state - RC0 */
4740 I915_WRITE(GEN6_RC_STATE, 0);
4742 /* 1b: Get forcewake during program sequence. Although the driver
4743 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4744 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4746 /* 2a: Disable RC states. */
4747 I915_WRITE(GEN6_RC_CONTROL, 0);
4749 /* 2b: Program RC6 thresholds.*/
4751 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4752 if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
4753 (INTEL_REVID(dev) <= SKL_REVID_E0)))
4754 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4755 else
4756 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4757 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4758 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4759 for_each_ring(ring, dev_priv, unused)
4760 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4762 if (HAS_GUC_UCODE(dev))
4763 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4765 I915_WRITE(GEN6_RC_SLEEP, 0);
4766 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4768 /* 2c: Program Coarse Power Gating Policies. */
4769 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4770 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4772 /* 3a: Enable RC6 */
4773 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4774 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4775 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4778 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
4779 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0))
4780 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4781 GEN7_RC_CTL_TO_MODE |
4782 rc6_mask);
4783 else
4784 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4785 GEN6_RC_CTL_EI_MODE(1) |
4786 rc6_mask);
4789 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4790 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4792 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
4793 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
4794 I915_WRITE(GEN9_PG_ENABLE, 0);
4796 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4797 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4799 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4803 static void gen8_enable_rps(struct drm_device *dev)
4805 struct drm_i915_private *dev_priv = dev->dev_private;
4806 struct intel_engine_cs *ring;
4807 uint32_t rc6_mask = 0;
4810 /* 1a: Software RC state - RC0 */
4811 I915_WRITE(GEN6_RC_STATE, 0);
4813 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4814 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4815 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4817 /* 2a: Disable RC states. */
4818 I915_WRITE(GEN6_RC_CONTROL, 0);
4820 /* Initialize rps frequencies */
4821 gen6_init_rps_frequencies(dev);
4823 /* 2b: Program RC6 thresholds.*/
4824 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4825 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4826 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4827 for_each_ring(ring, dev_priv, unused)
4828 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4829 I915_WRITE(GEN6_RC_SLEEP, 0);
4830 if (IS_BROADWELL(dev))
4831 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4832 else
4833 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4836 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4837 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4838 intel_print_rc6_info(dev, rc6_mask);
4839 if (IS_BROADWELL(dev))
4840 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4841 GEN7_RC_CTL_TO_MODE |
4842 rc6_mask);
4843 else
4844 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4845 GEN6_RC_CTL_EI_MODE(1) |
4846 rc6_mask);
4848 /* 4: Program defaults and thresholds for RPS */
4849 I915_WRITE(GEN6_RPNSWREQ,
4850 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4851 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4852 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4853 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4854 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4856 /* Docs recommend 900MHz, and 300 MHz respectively */
4857 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4858 dev_priv->rps.max_freq_softlimit << 24 |
4859 dev_priv->rps.min_freq_softlimit << 16);
4861 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4862 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
4863 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4864 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4866 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4869 I915_WRITE(GEN6_RP_CONTROL,
4870 GEN6_RP_MEDIA_TURBO |
4871 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4872 GEN6_RP_MEDIA_IS_GFX |
4874 GEN6_RP_UP_BUSY_AVG |
4875 GEN6_RP_DOWN_IDLE_AVG);
4877 /* 6: Ring frequency + overclocking (our driver does this later) */
4879 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4880 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4882 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
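/*
 * Unit check for the raw values above (illustrative): these RP
 * registers count in 1.28 us units, so 1 s = 100000000 / 128 = 781250
 * units, 76 ms = 7600000 / 128 = 59375 units, and the 66000-unit up
 * EI corresponds to 66000 * 1.28 us = 84.48 ms, matching the inline
 * comments.
 */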
4885 static void gen6_enable_rps(struct drm_device *dev)
4887 struct drm_i915_private *dev_priv = dev->dev_private;
4888 struct intel_engine_cs *ring;
4889 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4894 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4896 /* Here begins a magic sequence of register writes to enable
4897 * auto-downclocking.
4899 * Perhaps there might be some value in exposing these to
4900 * userspace...
4902 I915_WRITE(GEN6_RC_STATE, 0);
4904 /* Clear the DBG now so we don't confuse earlier errors */
4905 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4906 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4907 I915_WRITE(GTFIFODBG, gtfifodbg);
4910 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4912 /* Initialize rps frequencies */
4913 gen6_init_rps_frequencies(dev);
4915 /* disable the counters and set deterministic thresholds */
4916 I915_WRITE(GEN6_RC_CONTROL, 0);
4918 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4919 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4920 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4921 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4922 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4924 for_each_ring(ring, dev_priv, i)
4925 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4927 I915_WRITE(GEN6_RC_SLEEP, 0);
4928 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4929 if (IS_IVYBRIDGE(dev))
4930 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4931 else
4932 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4933 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4934 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4936 /* Check if we are enabling RC6 */
4937 rc6_mode = intel_enable_rc6(dev_priv->dev);
4938 if (rc6_mode & INTEL_RC6_ENABLE)
4939 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4941 /* We don't use those on Haswell */
4942 if (!IS_HASWELL(dev)) {
4943 if (rc6_mode & INTEL_RC6p_ENABLE)
4944 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4946 if (rc6_mode & INTEL_RC6pp_ENABLE)
4947 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4950 intel_print_rc6_info(dev, rc6_mask);
4952 I915_WRITE(GEN6_RC_CONTROL,
4953 rc6_mask |
4954 GEN6_RC_CTL_EI_MODE(1) |
4955 GEN6_RC_CTL_HW_ENABLE);
4957 /* Power down if completely idle for over 50ms */
4958 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4959 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4961 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4963 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4965 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4966 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4967 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4968 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4969 (pcu_mbox & 0xff) * 50);
4970 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4973 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4974 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4977 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4978 if (IS_GEN6(dev) && ret) {
4979 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4980 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4981 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4982 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4983 rc6vids &= 0xffff00;
4984 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4985 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4987 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4990 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
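/*
 * VID arithmetic sketch (assuming the usual GEN6 encoding of
 * 245 mV + 5 mV per step): a raw VID of 40 decodes to 40 * 5 + 245 =
 * 445 mV, below the 450 mV minimum the check above enforces, so it
 * would be re-encoded as (450 - 245) / 5 = 41.
 */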
4993 static void __gen6_update_ring_freq(struct drm_device *dev)
4995 struct drm_i915_private *dev_priv = dev->dev_private;
4996 int min_freq = 15;
4997 unsigned int gpu_freq;
4998 unsigned int max_ia_freq, min_ring_freq;
4999 unsigned int max_gpu_freq, min_gpu_freq;
5000 int scaling_factor = 180;
5001 struct cpufreq_policy *policy;
5003 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5005 policy = cpufreq_cpu_get(0);
5006 if (policy) {
5007 max_ia_freq = policy->cpuinfo.max_freq;
5008 cpufreq_cpu_put(policy);
5009 } else {
5011 * Default to measured freq if none found, PCU will ensure we
5012 * don't go over
5014 max_ia_freq = tsc_khz;
5017 /* Convert from kHz to MHz */
5018 max_ia_freq /= 1000;
5020 min_ring_freq = I915_READ(DCLK) & 0xf;
5021 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5022 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5024 if (IS_SKYLAKE(dev)) {
5025 /* Convert GT frequency to 50 MHz units */
5026 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5027 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5029 min_gpu_freq = dev_priv->rps.min_freq;
5030 max_gpu_freq = dev_priv->rps.max_freq;
5034 * For each potential GPU frequency, load a ring frequency we'd like
5035 * to use for memory access. We do this by specifying the IA frequency
5036 * the PCU should use as a reference to determine the ring frequency.
5038 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5039 int diff = max_gpu_freq - gpu_freq;
5040 unsigned int ia_freq = 0, ring_freq = 0;
5042 if (IS_SKYLAKE(dev)) {
5044 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5045 * No floor required for ring frequency on SKL.
5047 ring_freq = gpu_freq;
5048 } else if (INTEL_INFO(dev)->gen >= 8) {
5049 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5050 ring_freq = max(min_ring_freq, gpu_freq);
5051 } else if (IS_HASWELL(dev)) {
5052 ring_freq = mult_frac(gpu_freq, 5, 4);
5053 ring_freq = max(min_ring_freq, ring_freq);
5054 /* leave ia_freq as the default, chosen by cpufreq */
5056 /* On older processors, there is no separate ring
5057 * clock domain, so in order to boost the bandwidth
5058 * of the ring, we need to upclock the CPU (ia_freq).
5060 * For GPU frequencies less than 750MHz,
5061 * just use the lowest ring freq.
5063 if (gpu_freq < min_freq)
5064 ia_freq = 800;
5065 else
5066 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5067 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5070 sandybridge_pcode_write(dev_priv,
5071 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5072 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5073 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5074 gpu_freq);
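/*
 * Conversion example (illustrative): a DCLK low nibble of 3 means
 * 3 * 266.6 MHz of DDR frequency; mult_frac(3, 8, 3) = 8 expresses
 * the same bandwidth in the 100 MHz units the ring-frequency table
 * uses.
 */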
5078 void gen6_update_ring_freq(struct drm_device *dev)
5080 struct drm_i915_private *dev_priv = dev->dev_private;
5082 if (!HAS_CORE_RING_FREQ(dev))
5083 return;
5085 mutex_lock(&dev_priv->rps.hw_lock);
5086 __gen6_update_ring_freq(dev);
5087 mutex_unlock(&dev_priv->rps.hw_lock);
5090 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5092 struct drm_device *dev = dev_priv->dev;
5095 if (dev->pdev->revision >= 0x20) {
5096 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5098 switch (INTEL_INFO(dev)->eu_total) {
5099 case 8:
5100 /* (2 * 4) config */
5101 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5102 break;
5103 case 12:
5104 /* (2 * 6) config */
5105 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5106 break;
5107 case 16:
5108 /* (2 * 8) config */
5109 default:
5110 /* Setting (2 * 8) Min RP0 for any other combination */
5111 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5112 break;
5113 }
5114 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5115 } else {
5116 /* For pre-production hardware */
5117 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
5118 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5119 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
5124 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5128 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5129 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5134 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5136 struct drm_device *dev = dev_priv->dev;
5139 if (dev->pdev->revision >= 0x20) {
5140 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5141 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5142 } else {
5143 /* For pre-production hardware */
5144 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5145 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5146 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
5151 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5155 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5157 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5162 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5166 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5168 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5170 rp0 = min_t(u32, rp0, 0xea);
5175 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5179 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5180 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5181 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5182 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5187 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5189 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5192 /* Check that the pctx buffer wasn't moved under us. */
5193 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5195 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5197 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5198 dev_priv->vlv_pctx->stolen->start);
5202 /* Check that the pcbr address is not empty. */
5203 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5205 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5207 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5210 static void cherryview_setup_pctx(struct drm_device *dev)
5212 struct drm_i915_private *dev_priv = dev->dev_private;
5213 unsigned long pctx_paddr, paddr;
5214 struct i915_gtt *gtt = &dev_priv->gtt;
5216 int pctx_size = 32*1024;
5218 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5220 pcbr = I915_READ(VLV_PCBR);
5221 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5222 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5223 paddr = (dev_priv->mm.stolen_base +
5224 (gtt->stolen_size - pctx_size));
5226 pctx_paddr = (paddr & (~4095));
5227 I915_WRITE(VLV_PCBR, pctx_paddr);
5230 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
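/*
 * Placement arithmetic (illustrative addresses): with a hypothetical
 * stolen_base of 0x40000000 and 64 MiB of stolen memory, the 32 KiB
 * context sits at 0x40000000 + 0x4000000 - 0x8000 = 0x43ff8000, which
 * the & ~4095 mask leaves 4 KiB aligned before it is written to PCBR.
 */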
5233 static void valleyview_setup_pctx(struct drm_device *dev)
5235 struct drm_i915_private *dev_priv = dev->dev_private;
5236 struct drm_i915_gem_object *pctx;
5237 unsigned long pctx_paddr;
5239 int pctx_size = 24*1024;
5241 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5243 pcbr = I915_READ(VLV_PCBR);
5244 if (pcbr) {
5245 /* BIOS set it up already, grab the pre-alloc'd space */
5246 int pcbr_offset;
5248 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5249 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5250 pcbr_offset,
5251 I915_GTT_OFFSET_NONE,
5252 pctx_size);
5253 goto out;
5254 }
5256 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5259 * From the Gunit register HAS:
5260 * The Gfx driver is expected to program this register and ensure
5261 * proper allocation within Gfx stolen memory. For example, this
5262 * register should be programmed such that the PCBR range does not
5263 * overlap with other ranges, such as the frame buffer, protected
5264 * memory, or any other relevant ranges.
5266 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5268 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5272 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5273 I915_WRITE(VLV_PCBR, pctx_paddr);
5276 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5277 dev_priv->vlv_pctx = pctx;
5280 static void valleyview_cleanup_pctx(struct drm_device *dev)
5282 struct drm_i915_private *dev_priv = dev->dev_private;
5284 if (WARN_ON(!dev_priv->vlv_pctx))
5285 return;
5287 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
5288 dev_priv->vlv_pctx = NULL;
5291 static void valleyview_init_gt_powersave(struct drm_device *dev)
5293 struct drm_i915_private *dev_priv = dev->dev_private;
5296 valleyview_setup_pctx(dev);
5298 mutex_lock(&dev_priv->rps.hw_lock);
5300 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5301 switch ((val >> 6) & 3) {
5302 case 0:
5303 case 1:
5304 dev_priv->mem_freq = 800;
5305 break;
5306 case 2:
5307 dev_priv->mem_freq = 1066;
5308 break;
5309 case 3:
5310 dev_priv->mem_freq = 1333;
5311 break;
5312 }
5313 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5315 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5316 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5317 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5318 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5319 dev_priv->rps.max_freq);
5321 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5322 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5323 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5324 dev_priv->rps.efficient_freq);
5326 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5327 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5328 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5329 dev_priv->rps.rp1_freq);
5331 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5332 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5333 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5334 dev_priv->rps.min_freq);
5336 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5338 /* Preserve min/max settings in case of re-init */
5339 if (dev_priv->rps.max_freq_softlimit == 0)
5340 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5342 if (dev_priv->rps.min_freq_softlimit == 0)
5343 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5345 mutex_unlock(&dev_priv->rps.hw_lock);
5348 static void cherryview_init_gt_powersave(struct drm_device *dev)
5350 struct drm_i915_private *dev_priv = dev->dev_private;
5353 cherryview_setup_pctx(dev);
5355 mutex_lock(&dev_priv->rps.hw_lock);
5357 mutex_lock(&dev_priv->sb_lock);
5358 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5359 mutex_unlock(&dev_priv->sb_lock);
5361 switch ((val >> 2) & 0x7) {
5362 case 3:
5363 dev_priv->mem_freq = 2000;
5364 break;
5365 default:
5366 dev_priv->mem_freq = 1600;
5367 break;
5368 }
5369 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5371 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5372 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5373 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5374 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5375 dev_priv->rps.max_freq);
5377 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5378 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5379 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5380 dev_priv->rps.efficient_freq);
5382 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5383 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5384 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5385 dev_priv->rps.rp1_freq);
5387 /* PUnit validated range is only [RPe, RP0] */
5388 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5389 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5390 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5391 dev_priv->rps.min_freq);
5393 WARN_ONCE((dev_priv->rps.max_freq |
5394 dev_priv->rps.efficient_freq |
5395 dev_priv->rps.rp1_freq |
5396 dev_priv->rps.min_freq) & 1,
5397 "Odd GPU freq values\n");
5399 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5401 /* Preserve min/max settings in case of re-init */
5402 if (dev_priv->rps.max_freq_softlimit == 0)
5403 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5405 if (dev_priv->rps.min_freq_softlimit == 0)
5406 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5408 mutex_unlock(&dev_priv->rps.hw_lock);
5411 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5413 valleyview_cleanup_pctx(dev);
5416 static void cherryview_enable_rps(struct drm_device *dev)
5418 struct drm_i915_private *dev_priv = dev->dev_private;
5419 struct intel_engine_cs *ring;
5420 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5423 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5425 gtfifodbg = I915_READ(GTFIFODBG);
5427 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5429 I915_WRITE(GTFIFODBG, gtfifodbg);
5432 cherryview_check_pctx(dev_priv);
5434 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5435 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5436 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5438 /* Disable RC states. */
5439 I915_WRITE(GEN6_RC_CONTROL, 0);
5441 /* 2a: Program RC6 thresholds.*/
5442 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5443 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5444 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5446 for_each_ring(ring, dev_priv, i)
5447 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5448 I915_WRITE(GEN6_RC_SLEEP, 0);
5450 /* TO threshold set to 500 us (0x186 * 1.28 us) */
5451 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5453 /* allows RC6 residency counter to work */
5454 I915_WRITE(VLV_COUNTER_CONTROL,
5455 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5456 VLV_MEDIA_RC6_COUNT_EN |
5457 VLV_RENDER_RC6_COUNT_EN));
5459 /* For now we assume BIOS is allocating and populating the PCBR */
5460 pcbr = I915_READ(VLV_PCBR);
5463 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5464 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5465 rc6_mode = GEN7_RC_CTL_TO_MODE;
5467 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5469 /* 4 Program defaults and thresholds for RPS*/
5470 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5471 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5472 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5473 I915_WRITE(GEN6_RP_UP_EI, 66000);
5474 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5476 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5479 I915_WRITE(GEN6_RP_CONTROL,
5480 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5481 GEN6_RP_MEDIA_IS_GFX |
5483 GEN6_RP_UP_BUSY_AVG |
5484 GEN6_RP_DOWN_IDLE_AVG);
5486 /* Setting Fixed Bias */
5487 val = VLV_OVERRIDE_EN |
5489 CHV_BIAS_CPU_50_SOC_50;
5490 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5492 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5494 /* RPS code assumes GPLL is used */
5495 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5497 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5498 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5500 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5501 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5502 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5503 dev_priv->rps.cur_freq);
5505 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5506 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5507 dev_priv->rps.efficient_freq);
5509 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5511 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5514 static void valleyview_enable_rps(struct drm_device *dev)
5516 struct drm_i915_private *dev_priv = dev->dev_private;
5517 struct intel_engine_cs *ring;
5518 u32 gtfifodbg, val, rc6_mode = 0;
5521 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5523 valleyview_check_pctx(dev_priv);
5525 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
5526 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5528 I915_WRITE(GTFIFODBG, gtfifodbg);
5531 /* If VLV, Forcewake all wells, else re-direct to regular path */
5532 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5534 /* Disable RC states. */
5535 I915_WRITE(GEN6_RC_CONTROL, 0);
5537 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5538 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5539 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5540 I915_WRITE(GEN6_RP_UP_EI, 66000);
5541 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5543 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5545 I915_WRITE(GEN6_RP_CONTROL,
5546 GEN6_RP_MEDIA_TURBO |
5547 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5548 GEN6_RP_MEDIA_IS_GFX |
5550 GEN6_RP_UP_BUSY_AVG |
5551 GEN6_RP_DOWN_IDLE_CONT);
5553 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5554 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5555 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5557 for_each_ring(ring, dev_priv, i)
5558 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5560 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5562 /* allows RC6 residency counter to work */
5563 I915_WRITE(VLV_COUNTER_CONTROL,
5564 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5565 VLV_RENDER_RC0_COUNT_EN |
5566 VLV_MEDIA_RC6_COUNT_EN |
5567 VLV_RENDER_RC6_COUNT_EN));
5569 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
5570 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5572 intel_print_rc6_info(dev, rc6_mode);
5574 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5576 /* Setting Fixed Bias */
5577 val = VLV_OVERRIDE_EN |
5579 VLV_BIAS_CPU_125_SOC_875;
5580 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5582 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5584 /* RPS code assumes GPLL is used */
5585 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5587 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5588 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5590 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5591 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5592 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5593 dev_priv->rps.cur_freq);
5595 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5596 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5597 dev_priv->rps.efficient_freq);
5599 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5601 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5604 static unsigned long intel_pxfreq(u32 vidfreq)
5607 int div = (vidfreq & 0x3f0000) >> 16;
5608 int post = (vidfreq & 0x3000) >> 12;
5609 int pre = (vidfreq & 0x7);
5611 if (!pre)
5612 return 0;
5614 freq = ((div * 133333) / ((1<<post) * pre));
5616 return freq;
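/*
 * Worked example (illustrative): vidfreq = 0x000c1002 gives div = 12,
 * post = 1, pre = 2, so freq = 12 * 133333 / ((1 << 1) * 2) = 399999;
 * the 133333 constant and the later freq / 1000 use suggest kHz,
 * i.e. roughly a 400 MHz P-state.
 */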
5619 static const struct cparams {
5620 u16 i;
5621 u16 t;
5622 u16 m;
5623 u16 c;
5624 } cparams[] = {
5625 { 1, 1333, 301, 28664 },
5626 { 1, 1066, 294, 24460 },
5627 { 1, 800, 294, 25192 },
5628 { 0, 1333, 276, 27605 },
5629 { 0, 1066, 276, 27605 },
5630 { 0, 800, 231, 23784 },
5633 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5635 u64 total_count, diff, ret;
5636 u32 count1, count2, count3, m = 0, c = 0;
5637 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5640 assert_spin_locked(&mchdev_lock);
5642 diff1 = now - dev_priv->ips.last_time1;
5644 /* Prevent division-by-zero if we are asking too fast.
5645 * Also, we don't get interesting results if we are polling
5646 * faster than once in 10ms, so just return the saved value
5647 * in those cases.
5649 if (diff1 <= 10)
5650 return dev_priv->ips.chipset_power;
5652 count1 = I915_READ(DMIEC);
5653 count2 = I915_READ(DDREC);
5654 count3 = I915_READ(CSIEC);
5656 total_count = count1 + count2 + count3;
5658 /* FIXME: handle per-counter overflow */
5659 if (total_count < dev_priv->ips.last_count1) {
5660 diff = ~0UL - dev_priv->ips.last_count1;
5661 diff += total_count;
5662 } else {
5663 diff = total_count - dev_priv->ips.last_count1;
5666 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5667 if (cparams[i].i == dev_priv->ips.c_m &&
5668 cparams[i].t == dev_priv->ips.r_t) {
5669 m = cparams[i].m;
5670 c = cparams[i].c;
5671 break;
5675 diff = div_u64(diff, diff1);
5676 ret = ((m * diff) + c);
5677 ret = div_u64(ret, 10);
5679 dev_priv->ips.last_count1 = total_count;
5680 dev_priv->ips.last_time1 = now;
5682 dev_priv->ips.chipset_power = ret;
5687 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5689 struct drm_device *dev = dev_priv->dev;
5692 if (INTEL_INFO(dev)->gen != 5)
5693 return 0;
5695 spin_lock_irq(&mchdev_lock);
5697 val = __i915_chipset_val(dev_priv);
5699 spin_unlock_irq(&mchdev_lock);
5704 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5706 unsigned long m, x, b;
5709 tsfs = I915_READ(TSFS);
5711 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5712 x = I915_READ8(TR1);
5714 b = tsfs & TSFS_INTR_MASK;
5716 return ((m * x) / 127) - b;
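/*
 * Illustrative only (the register contents are hardware calibration
 * data): with a slope m = 120 from TSFS, a TR1 reading x = 100 and an
 * intercept b = 50, this returns (120 * 100) / 127 - 50 = 44.
 */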
5719 static int _pxvid_to_vd(u8 pxvid)
5721 if (pxvid == 0)
5722 return 0;
5724 if (pxvid >= 8 && pxvid < 31)
5725 pxvid = 31;
5727 return (pxvid + 2) * 125;
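/*
 * E.g. (illustrative): pxvid = 16 falls in [8, 31) and is clamped to
 * 31, giving (31 + 2) * 125 = 4125, while pxvid = 40 maps directly to
 * (40 + 2) * 125 = 5250.
 */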
5730 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5732 struct drm_device *dev = dev_priv->dev;
5733 const int vd = _pxvid_to_vd(pxvid);
5734 const int vm = vd - 1125;
5736 if (INTEL_INFO(dev)->is_mobile)
5737 return vm > 0 ? vm : 0;
5739 return vd;
5742 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5744 u64 now, diff, diffms;
5747 assert_spin_locked(&mchdev_lock);
5749 now = ktime_get_raw_ns();
5750 diffms = now - dev_priv->ips.last_time2;
5751 do_div(diffms, NSEC_PER_MSEC);
5753 /* Don't divide by 0 */
5754 if (!diffms)
5755 return;
5757 count = I915_READ(GFXEC);
5759 if (count < dev_priv->ips.last_count2) {
5760 diff = ~0UL - dev_priv->ips.last_count2;
5761 diff += count;
5762 } else {
5763 diff = count - dev_priv->ips.last_count2;
5766 dev_priv->ips.last_count2 = count;
5767 dev_priv->ips.last_time2 = now;
5769 /* More magic constants... */
5770 diff = diff * 1181;
5771 diff = div_u64(diff, diffms * 10);
5772 dev_priv->ips.gfx_power = diff;
5775 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5777 struct drm_device *dev = dev_priv->dev;
5779 if (INTEL_INFO(dev)->gen != 5)
5780 return;
5782 spin_lock_irq(&mchdev_lock);
5784 __i915_update_gfx_val(dev_priv);
5786 spin_unlock_irq(&mchdev_lock);
5789 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5791 unsigned long t, corr, state1, corr2, state2;
5794 assert_spin_locked(&mchdev_lock);
5796 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
5797 pxvid = (pxvid >> 24) & 0x7f;
5798 ext_v = pvid_to_extvid(dev_priv, pxvid);
5800 state1 = ext_v;
5802 t = i915_mch_val(dev_priv);
5804 /* Revel in the empirically derived constants */
5806 /* Correction factor in 1/100000 units */
5807 if (t > 80)
5808 corr = ((t * 2349) + 135940);
5809 else if (t >= 50)
5810 corr = ((t * 964) + 29317);
5811 else /* < 50 */
5812 corr = ((t * 301) + 1004);
5814 corr = corr * ((150142 * state1) / 10000 - 78642);
5815 corr /= 100000;
5816 corr2 = (corr * dev_priv->ips.corr);
5818 state2 = (corr2 * state1) / 10000;
5819 state2 /= 100; /* convert to mW */
5821 __i915_update_gfx_val(dev_priv);
5823 return dev_priv->ips.gfx_power + state2;
5826 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5828 struct drm_device *dev = dev_priv->dev;
5831 if (INTEL_INFO(dev)->gen != 5)
5832 return 0;
5834 spin_lock_irq(&mchdev_lock);
5836 val = __i915_gfx_val(dev_priv);
5838 spin_unlock_irq(&mchdev_lock);
5844 * i915_read_mch_val - return value for IPS use
5846 * Calculate and return a value for the IPS driver to use when deciding whether
5847 * we have thermal and power headroom to increase CPU or GPU power budget.
5849 unsigned long i915_read_mch_val(void)
5851 struct drm_i915_private *dev_priv;
5852 unsigned long chipset_val, graphics_val, ret = 0;
5854 spin_lock_irq(&mchdev_lock);
5855 if (!i915_mch_dev)
5856 goto out_unlock;
5857 dev_priv = i915_mch_dev;
5859 chipset_val = __i915_chipset_val(dev_priv);
5860 graphics_val = __i915_gfx_val(dev_priv);
5862 ret = chipset_val + graphics_val;
5864 out_unlock:
5865 spin_unlock_irq(&mchdev_lock);
5869 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5872 * i915_gpu_raise - raise GPU frequency limit
5874 * Raise the limit; IPS indicates we have thermal headroom.
5876 bool i915_gpu_raise(void)
5878 struct drm_i915_private *dev_priv;
5881 spin_lock_irq(&mchdev_lock);
5882 if (!i915_mch_dev) {
5883 ret = false;
5884 goto out_unlock;
5885 }
5886 dev_priv = i915_mch_dev;
5888 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5889 dev_priv->ips.max_delay--;
5891 out_unlock:
5892 spin_unlock_irq(&mchdev_lock);
5896 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5899 * i915_gpu_lower - lower GPU frequency limit
5901 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5902 * frequency maximum.
5904 bool i915_gpu_lower(void)
5906 struct drm_i915_private *dev_priv;
5909 spin_lock_irq(&mchdev_lock);
5910 if (!i915_mch_dev) {
5911 ret = false;
5912 goto out_unlock;
5913 }
5914 dev_priv = i915_mch_dev;
5916 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5917 dev_priv->ips.max_delay++;
5919 out_unlock:
5920 spin_unlock_irq(&mchdev_lock);
5924 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5927 * i915_gpu_busy - indicate GPU business to IPS
5929 * Tell the IPS driver whether or not the GPU is busy.
5931 bool i915_gpu_busy(void)
5933 struct drm_i915_private *dev_priv;
5934 struct intel_engine_cs *ring;
5938 spin_lock_irq(&mchdev_lock);
5939 if (!i915_mch_dev)
5940 goto out_unlock;
5941 dev_priv = i915_mch_dev;
5943 for_each_ring(ring, dev_priv, i)
5944 ret |= !list_empty(&ring->request_list);
5946 out_unlock:
5947 spin_unlock_irq(&mchdev_lock);
5951 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5954 * i915_gpu_turbo_disable - disable graphics turbo
5956 * Disable graphics turbo by resetting the max frequency and setting the
5957 * current frequency to the default.
5959 bool i915_gpu_turbo_disable(void)
5961 struct drm_i915_private *dev_priv;
5964 spin_lock_irq(&mchdev_lock);
5965 if (!i915_mch_dev) {
5966 ret = false;
5967 goto out_unlock;
5968 }
5969 dev_priv = i915_mch_dev;
5971 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5973 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5974 ret = false;
5976 out_unlock:
5977 spin_unlock_irq(&mchdev_lock);
5981 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5984 * Tells the intel_ips driver that the i915 driver is now loaded, if
5985 * IPS got loaded first.
5987 * This awkward dance is so that neither module has to depend on the
5988 * other in order for IPS to do the appropriate communication of
5989 * GPU turbo limits to i915.
5992 ips_ping_for_i915_load(void)
5994 void (*link)(void);
5996 link = symbol_get(ips_link_to_i915_driver);
5997 if (link) {
5998 link();
5999 symbol_put(ips_link_to_i915_driver);
6003 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6005 /* We only register the i915 ips part with intel-ips once everything is
6006 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6007 spin_lock_irq(&mchdev_lock);
6008 i915_mch_dev = dev_priv;
6009 spin_unlock_irq(&mchdev_lock);
6011 ips_ping_for_i915_load();
6014 void intel_gpu_ips_teardown(void)
6016 spin_lock_irq(&mchdev_lock);
6017 i915_mch_dev = NULL;
6018 spin_unlock_irq(&mchdev_lock);
6021 static void intel_init_emon(struct drm_device *dev)
6023 struct drm_i915_private *dev_priv = dev->dev_private;
6028 /* Disable to program */
6029 I915_WRITE(ECR, 0);
6030 POSTING_READ(ECR);
6032 /* Program energy weights for various events */
6033 I915_WRITE(SDEW, 0x15040d00);
6034 I915_WRITE(CSIEW0, 0x007f0000);
6035 I915_WRITE(CSIEW1, 0x1e220004);
6036 I915_WRITE(CSIEW2, 0x04000004);
6038 for (i = 0; i < 5; i++)
6039 I915_WRITE(PEW(i), 0);
6040 for (i = 0; i < 3; i++)
6041 I915_WRITE(DEW(i), 0);
6043 /* Program P-state weights to account for frequency power adjustment */
6044 for (i = 0; i < 16; i++) {
6045 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6046 unsigned long freq = intel_pxfreq(pxvidfreq);
6047 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6048 PXVFREQ_PX_SHIFT;
6049 unsigned long val;
6051 val = vid * vid;
6052 val *= (freq / 1000);
6053 val *= 255;
6054 val /= (127*127*900);
6055 if (val > 0xff)
6056 DRM_ERROR("bad pxval: %ld\n", val);
6057 pxw[i] = val;
6059 /* Render standby states get 0 weight */
6060 pxw[14] = 0;
6061 pxw[15] = 0;
6063 for (i = 0; i < 4; i++) {
6064 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6065 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6066 I915_WRITE(PXW(i), val);
6069 /* Adjust magic regs to magic values (more experimental results) */
6070 I915_WRITE(OGW0, 0);
6071 I915_WRITE(OGW1, 0);
6072 I915_WRITE(EG0, 0x00007f00);
6073 I915_WRITE(EG1, 0x0000000e);
6074 I915_WRITE(EG2, 0x000e0000);
6075 I915_WRITE(EG3, 0x68000300);
6076 I915_WRITE(EG4, 0x42000000);
6077 I915_WRITE(EG5, 0x00140031);
6081 for (i = 0; i < 8; i++)
6082 I915_WRITE(PXWL(i), 0);
6084 /* Enable PMON + select events */
6085 I915_WRITE(ECR, 0x80000019);
6087 lcfuse = I915_READ(LCFUSE02);
6089 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
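/*
 * Packing example for the PXW writes above (illustrative weights):
 * four 8-bit weights go into each register MSB-first, so pxw[0..3] =
 * 0x10, 0x20, 0x30, 0x40 would program PXW(0) = 0x10203040.
 */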
6092 void intel_init_gt_powersave(struct drm_device *dev)
6094 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
6096 if (IS_CHERRYVIEW(dev))
6097 cherryview_init_gt_powersave(dev);
6098 else if (IS_VALLEYVIEW(dev))
6099 valleyview_init_gt_powersave(dev);
6102 void intel_cleanup_gt_powersave(struct drm_device *dev)
6104 if (IS_CHERRYVIEW(dev))
6105 return;
6106 else if (IS_VALLEYVIEW(dev))
6107 valleyview_cleanup_gt_powersave(dev);
6110 static void gen6_suspend_rps(struct drm_device *dev)
6112 struct drm_i915_private *dev_priv = dev->dev_private;
6114 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6116 gen6_disable_rps_interrupts(dev);
6120 * intel_suspend_gt_powersave - suspend PM work and helper threads
6123 * We don't want to disable RC6 or other features here, we just want
6124 * to make sure any work we've queued has finished and won't bother
6125 * us while we're suspended.
6127 void intel_suspend_gt_powersave(struct drm_device *dev)
6129 struct drm_i915_private *dev_priv = dev->dev_private;
6131 if (INTEL_INFO(dev)->gen < 6)
6132 return;
6134 gen6_suspend_rps(dev);
6136 /* Force GPU to min freq during suspend */
6137 gen6_rps_idle(dev_priv);
6140 void intel_disable_gt_powersave(struct drm_device *dev)
6142 struct drm_i915_private *dev_priv = dev->dev_private;
6144 if (IS_IRONLAKE_M(dev)) {
6145 ironlake_disable_drps(dev);
6146 } else if (INTEL_INFO(dev)->gen >= 6) {
6147 intel_suspend_gt_powersave(dev);
6149 mutex_lock(&dev_priv->rps.hw_lock);
6150 if (INTEL_INFO(dev)->gen >= 9)
6151 gen9_disable_rps(dev);
6152 else if (IS_CHERRYVIEW(dev))
6153 cherryview_disable_rps(dev);
6154 else if (IS_VALLEYVIEW(dev))
6155 valleyview_disable_rps(dev);
6156 else
6157 gen6_disable_rps(dev);
6159 dev_priv->rps.enabled = false;
6160 mutex_unlock(&dev_priv->rps.hw_lock);
6164 static void intel_gen6_powersave_work(struct work_struct *work)
6166 struct drm_i915_private *dev_priv =
6167 container_of(work, struct drm_i915_private,
6168 rps.delayed_resume_work.work);
6169 struct drm_device *dev = dev_priv->dev;
6171 mutex_lock(&dev_priv->rps.hw_lock);
6173 gen6_reset_rps_interrupts(dev);
6175 if (IS_CHERRYVIEW(dev)) {
6176 cherryview_enable_rps(dev);
6177 } else if (IS_VALLEYVIEW(dev)) {
6178 valleyview_enable_rps(dev);
6179 } else if (INTEL_INFO(dev)->gen >= 9) {
6180 gen9_enable_rc6(dev);
6181 gen9_enable_rps(dev);
6182 if (IS_SKYLAKE(dev))
6183 __gen6_update_ring_freq(dev);
6184 } else if (IS_BROADWELL(dev)) {
6185 gen8_enable_rps(dev);
6186 __gen6_update_ring_freq(dev);
6187 } else {
6188 gen6_enable_rps(dev);
6189 __gen6_update_ring_freq(dev);
6192 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6193 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6195 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6196 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6198 dev_priv->rps.enabled = true;
6200 gen6_enable_rps_interrupts(dev);
6202 mutex_unlock(&dev_priv->rps.hw_lock);
6204 intel_runtime_pm_put(dev_priv);
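/*
 * Enable GPU power management. Ironlake is handled synchronously under
 * struct_mutex; gen6+ defers the heavy lifting to the worker above to
 * keep driver load and resume fast.
 */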
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev))
		return;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);
	dev_priv->rps.enabled = false;
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

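/*
 * Disable "trickle feed" on every display plane. The dummy DSPSURF
 * rewrite plus posting read appears to be there to latch the
 * double-buffered plane registers so the new DSPCNTR value takes effect.
 */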
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	uint32_t misccpctl;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:bdw
	 * WaTempDisableDOPClkGating:bdw
	 */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		if (IS_BROXTON(dev))
			dev_priv->display.init_clock_gating =
				bxt_init_clock_gating;
		else if (IS_SKYLAKE(dev))
			dev_priv->display.init_clock_gating =
				skl_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

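/*
 * Mailbox protocol for talking to the PCU firmware ("pcode"): write the
 * payload (and, for reads, a zeroed DATA1), set the READY bit together
 * with the mailbox command, then poll until the firmware clears READY
 * again. Callers must hold rps.hw_lock.
 *
 * A minimal usage sketch; the mailbox constant here is only illustrative,
 * pick whichever command the platform actually supports:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val) == 0)
 *		DRM_DEBUG_DRIVER("OC params: 0x%08x\n", val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */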
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

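/*
 * Write-only variant of the pcode handshake above: the DATA register
 * carries the payload and nothing is read back besides the READY bit.
 */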
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

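/*
 * GPU frequencies on VLV/CHV are derived from the CZ clock. This lookup
 * returns the divider matching a given CZ clock (in MHz), or -1 for an
 * unknown clock, which the conversion helpers below propagate as an
 * error.
 */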
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

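/*
 * Worked example for the opcode -> MHz mapping below (plain arithmetic
 * on the formula, with an illustrative opcode): with a 320 MHz CZ clock
 * the divider is 16, so opcode 0xc0 gives
 * 320 * (0xc0 + 6 - 0xbd) / 16 = 320 * 9 / 16 = 180 MHz.
 */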
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

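/*
 * Inverse of byt_gpu_freq(): maps a frequency in MHz back to the PUnit
 * opcode, rounding to the closest representable step.
 */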
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

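/*
 * The CHV variants count in half steps of the VLV divider, and the
 * opcode side must stay even. Worked example with illustrative numbers:
 * with a 400 MHz CZ clock the half-divider is 20 / 2 = 10, so opcode 52
 * gives DIV_ROUND_CLOSEST(400 * 52, 2 * 10) / 2 = 1040 / 2 = 520 MHz.
 */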
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	div = vlv_gpu_freq_div(czclk_freq) / 2;
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

	mul = vlv_gpu_freq_div(czclk_freq) / 2;
	if (mul < 0)
		return mul;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}

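/*
 * Public conversion helpers between PUnit opcodes and MHz. On gen9 the
 * units come out to 50/3 MHz (GT_FREQUENCY_MULTIPLIER scaled by
 * GEN9_FREQ_SCALER), on VLV/CHV they depend on the CZ clock as above,
 * and everywhere else an opcode is a plain multiple of 50 MHz.
 */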
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_freq_opcode(dev_priv, val);
	else
		return val / GT_FREQUENCY_MULTIPLIER;
}

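/*
 * Fire-and-forget RPS boost on behalf of a request: the request is
 * referenced, handed to a worker, and the boost is skipped if the
 * request has already completed by the time the worker runs.
 */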
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req, true))
		gen6_rps_boost(to_i915(req->ring->dev), NULL,
			       req->emitted_jiffies);

	i915_gem_request_unreference__unlocked(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);
}

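/*
 * One-time setup of the PM bookkeeping (locks, boost client lists and
 * the deferred powersave worker); called early during driver load.
 */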
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
}