/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
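
/*
 * Illustrative sketch (not part of the driver): through the token pasting
 * above, an invocation like GEN5_IRQ_RESET(GT) expands to roughly
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * IIR is written twice because, per the comment above, the hardware can
 * latch a second pending event behind the first one.
 */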
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
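
/*
 * Illustrative usage sketch (hypothetical values): a postinstall hook would
 * first assert that IIR is clean and then program the mask and enable
 * registers in one go, e.g.
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which checks GTIIR, writes GTIMR and GTIER, and posts the IER write.
 */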
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
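
/*
 * Worked example (illustrative, using the DE_AUX_CHANNEL_A bit): DEIMR is a
 * mask register, so a 0 bit means "interrupt delivered". Starting from
 * irq_mask == 0xffffffff, ironlake_enable_display_irq(dev_priv,
 * DE_AUX_CHANNEL_A) clears that bit, leaving irq_mask == ~DE_AUX_CHANNEL_A,
 * and the AUX A interrupt starts firing; the disable path sets it again.
 */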
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
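
/*
 * Worked example (illustrative): the two-mask convention used by
 * ilk_update_gt_irq() and the similar helpers below is "interrupt_mask
 * selects which bits to touch, enabled_irq_mask says which of those to
 * enable". So
 *
 *	ilk_update_gt_irq(dev_priv, mask, mask);	enables mask
 *	ilk_update_gt_irq(dev_priv, mask, 0);	disables mask
 *
 * and any bit outside interrupt_mask keeps its previous IMR state.
 */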
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;

	spin_lock_irq(&dev_priv->irq_lock);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irq(&dev_priv->irq_lock);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev) || IS_GEN9(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
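
/*
 * Worked example (illustrative): in PIPESTAT the enable bits live in the
 * high 16 bits, directly above their status bits, so for most sources
 * "status << 16" yields the matching enable bit, e.g. a status_mask of
 * PIPE_VBLANK_INTERRUPT_STATUS produces PIPE_VBLANK_INTERRUPT_ENABLE.
 * The sprite flip done bits handled above are the exception to this
 * pattern on VLV, and the underrun status bit has no enable bit at all.
 */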
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
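
/*
 * Worked example (illustrative numbers): if the high field reads 0x12, the
 * low field reads 0x34 and the pixel counter is already past vbl_start, the
 * cooked counter is ((0x12 << 8) | 0x34) + 1 = 0x1235. The +1 makes the
 * software counter increment at the start of vblank instead of at frame
 * start, matching the gen4+ hardware counter behaviour.
 */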
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
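
/*
 * Worked example (illustrative numbers): with vbl_start = 480,
 * vbl_end = 500 and vtotal = 525, a position of 490 (inside vblank) is
 * reported as 490 - 500 = -10, counting up to 0 at vbl_end, while a
 * position of 200 (inside active) is reported as 200 + 525 - 500 = 225.
 * This signed convention is what the DRM timestamping core expects from
 * the driver's scanout position callback.
 */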
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
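
/*
 * Worked example (illustrative numbers): after the divisions above, both the
 * timestamp delta and the C0 counts are in milliseconds. If the busier of
 * the render/media wells spent 50 ms in C0 over a 100 ms evaluation
 * interval, the function returns (50 * 100) / 100 = 50, i.e. 50% residency,
 * which the caller compares against the up/down thresholds.
 */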
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
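
/*
 * In other words (illustrative): if more than HPD_STORM_THRESHOLD (5)
 * interrupts arrive on one pin within a HPD_STORM_DETECT_PERIOD (1000 ms)
 * window, the pin is marked as storming, its interrupt is disabled and the
 * connector falls back to polling until the reenable timer runs.
 */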
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift = 0;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
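
/*
 * Worked example (illustrative): for PORT_B behind a PCH,
 * pch_port_to_hotplug_shift() returns 0, so the long/short decision above is
 *
 *	long_hpd = (dig_hotplug_reg >> 0) & PORTB_HOTPLUG_LONG_DETECT;
 *
 * A long pulse queues the dig_port work for a full re-detection, while a
 * short pulse lets the encoder's hpd_pulse() handler service the sink
 * without a full hotplug cycle.
 */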
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
2013 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
2015 if (!drm_handle_vblank(dev, pipe))
2021 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2023 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 pipe_stats[I915_MAX_PIPES] = { };
2027 spin_lock(&dev_priv->irq_lock);
2028 for_each_pipe(dev_priv, pipe) {
2030 u32 mask, iir_bit = 0;
2033 * PIPESTAT bits get signalled even when the interrupt is
2034 * disabled with the mask bits, and some of the status bits do
2035 * not generate interrupts at all (like the underrun bit). Hence
2036 * we need to be careful that we only handle what we want to
2040 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2041 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2045 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2048 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2051 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2055 mask |= dev_priv->pipestat_irq_mask[pipe];
2060 reg = PIPESTAT(pipe);
2061 mask |= PIPESTAT_INT_ENABLE_MASK;
2062 pipe_stats[pipe] = I915_READ(reg) & mask;
2065 * Clear the PIPE*STAT regs before the IIR
2067 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2068 PIPESTAT_INT_STATUS_MASK))
2069 I915_WRITE(reg, pipe_stats[pipe]);
2071 spin_unlock(&dev_priv->irq_lock);
2073 for_each_pipe(dev_priv, pipe) {
2074 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2075 intel_pipe_handle_vblank(dev, pipe))
2076 intel_check_page_flip(dev, pipe);
2078 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2079 intel_prepare_page_flip(dev, pipe);
2080 intel_finish_page_flip(dev, pipe);
2083 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2084 i9xx_pipe_crc_irq_handler(dev, pipe);
2086 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2087 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2088 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2091 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2092 gmbus_irq_handler(dev);
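/*
 * Hotplug on these platforms is latched in PORT_HOTPLUG_STAT rather than
 * in IIR, so the status must be read, written back to clear it, and the
 * clear flushed with a posting read before IIR itself is cleared;
 * otherwise a hotplug event arriving in that window could be lost.
 */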
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

        if (hotplug_status) {
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                /*
                 * Make sure hotplug status is cleared before we clear IIR, or else we
                 * may miss hotplug events.
                 */
                POSTING_READ(PORT_HOTPLUG_STAT);

                if (IS_G4X(dev)) {
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

                        intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
                } else {
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
                }

                if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
                    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        dp_aux_irq_handler(dev);
        }
}
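/*
 * The VLV handler loops until one pass reads all three interrupt identity
 * registers (GTIIR, GEN6_PMIIR, VLV_IIR) as zero: each pass snapshots
 * them, writes the values back to ack them, and only then dispatches to
 * the GT, RPS and pipestat handlers.
 */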
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;

        while (true) {
                /* Find, clear, then process each source of interrupt */

                gt_iir = I915_READ(GTIIR);
                if (gt_iir)
                        I915_WRITE(GTIIR, gt_iir);

                pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir)
                        I915_WRITE(GEN6_PMIIR, pm_iir);

                iir = I915_READ(VLV_IIR);
                if (iir) {
                        /* Consume port before clearing IIR or we'll miss events */
                        if (iir & I915_DISPLAY_PORT_INTERRUPT)
                                i9xx_hpd_irq_handler(dev);
                        I915_WRITE(VLV_IIR, iir);
                }

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                if (gt_iir)
                        snb_gt_irq_handler(dev, dev_priv, gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_handler(dev, iir);
        }

out:
        return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 master_ctl, iir;
        irqreturn_t ret = IRQ_NONE;

        for (;;) {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);

                if (master_ctl == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                I915_WRITE(GEN8_MASTER_IRQ, 0);

                /* Find, clear, then process each source of interrupt */

                if (iir) {
                        /* Consume port before clearing IIR or we'll miss events */
                        if (iir & I915_DISPLAY_PORT_INTERRUPT)
                                i9xx_hpd_irq_handler(dev);
                        I915_WRITE(VLV_IIR, iir);
                }

                gen8_gt_irq_handler(dev, dev_priv, master_ctl);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_handler(dev, iir);

                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
        }

        return ret;
}
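/*
 * South display engine (PCH) interrupt decoding, IBX flavour. Most SDE
 * bits are only logged for debugging; the ones with real handling are
 * hotplug, AUX, GMBUS and the transcoder FIFO underruns.
 */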
2208 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2210 struct drm_i915_private *dev_priv = dev->dev_private;
2212 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2213 u32 dig_hotplug_reg;
2215 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2216 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2218 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2220 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2221 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2222 SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port);
        }
2227 if (pch_iir & SDE_AUX_MASK)
2228 dp_aux_irq_handler(dev);
2230 if (pch_iir & SDE_GMBUS)
2231 gmbus_irq_handler(dev);
2233 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2234 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2236 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2237 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2239 if (pch_iir & SDE_POISON)
2240 DRM_ERROR("PCH poison interrupt\n");
2242 if (pch_iir & SDE_FDI_MASK)
2243 for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));
2248 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2249 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2251 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2252 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_ERROR("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
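/*
 * GEN7_ERR_INT collects poison, per-pipe FIFO underrun and pipe CRC done
 * events; it is a separate status register that is read and written back
 * to clear here, rather than being acked through IIR.
 */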
2265 static void ivb_err_int_handler(struct drm_device *dev)
2267 struct drm_i915_private *dev_priv = dev->dev_private;
2268 u32 err_int = I915_READ(GEN7_ERR_INT);
2271 if (err_int & ERR_INT_POISON)
2272 DRM_ERROR("Poison interrupt\n");
2274 for_each_pipe(dev_priv, pipe) {
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
                                DRM_ERROR("Pipe %c FIFO underrun\n",
                                          pipe_name(pipe));
                }
2282 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2283 if (IS_IVYBRIDGE(dev))
2284 ivb_pipe_crc_irq_handler(dev, pipe);
2286 hsw_pipe_crc_irq_handler(dev, pipe);
2290 I915_WRITE(GEN7_ERR_INT, err_int);
2293 static void cpt_serr_int_handler(struct drm_device *dev)
2295 struct drm_i915_private *dev_priv = dev->dev_private;
2296 u32 serr_int = I915_READ(SERR_INT);
2298 if (serr_int & SERR_INT_POISON)
2299 DRM_ERROR("PCH poison interrupt\n");
        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_ERROR("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_ERROR("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_ERROR("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}
2319 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2321 struct drm_i915_private *dev_priv = dev->dev_private;
2323 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2324 u32 dig_hotplug_reg;
2326 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2327 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2329 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2331 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2332 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2333 SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }
2338 if (pch_iir & SDE_AUX_MASK_CPT)
2339 dp_aux_irq_handler(dev);
2341 if (pch_iir & SDE_GMBUS_CPT)
2342 gmbus_irq_handler(dev);
2344 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2345 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2347 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2348 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2350 if (pch_iir & SDE_FDI_MASK_CPT)
2351 for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));
2356 if (pch_iir & SDE_ERROR_CPT)
2357 cpt_serr_int_handler(dev);
2360 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2362 struct drm_i915_private *dev_priv = dev->dev_private;
2365 if (de_iir & DE_AUX_CHANNEL_A)
2366 dp_aux_irq_handler(dev);
2368 if (de_iir & DE_GSE)
2369 intel_opregion_asle_intr(dev);
2371 if (de_iir & DE_POISON)
2372 DRM_ERROR("Poison interrupt\n");
2374 for_each_pipe(dev_priv, pipe) {
2375 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2376 intel_pipe_handle_vblank(dev, pipe))
2377 intel_check_page_flip(dev, pipe);
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
                                DRM_ERROR("Pipe %c FIFO underrun\n",
                                          pipe_name(pipe));
2384 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2385 i9xx_pipe_crc_irq_handler(dev, pipe);
2387 /* plane/pipes map 1:1 on ilk+ */
2388 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2389 intel_prepare_page_flip(dev, pipe);
2390 intel_finish_page_flip_plane(dev, pipe);
2394 /* check event from PCH */
2395 if (de_iir & DE_PCH_EVENT) {
2396 u32 pch_iir = I915_READ(SDEIIR);
2398 if (HAS_PCH_CPT(dev))
2399 cpt_irq_handler(dev, pch_iir);
2401 ibx_irq_handler(dev, pch_iir);
                /* should clear PCH hotplug event before clearing the CPU irq */
2404 I915_WRITE(SDEIIR, pch_iir);
2407 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2408 ironlake_rps_change_irq_handler(dev);
2411 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2413 struct drm_i915_private *dev_priv = dev->dev_private;
2416 if (de_iir & DE_ERR_INT_IVB)
2417 ivb_err_int_handler(dev);
2419 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2420 dp_aux_irq_handler(dev);
2422 if (de_iir & DE_GSE_IVB)
2423 intel_opregion_asle_intr(dev);
2425 for_each_pipe(dev_priv, pipe) {
2426 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2427 intel_pipe_handle_vblank(dev, pipe))
2428 intel_check_page_flip(dev, pipe);
2430 /* plane/pipes map 1:1 on ilk+ */
2431 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2432 intel_prepare_page_flip(dev, pipe);
2433 intel_finish_page_flip_plane(dev, pipe);
2437 /* check event from PCH */
2438 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2439 u32 pch_iir = I915_READ(SDEIIR);
2441 cpt_irq_handler(dev, pch_iir);
                /* clear PCH hotplug event before clearing the CPU irq */
2444 I915_WRITE(SDEIIR, pch_iir);
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2456 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2458 struct drm_device *dev = arg;
2459 struct drm_i915_private *dev_priv = dev->dev_private;
2460 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2461 irqreturn_t ret = IRQ_NONE;
2463 /* We get interrupts on unclaimed registers, so check for this before we
2464 * do any I915_{READ,WRITE}. */
2465 intel_uncore_check_errors(dev);
2467 /* disable master interrupt before clearing iir */
2468 de_ier = I915_READ(DEIER);
2469 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2470 POSTING_READ(DEIER);
        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
2477 if (!HAS_PCH_NOP(dev)) {
2478 sde_ier = I915_READ(SDEIER);
2479 I915_WRITE(SDEIER, 0);
2480 POSTING_READ(SDEIER);
2483 /* Find, clear, then process each source of interrupt */
2485 gt_iir = I915_READ(GTIIR);
2487 I915_WRITE(GTIIR, gt_iir);
2489 if (INTEL_INFO(dev)->gen >= 6)
2490 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2492 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2495 de_iir = I915_READ(DEIIR);
2497 I915_WRITE(DEIIR, de_iir);
2499 if (INTEL_INFO(dev)->gen >= 7)
2500 ivb_display_irq_handler(dev, de_iir);
2502 ilk_display_irq_handler(dev, de_iir);
2505 if (INTEL_INFO(dev)->gen >= 6) {
2506 u32 pm_iir = I915_READ(GEN6_PMIIR);
2508 I915_WRITE(GEN6_PMIIR, pm_iir);
2510 gen6_rps_irq_handler(dev_priv, pm_iir);
2514 I915_WRITE(DEIER, de_ier);
2515 POSTING_READ(DEIER);
2516 if (!HAS_PCH_NOP(dev)) {
2517 I915_WRITE(SDEIER, sde_ier);
2518 POSTING_READ(SDEIER);
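/*
 * Gen8 follows the same find/clear/process scheme, but everything hangs
 * off a single GEN8_MASTER_IRQ register: the master enable bit is dropped
 * while the per-domain IIRs (GT, DE misc/port/pipe, PCH) are walked, and
 * re-enabled once they have all been serviced.
 */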
2524 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2526 struct drm_device *dev = arg;
2527 struct drm_i915_private *dev_priv = dev->dev_private;
2529 irqreturn_t ret = IRQ_NONE;
2533 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2534 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2538 I915_WRITE(GEN8_MASTER_IRQ, 0);
2539 POSTING_READ(GEN8_MASTER_IRQ);
2541 /* Find, clear, then process each source of interrupt */
2543 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2545 if (master_ctl & GEN8_DE_MISC_IRQ) {
2546 tmp = I915_READ(GEN8_DE_MISC_IIR);
2548 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2550 if (tmp & GEN8_DE_MISC_GSE)
2551 intel_opregion_asle_intr(dev);
2553 DRM_ERROR("Unexpected DE Misc interrupt\n");
2556 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2559 if (master_ctl & GEN8_DE_PORT_IRQ) {
2560 tmp = I915_READ(GEN8_DE_PORT_IIR);
2562 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2564 if (tmp & GEN8_AUX_CHANNEL_A)
2565 dp_aux_irq_handler(dev);
2567 DRM_ERROR("Unexpected DE Port interrupt\n");
2570 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2573 for_each_pipe(dev_priv, pipe) {
2574 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2576 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2579 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2582 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2584 if (pipe_iir & GEN8_PIPE_VBLANK &&
2585 intel_pipe_handle_vblank(dev, pipe))
2586 intel_check_page_flip(dev, pipe);
                if (IS_GEN9(dev))
                        flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
                else
                        flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
                if (flip_done) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip_plane(dev, pipe);
                }
2598 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2599 hsw_pipe_crc_irq_handler(dev, pipe);
2601 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
                                DRM_ERROR("Pipe %c FIFO underrun\n",
                                          pipe_name(pipe));
                }
                if (IS_GEN9(dev))
                        fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
                else
                        fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

                if (fault_errors)
                        DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
                                  pipe_name(pipe),
                                  fault_errors);
2619 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2622 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2624 * FIXME(BDW): Assume for now that the new interrupt handling
2625 * scheme also closed the SDE interrupt handling race we've seen
2626 * on older pch-split platforms. But this needs testing.
2628 u32 pch_iir = I915_READ(SDEIIR);
2630 I915_WRITE(SDEIIR, pch_iir);
2632 cpt_irq_handler(dev, pch_iir);
2634 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2638 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2639 POSTING_READ(GEN8_MASTER_IRQ);
2644 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2645 bool reset_completed)
2647 struct intel_engine_cs *ring;
2651 * Notify all waiters for GPU completion events that reset state has
2652 * been changed, and that they need to restart their wait after
2653 * checking for potential errors (and bail out to drop locks if there is
2654 * a gpu reset pending so that i915_error_work_func can acquire them).
2657 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2658 for_each_ring(ring, dev_priv, i)
2659 wake_up_all(&ring->irq_queue);
2661 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2662 wake_up_all(&dev_priv->pending_flip_queue);
2665 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2666 * reset state is cleared.
2668 if (reset_completed)
2669 wake_up_all(&dev_priv->gpu_error.reset_queue);
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
2679 static void i915_error_work_func(struct work_struct *work)
2681 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2683 struct drm_i915_private *dev_priv =
2684 container_of(error, struct drm_i915_private, gpu_error);
2685 struct drm_device *dev = dev_priv->dev;
2686 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2687 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2688 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2691 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2694 * Note that there's only one work item which does gpu resets, so we
2695 * need not worry about concurrent gpu resets potentially incrementing
2696 * error->reset_counter twice. We only need to take care of another
2697 * racing irq/hangcheck declaring the gpu dead for a second time. A
2698 * quick check for that is good enough: schedule_work ensures the
2699 * correct ordering between hang detection and this work item, and since
2700 * the reset in-progress bit is only ever set by code outside of this
2701 * work we don't need to worry about any other races.
2703 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2704 DRM_DEBUG_DRIVER("resetting chip\n");
2705 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2709 * In most cases it's guaranteed that we get here with an RPM
2710 * reference held, for example because there is a pending GPU
2711 * request that won't finish until the reset is done. This
2712 * isn't the case at least when we get here by doing a
                 * simulated reset via debugfs, so get an RPM reference.
2715 intel_runtime_pm_get(dev_priv);
2717 * All state reset _must_ be completed before we update the
2718 * reset counter, for otherwise waiters might miss the reset
2719 * pending state and not properly drop locks, resulting in
2720 * deadlocks with the reset work.
2722 ret = i915_reset(dev);
2724 intel_display_handle_reset(dev);
2726 intel_runtime_pm_put(dev_priv);
                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
                         * counter and wake up everyone waiting for the reset to
                         * complete.
                         *
                         * Since unlock operations are a one-sided barrier only,
                         * we need to insert a barrier here to order any seqno
                         * updates before the counter increment.
                         */
                        smp_mb__before_atomic();
                        atomic_inc(&dev_priv->gpu_error.reset_counter);

                        kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
                } else {
                        atomic_set_mask(I915_WEDGED, &error->reset_counter);
                }
                /*
                 * Note: The wake_up also serves as a memory barrier so that
                 * waiters see the updated value of the reset counter atomic_t.
                 */
2752 i915_error_wake_up(dev_priv, true);
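/*
 * Dump the Error Identity Register to the log and write it back to clear
 * it. Bits that refuse to clear are treated as stuck and get masked off
 * in EMR so they cannot keep firing.
 */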
2756 static void i915_report_and_clear_eir(struct drm_device *dev)
2758 struct drm_i915_private *dev_priv = dev->dev_private;
2759 uint32_t instdone[I915_NUM_INSTDONE_REG];
2760 u32 eir = I915_READ(EIR);
2766 pr_err("render error detected, EIR: 0x%08x\n", eir);
2768 i915_get_extra_instdone(dev, instdone);
2771 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2772 u32 ipeir = I915_READ(IPEIR_I965);
2774 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2775 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2776 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2777 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2778 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2779 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2780 I915_WRITE(IPEIR_I965, ipeir);
2781 POSTING_READ(IPEIR_I965);
2783 if (eir & GM45_ERROR_PAGE_TABLE) {
2784 u32 pgtbl_err = I915_READ(PGTBL_ER);
2785 pr_err("page table error\n");
2786 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2787 I915_WRITE(PGTBL_ER, pgtbl_err);
2788 POSTING_READ(PGTBL_ER);
2792 if (!IS_GEN2(dev)) {
2793 if (eir & I915_ERROR_PAGE_TABLE) {
2794 u32 pgtbl_err = I915_READ(PGTBL_ER);
2795 pr_err("page table error\n");
2796 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2797 I915_WRITE(PGTBL_ER, pgtbl_err);
2798 POSTING_READ(PGTBL_ER);
2802 if (eir & I915_ERROR_MEMORY_REFRESH) {
2803 pr_err("memory refresh error:\n");
2804 for_each_pipe(dev_priv, pipe)
2805 pr_err("pipe %c stat: 0x%08x\n",
2806 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2807 /* pipestat has already been acked */
2809 if (eir & I915_ERROR_INSTRUCTION) {
2810 pr_err("instruction error\n");
2811 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2812 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2813 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2814 if (INTEL_INFO(dev)->gen < 4) {
2815 u32 ipeir = I915_READ(IPEIR);
2817 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2818 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2819 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2820 I915_WRITE(IPEIR, ipeir);
2821 POSTING_READ(IPEIR);
2823 u32 ipeir = I915_READ(IPEIR_I965);
2825 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2826 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2827 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2828 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2829 I915_WRITE(IPEIR_I965, ipeir);
2830 POSTING_READ(IPEIR_I965);
        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
2853 * dump it to the syslog. Also call i915_capture_error_state() to make
2854 * sure we get a record and make it available in debugfs. Fire a uevent
2855 * so userspace knows something bad happened (should trigger collection
2856 * of a ring dump etc.).
2858 void i915_handle_error(struct drm_device *dev, bool wedged,
2859 const char *fmt, ...)
2861 struct drm_i915_private *dev_priv = dev->dev_private;
2865 va_start(args, fmt);
2866 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2869 i915_capture_error_state(dev, wedged, error_msg);
2870 i915_report_and_clear_eir(dev);
2873 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2874 &dev_priv->gpu_error.reset_counter);
2877 * Wakeup waiting processes so that the reset work function
2878 * i915_error_work_func doesn't deadlock trying to grab various
2879 * locks. By bumping the reset counter first, the woken
2880 * processes will see a reset in progress and back off,
2881 * releasing their locks and then wait for the reset completion.
2882 * We must do this for _all_ gpu waiters that might hold locks
2883 * that the reset work needs to acquire.
2885 * Note: The wake_up serves as the required memory barrier to
2886 * ensure that the waiters see the updated value of the reset
2889 i915_error_wake_up(dev_priv, false);
2893 * Our reset work can grab modeset locks (since it needs to reset the
         * state of outstanding pageflips). Hence it must not be run on our own
2895 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2896 * code will deadlock.
2898 schedule_work(&dev_priv->gpu_error.work);
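/*
 * Example callers elsewhere in this file:
 *
 *      i915_handle_error(dev, false, "Command parser error, iir 0x%08x", iir);
 *      i915_handle_error(dev, true, "Ring hung");
 *
 * wedged == true additionally marks a reset as pending and schedules the
 * error work above; wedged == false only captures and reports state.
 */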
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2904 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2906 struct drm_i915_private *dev_priv = dev->dev_private;
2907 unsigned long irqflags;
2909 if (!i915_pipe_enabled(dev, pipe))
2912 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2913 if (INTEL_INFO(dev)->gen >= 4)
2914 i915_enable_pipestat(dev_priv, pipe,
2915 PIPE_START_VBLANK_INTERRUPT_STATUS);
2917 i915_enable_pipestat(dev_priv, pipe,
2918 PIPE_VBLANK_INTERRUPT_STATUS);
2919 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2924 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2926 struct drm_i915_private *dev_priv = dev->dev_private;
2927 unsigned long irqflags;
2928 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2929 DE_PIPE_VBLANK(pipe);
2931 if (!i915_pipe_enabled(dev, pipe))
2934 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2935 ironlake_enable_display_irq(dev_priv, bit);
2936 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2941 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2943 struct drm_i915_private *dev_priv = dev->dev_private;
2944 unsigned long irqflags;
2946 if (!i915_pipe_enabled(dev, pipe))
2949 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2950 i915_enable_pipestat(dev_priv, pipe,
2951 PIPE_START_VBLANK_INTERRUPT_STATUS);
2952 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2957 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2959 struct drm_i915_private *dev_priv = dev->dev_private;
2960 unsigned long irqflags;
2962 if (!i915_pipe_enabled(dev, pipe))
2965 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2966 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2967 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2968 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2969 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
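/*
 * Note that the gen8 enable/disable variants above and below toggle
 * vblank via the per-pipe DE IMR shadow (de_irq_mask) rather than via
 * PIPESTAT or the ironlake DEIMR helpers.
 */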
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2976 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2978 struct drm_i915_private *dev_priv = dev->dev_private;
2979 unsigned long irqflags;
2981 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2982 i915_disable_pipestat(dev_priv, pipe,
2983 PIPE_VBLANK_INTERRUPT_STATUS |
2984 PIPE_START_VBLANK_INTERRUPT_STATUS);
2985 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2988 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2990 struct drm_i915_private *dev_priv = dev->dev_private;
2991 unsigned long irqflags;
2992 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2993 DE_PIPE_VBLANK(pipe);
2995 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2996 ironlake_disable_display_irq(dev_priv, bit);
2997 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3000 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3002 struct drm_i915_private *dev_priv = dev->dev_private;
3003 unsigned long irqflags;
3005 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3006 i915_disable_pipestat(dev_priv, pipe,
3007 PIPE_START_VBLANK_INTERRUPT_STATUS);
3008 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3011 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3013 struct drm_i915_private *dev_priv = dev->dev_private;
3014 unsigned long irqflags;
3016 if (!i915_pipe_enabled(dev, pipe))
3019 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3020 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3021 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3022 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3023 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
        return list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
        return (list_empty(&ring->request_list) ||
                i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
        if (INTEL_INFO(dev)->gen >= 8) {
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
                                 MI_SEMAPHORE_REGISTER);
        }
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;

        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
                for_each_ring(signaller, dev_priv, i) {
                        if (ring == signaller)
                                continue;
                        if (offset == signaller->semaphore.signal_ggtt[ring->id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

                for_each_ring(signaller, dev_priv, i) {
                        if (ring == signaller)
                                continue;
                        if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
                                return signaller;
                }
        }

        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
                  ring->id, ipehr, offset);

        return NULL;
}
3085 static struct intel_engine_cs *
3086 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3088 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3089 u32 cmd, ipehr, head;
3093 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3094 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3098 * HEAD is likely pointing to the dword after the actual command,
3099 * so scan backwards until we find the MBOX. But limit it to just 3
3100 * or 4 dwords depending on the semaphore wait command size.
3101 * Note that we don't care about ACTHD here since that might
         * point at a batch, and semaphores are always emitted into the
3103 * ringbuffer itself.
3105 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3106 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3108 for (i = backwards; i; --i) {
3110 * Be paranoid and presume the hw has gone off into the wild -
3111 * our ring is smaller than what the hardware (and hence
3112 * HEAD_ADDR) allows. Also handles wrap-around.
3114 head &= ring->buffer->size - 1;
3116 /* This here seems to blow up */
3117 cmd = ioread32(ring->buffer->virtual_start + head);
3127 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3128 if (INTEL_INFO(ring->dev)->gen >= 8) {
3129 offset = ioread32(ring->buffer->virtual_start + head + 12);
3131 offset = ioread32(ring->buffer->virtual_start + head + 8);
3133 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
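/*
 * Follows the semaphore chain one step: if the ring this one waits on has
 * already passed the awaited seqno, the wait is legitimate and will
 * resolve. hangcheck.deadlock counts the traversal depth so that a cyclic
 * wait between rings is detected instead of recursing forever.
 */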
3136 static int semaphore_passed(struct intel_engine_cs *ring)
3138 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3139 struct intel_engine_cs *signaller;
3142 ring->hangcheck.deadlock++;
3144 signaller = semaphore_waits_for(ring, &seqno);
3145 if (signaller == NULL)
3148 /* Prevent pathological recursion due to driver bugs */
3149 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3152 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3155 /* cursory check for an unkickable deadlock */
3156 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3157 semaphore_passed(signaller) < 0)
3163 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3165 struct intel_engine_cs *ring;
3168 for_each_ring(ring, dev_priv, i)
3169 ring->hangcheck.deadlock = 0;
3172 static enum intel_ring_hangcheck_action
3173 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3175 struct drm_device *dev = ring->dev;
3176 struct drm_i915_private *dev_priv = dev->dev_private;
3179 if (acthd != ring->hangcheck.acthd) {
3180 if (acthd > ring->hangcheck.max_acthd) {
3181 ring->hangcheck.max_acthd = acthd;
3182 return HANGCHECK_ACTIVE;
3185 return HANGCHECK_ACTIVE_LOOP;
3189 return HANGCHECK_HUNG;
3191 /* Is the chip hanging on a WAIT_FOR_EVENT?
3192 * If so we can simply poke the RB_WAIT bit
3193 * and break the hang. This should work on
3194 * all but the second generation chipsets.
3196 tmp = I915_READ_CTL(ring);
3197 if (tmp & RING_WAIT) {
3198 i915_handle_error(dev, false,
3199 "Kicking stuck wait on %s",
3201 I915_WRITE_CTL(ring, tmp);
3202 return HANGCHECK_KICK;
3205 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3206 switch (semaphore_passed(ring)) {
3208 return HANGCHECK_HUNG;
3210 i915_handle_error(dev, false,
3211 "Kicking stuck semaphore on %s",
3213 I915_WRITE_CTL(ring, tmp);
3214 return HANGCHECK_KICK;
3216 return HANGCHECK_WAIT;
3220 return HANGCHECK_HUNG;
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting it.
 */
3231 static void i915_hangcheck_elapsed(unsigned long data)
3233 struct drm_device *dev = (struct drm_device *)data;
3234 struct drm_i915_private *dev_priv = dev->dev_private;
3235 struct intel_engine_cs *ring;
3237 int busy_count = 0, rings_hung = 0;
3238 bool stuck[I915_NUM_RINGS] = { 0 };
3243 if (!i915.enable_hangcheck)
3246 for_each_ring(ring, dev_priv, i) {
3251 semaphore_clear_deadlocks(dev_priv);
3253 seqno = ring->get_seqno(ring, false);
3254 acthd = intel_ring_get_active_head(ring);
3256 if (ring->hangcheck.seqno == seqno) {
3257 if (ring_idle(ring, seqno)) {
3258 ring->hangcheck.action = HANGCHECK_IDLE;
3260 if (waitqueue_active(&ring->irq_queue)) {
3261 /* Issue a wake-up to catch stuck h/w. */
3262 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3263 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3264 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3267 DRM_INFO("Fake missed irq on %s\n",
3269 wake_up_all(&ring->irq_queue);
3271 /* Safeguard against driver failure */
3272 ring->hangcheck.score += BUSY;
3276 /* We always increment the hangcheck score
3277 * if the ring is busy and still processing
3278 * the same request, so that no single request
3279 * can run indefinitely (such as a chain of
                                 * batches). The only time we do not increment
                                 * the hangcheck score on this ring is when it
                                 * is in a legitimate wait for another ring.
                                 * In that case the waiting ring is a
3284 * victim and we want to be sure we catch the
3285 * right culprit. Then every time we do kick
3286 * the ring, add a small increment to the
3287 * score so that we can catch a batch that is
3288 * being repeatedly kicked and so responsible
3289 * for stalling the machine.
3291 ring->hangcheck.action = ring_stuck(ring,
3294 switch (ring->hangcheck.action) {
3295 case HANGCHECK_IDLE:
3296 case HANGCHECK_WAIT:
3297 case HANGCHECK_ACTIVE:
3299 case HANGCHECK_ACTIVE_LOOP:
3300 ring->hangcheck.score += BUSY;
3302 case HANGCHECK_KICK:
3303 ring->hangcheck.score += KICK;
3305 case HANGCHECK_HUNG:
3306 ring->hangcheck.score += HUNG;
3312 ring->hangcheck.action = HANGCHECK_ACTIVE;
3314 /* Gradually reduce the count so that we catch DoS
3315 * attempts across multiple batches.
3317 if (ring->hangcheck.score > 0)
3318 ring->hangcheck.score--;
3320 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3323 ring->hangcheck.seqno = seqno;
3324 ring->hangcheck.acthd = acthd;
3328 for_each_ring(ring, dev_priv, i) {
3329 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3330 DRM_INFO("%s on %s\n",
3331 stuck[i] ? "stuck" : "no progress",
3338 return i915_handle_error(dev, true, "Ring hung");
        /* Reset timer in case the chip hangs without another request
         * being added */
3343 i915_queue_hangcheck(dev);
3346 void i915_queue_hangcheck(struct drm_device *dev)
3348 struct drm_i915_private *dev_priv = dev->dev_private;
3349 if (!i915.enable_hangcheck)
3352 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3353 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
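/*
 * Everything below is interrupt setup/teardown. The reset/preinstall
 * helpers drive the various IMR/IER/IIR triplets into a known-off state;
 * the postinstall helpers then unmask only what each platform actually
 * handles.
 */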
3356 static void ibx_irq_reset(struct drm_device *dev)
3358 struct drm_i915_private *dev_priv = dev->dev_private;
3360 if (HAS_PCH_NOP(dev))
3363 GEN5_IRQ_RESET(SDE);
3365 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3366 I915_WRITE(SERR_INT, 0xffffffff);
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
3377 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3379 struct drm_i915_private *dev_priv = dev->dev_private;
3381 if (HAS_PCH_NOP(dev))
3384 WARN_ON(I915_READ(SDEIER) != 0);
3385 I915_WRITE(SDEIER, 0xffffffff);
3386 POSTING_READ(SDEIER);
3389 static void gen5_gt_irq_reset(struct drm_device *dev)
3391 struct drm_i915_private *dev_priv = dev->dev_private;
3394 if (INTEL_INFO(dev)->gen >= 6)
3395 GEN5_IRQ_RESET(GEN6_PM);
3400 static void ironlake_irq_reset(struct drm_device *dev)
3402 struct drm_i915_private *dev_priv = dev->dev_private;
3404 I915_WRITE(HWSTAM, 0xffffffff);
3408 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3410 gen5_gt_irq_reset(dev);
3415 static void valleyview_irq_preinstall(struct drm_device *dev)
3417 struct drm_i915_private *dev_priv = dev->dev_private;
3421 I915_WRITE(VLV_IMR, 0);
3422 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3423 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3424 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3427 I915_WRITE(GTIIR, I915_READ(GTIIR));
3428 I915_WRITE(GTIIR, I915_READ(GTIIR));
3430 gen5_gt_irq_reset(dev);
3432 I915_WRITE(DPINVGTT, 0xff);
3434 I915_WRITE(PORT_HOTPLUG_EN, 0);
3435 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3436 for_each_pipe(dev_priv, pipe)
3437 I915_WRITE(PIPESTAT(pipe), 0xffff);
3438 I915_WRITE(VLV_IIR, 0xffffffff);
3439 I915_WRITE(VLV_IMR, 0xffffffff);
3440 I915_WRITE(VLV_IER, 0x0);
3441 POSTING_READ(VLV_IER);
3444 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3446 GEN8_IRQ_RESET_NDX(GT, 0);
3447 GEN8_IRQ_RESET_NDX(GT, 1);
3448 GEN8_IRQ_RESET_NDX(GT, 2);
3449 GEN8_IRQ_RESET_NDX(GT, 3);
3452 static void gen8_irq_reset(struct drm_device *dev)
3454 struct drm_i915_private *dev_priv = dev->dev_private;
3457 I915_WRITE(GEN8_MASTER_IRQ, 0);
3458 POSTING_READ(GEN8_MASTER_IRQ);
3460 gen8_gt_irq_reset(dev_priv);
3462 for_each_pipe(dev_priv, pipe)
3463 if (intel_display_power_is_enabled(dev_priv,
3464 POWER_DOMAIN_PIPE(pipe)))
3465 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3467 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3468 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3469 GEN5_IRQ_RESET(GEN8_PCU_);
3474 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3476 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3478 spin_lock_irq(&dev_priv->irq_lock);
3479 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3480 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3481 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3482 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3483 spin_unlock_irq(&dev_priv->irq_lock);
3486 static void cherryview_irq_preinstall(struct drm_device *dev)
3488 struct drm_i915_private *dev_priv = dev->dev_private;
3491 I915_WRITE(GEN8_MASTER_IRQ, 0);
3492 POSTING_READ(GEN8_MASTER_IRQ);
3494 gen8_gt_irq_reset(dev_priv);
3496 GEN5_IRQ_RESET(GEN8_PCU_);
3498 POSTING_READ(GEN8_PCU_IIR);
3500 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3502 I915_WRITE(PORT_HOTPLUG_EN, 0);
3503 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3505 for_each_pipe(dev_priv, pipe)
3506 I915_WRITE(PIPESTAT(pipe), 0xffff);
3508 I915_WRITE(VLV_IMR, 0xffffffff);
3509 I915_WRITE(VLV_IER, 0x0);
3510 I915_WRITE(VLV_IIR, 0xffffffff);
3511 POSTING_READ(VLV_IIR);
3514 static void ibx_hpd_irq_setup(struct drm_device *dev)
3516 struct drm_i915_private *dev_priv = dev->dev_private;
3517 struct intel_encoder *intel_encoder;
3518 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3520 if (HAS_PCH_IBX(dev)) {
3521 hotplug_irqs = SDE_HOTPLUG_MASK;
3522 for_each_intel_encoder(dev, intel_encoder)
3523 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3524 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3527 for_each_intel_encoder(dev, intel_encoder)
3528 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3529 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3532 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3535 * Enable digital hotplug on the PCH, and configure the DP short pulse
3536 * duration to 2ms (which is the minimum in the Display Port spec)
3538 * This register is the same on all known PCH chips.
3540 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3541 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3542 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3543 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3544 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3545 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3548 static void ibx_irq_postinstall(struct drm_device *dev)
3550 struct drm_i915_private *dev_priv = dev->dev_private;
3553 if (HAS_PCH_NOP(dev))
3556 if (HAS_PCH_IBX(dev))
3557 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3559 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3561 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3562 I915_WRITE(SDEIMR, ~mask);
3565 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568 u32 pm_irqs, gt_irqs;
3570 pm_irqs = gt_irqs = 0;
3572 dev_priv->gt_irq_mask = ~0;
3573 if (HAS_L3_DPF(dev)) {
3574 /* L3 parity interrupt is always unmasked. */
3575 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3576 gt_irqs |= GT_PARITY_ERROR(dev);
3579 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3581 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3582 ILK_BSD_USER_INTERRUPT;
3584 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3587 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3589 if (INTEL_INFO(dev)->gen >= 6) {
3590 pm_irqs |= dev_priv->pm_rps_events;
3593 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3595 dev_priv->pm_irq_mask = 0xffffffff;
3596 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3600 static int ironlake_irq_postinstall(struct drm_device *dev)
3602 struct drm_i915_private *dev_priv = dev->dev_private;
3603 u32 display_mask, extra_mask;
3605 if (INTEL_INFO(dev)->gen >= 7) {
3606 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3607 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3608 DE_PLANEB_FLIP_DONE_IVB |
3609 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3610 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3611 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3613 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3614 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3616 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3618 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3619 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3622 dev_priv->irq_mask = ~display_mask;
3624 I915_WRITE(HWSTAM, 0xeffe);
3626 ibx_irq_pre_postinstall(dev);
3628 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3630 gen5_gt_irq_postinstall(dev);
3632 ibx_irq_postinstall(dev);
3634 if (IS_IRONLAKE_M(dev)) {
3635 /* Enable PCU event interrupts
3637 * spinlocking not required here for correctness since interrupt
3638 * setup is guaranteed to run in single-threaded context. But we
3639 * need it to make the assert_spin_locked happy. */
3640 spin_lock_irq(&dev_priv->irq_lock);
3641 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3642 spin_unlock_irq(&dev_priv->irq_lock);
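/*
 * The VLV display-irq install/uninstall pair below are mirror images:
 * uninstall performs the same PIPESTAT/IIR/IMR/IER updates in reverse
 * order, which is what lets display interrupts be toggled at runtime
 * under irq_lock.
 */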
3648 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3653 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3654 PIPE_FIFO_UNDERRUN_STATUS;
3656 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3657 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3658 POSTING_READ(PIPESTAT(PIPE_A));
3660 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3661 PIPE_CRC_DONE_INTERRUPT_STATUS;
3663 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3664 PIPE_GMBUS_INTERRUPT_STATUS);
3665 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3667 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3668 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3669 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3670 dev_priv->irq_mask &= ~iir_mask;
3672 I915_WRITE(VLV_IIR, iir_mask);
3673 I915_WRITE(VLV_IIR, iir_mask);
3674 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3675 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3676 POSTING_READ(VLV_IER);
3679 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3684 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3685 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3686 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3688 dev_priv->irq_mask |= iir_mask;
3689 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3690 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3691 I915_WRITE(VLV_IIR, iir_mask);
3692 I915_WRITE(VLV_IIR, iir_mask);
3693 POSTING_READ(VLV_IIR);
3695 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3696 PIPE_CRC_DONE_INTERRUPT_STATUS;
3698 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3699 PIPE_GMBUS_INTERRUPT_STATUS);
3700 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3702 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3703 PIPE_FIFO_UNDERRUN_STATUS;
3704 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3705 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3706 POSTING_READ(PIPESTAT(PIPE_A));
3709 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3711 assert_spin_locked(&dev_priv->irq_lock);
3713 if (dev_priv->display_irqs_enabled)
3716 dev_priv->display_irqs_enabled = true;
3718 if (intel_irqs_enabled(dev_priv))
3719 valleyview_display_irqs_install(dev_priv);
3722 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3724 assert_spin_locked(&dev_priv->irq_lock);
3726 if (!dev_priv->display_irqs_enabled)
3729 dev_priv->display_irqs_enabled = false;
3731 if (intel_irqs_enabled(dev_priv))
3732 valleyview_display_irqs_uninstall(dev_priv);
3735 static int valleyview_irq_postinstall(struct drm_device *dev)
3737 struct drm_i915_private *dev_priv = dev->dev_private;
3739 dev_priv->irq_mask = ~0;
3741 I915_WRITE(PORT_HOTPLUG_EN, 0);
3742 POSTING_READ(PORT_HOTPLUG_EN);
3744 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3745 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3746 I915_WRITE(VLV_IIR, 0xffffffff);
3747 POSTING_READ(VLV_IER);
3749 /* Interrupt setup is already guaranteed to be single-threaded, this is
3750 * just to make the assert_spin_locked check happy. */
3751 spin_lock_irq(&dev_priv->irq_lock);
3752 if (dev_priv->display_irqs_enabled)
3753 valleyview_display_irqs_install(dev_priv);
3754 spin_unlock_irq(&dev_priv->irq_lock);
3756 I915_WRITE(VLV_IIR, 0xffffffff);
3757 I915_WRITE(VLV_IIR, 0xffffffff);
3759 gen5_gt_irq_postinstall(dev);
3761 /* ack & enable invalid PTE error interrupts */
3762 #if 0 /* FIXME: add support to irq handler for checking these bits */
3763 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
        I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif
3767 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3772 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3774 /* These are interrupts we'll toggle with the ring mask register */
3775 uint32_t gt_interrupts[] = {
3776 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3777 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3778 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3779 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3780 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3781 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3782 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3783 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3784 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3786 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3787 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3790 dev_priv->pm_irq_mask = 0xffffffff;
3791 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3792 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3793 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3794 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3797 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3799 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3800 uint32_t de_pipe_enables;
3803 if (IS_GEN9(dev_priv))
3804 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3805 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3807 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3808 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3810 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3811 GEN8_PIPE_FIFO_UNDERRUN;
3813 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3814 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3815 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3817 for_each_pipe(dev_priv, pipe)
3818 if (intel_display_power_is_enabled(dev_priv,
3819 POWER_DOMAIN_PIPE(pipe)))
3820 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3821 dev_priv->de_irq_mask[pipe],
3824 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3827 static int gen8_irq_postinstall(struct drm_device *dev)
3829 struct drm_i915_private *dev_priv = dev->dev_private;
3831 ibx_irq_pre_postinstall(dev);
3833 gen8_gt_irq_postinstall(dev_priv);
3834 gen8_de_irq_postinstall(dev_priv);
3836 ibx_irq_postinstall(dev);
3838 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3839 POSTING_READ(GEN8_MASTER_IRQ);
3844 static int cherryview_irq_postinstall(struct drm_device *dev)
3846 struct drm_i915_private *dev_priv = dev->dev_private;
3847 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3848 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3849 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3850 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3851 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3852 PIPE_CRC_DONE_INTERRUPT_STATUS;
3856 * Leave vblank interrupts masked initially. enable/disable will
3857 * toggle them based on usage.
3859 dev_priv->irq_mask = ~enable_mask;
3861 for_each_pipe(dev_priv, pipe)
3862 I915_WRITE(PIPESTAT(pipe), 0xffff);
3864 spin_lock_irq(&dev_priv->irq_lock);
3865 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3866 for_each_pipe(dev_priv, pipe)
3867 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3868 spin_unlock_irq(&dev_priv->irq_lock);
3870 I915_WRITE(VLV_IIR, 0xffffffff);
3871 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3872 I915_WRITE(VLV_IER, enable_mask);
3874 gen8_gt_irq_postinstall(dev_priv);
3876 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3877 POSTING_READ(GEN8_MASTER_IRQ);
3882 static void gen8_irq_uninstall(struct drm_device *dev)
3884 struct drm_i915_private *dev_priv = dev->dev_private;
3889 gen8_irq_reset(dev);
3892 static void valleyview_irq_uninstall(struct drm_device *dev)
3894 struct drm_i915_private *dev_priv = dev->dev_private;
3900 I915_WRITE(VLV_MASTER_IER, 0);
3902 for_each_pipe(dev_priv, pipe)
3903 I915_WRITE(PIPESTAT(pipe), 0xffff);
3905 I915_WRITE(HWSTAM, 0xffffffff);
3906 I915_WRITE(PORT_HOTPLUG_EN, 0);
3907 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3909 /* Interrupt setup is already guaranteed to be single-threaded, this is
3910 * just to make the assert_spin_locked check happy. */
3911 spin_lock_irq(&dev_priv->irq_lock);
3912 if (dev_priv->display_irqs_enabled)
3913 valleyview_display_irqs_uninstall(dev_priv);
3914 spin_unlock_irq(&dev_priv->irq_lock);
3916 dev_priv->irq_mask = 0;
3918 I915_WRITE(VLV_IIR, 0xffffffff);
3919 I915_WRITE(VLV_IMR, 0xffffffff);
3920 I915_WRITE(VLV_IER, 0x0);
3921 POSTING_READ(VLV_IER);
3924 static void cherryview_irq_uninstall(struct drm_device *dev)
3926 struct drm_i915_private *dev_priv = dev->dev_private;
3932 I915_WRITE(GEN8_MASTER_IRQ, 0);
3933 POSTING_READ(GEN8_MASTER_IRQ);
3935 #define GEN8_IRQ_FINI_NDX(type, which) \
3937 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3938 I915_WRITE(GEN8_##type##_IER(which), 0); \
3939 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3940 POSTING_READ(GEN8_##type##_IIR(which)); \
3941 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3944 #define GEN8_IRQ_FINI(type) \
3946 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3947 I915_WRITE(GEN8_##type##_IER, 0); \
3948 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3949 POSTING_READ(GEN8_##type##_IIR); \
3950 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3953 GEN8_IRQ_FINI_NDX(GT, 0);
3954 GEN8_IRQ_FINI_NDX(GT, 1);
3955 GEN8_IRQ_FINI_NDX(GT, 2);
3956 GEN8_IRQ_FINI_NDX(GT, 3);
3960 #undef GEN8_IRQ_FINI
3961 #undef GEN8_IRQ_FINI_NDX
3963 I915_WRITE(PORT_HOTPLUG_EN, 0);
3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3966 for_each_pipe(dev_priv, pipe)
3967 I915_WRITE(PIPESTAT(pipe), 0xffff);
3969 I915_WRITE(VLV_IMR, 0xffffffff);
3970 I915_WRITE(VLV_IER, 0x0);
3971 I915_WRITE(VLV_IIR, 0xffffffff);
3972 POSTING_READ(VLV_IIR);
3975 static void ironlake_irq_uninstall(struct drm_device *dev)
3977 struct drm_i915_private *dev_priv = dev->dev_private;
3982 ironlake_irq_reset(dev);
3985 static void i8xx_irq_preinstall(struct drm_device * dev)
3987 struct drm_i915_private *dev_priv = dev->dev_private;
3990 for_each_pipe(dev_priv, pipe)
3991 I915_WRITE(PIPESTAT(pipe), 0);
3992 I915_WRITE16(IMR, 0xffff);
3993 I915_WRITE16(IER, 0x0);
3994 POSTING_READ16(IER);
3997 static int i8xx_irq_postinstall(struct drm_device *dev)
3999 struct drm_i915_private *dev_priv = dev->dev_private;
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4004 /* Unmask the interrupts that we always want on. */
4005 dev_priv->irq_mask =
4006 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4007 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4008 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4009 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4010 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4011 I915_WRITE16(IMR, dev_priv->irq_mask);
        I915_WRITE16(IER,
                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                     I915_USER_INTERRUPT);
4018 POSTING_READ16(IER);
4020 /* Interrupt setup is already guaranteed to be single-threaded, this is
4021 * just to make the assert_spin_locked check happy. */
4022 spin_lock_irq(&dev_priv->irq_lock);
4023 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4024 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4025 spin_unlock_irq(&dev_priv->irq_lock);
/*
 * Returns true when a page flip has completed.
 */
4033 static bool i8xx_handle_vblank(struct drm_device *dev,
4034 int plane, int pipe, u32 iir)
4036 struct drm_i915_private *dev_priv = dev->dev_private;
4037 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4039 if (!intel_pipe_handle_vblank(dev, pipe))
4042 if ((iir & flip_pending) == 0)
4043 goto check_page_flip;
4045 intel_prepare_page_flip(dev, plane);
4047 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4048 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4049 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4050 * the flip is completed (no longer pending). Since this doesn't raise
4051 * an interrupt per se, we watch for the change at vblank.
4053 if (I915_READ16(ISR) & flip_pending)
4054 goto check_page_flip;
4056 intel_finish_page_flip(dev, pipe);
4060 intel_check_page_flip(dev, pipe);
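/*
 * The 8xx interrupt path mirrors i915_irq_handler() below but uses the
 * 16-bit register accessors (I915_READ16/I915_WRITE16) for IIR/IMR/IER
 * throughout.
 */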
4064 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4066 struct drm_device *dev = arg;
4067 struct drm_i915_private *dev_priv = dev->dev_private;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4073 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4075 iir = I915_READ16(IIR);
4079 while (iir & ~flip_mask) {
4080 /* Can't rely on pipestat interrupt bit in iir as it might
4081 * have been cleared after the pipestat interrupt was received.
4082 * It doesn't set the bit in iir again, but it still produces
4083 * interrupts (for non-MSI).
4085 spin_lock(&dev_priv->irq_lock);
4086 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4087 i915_handle_error(dev, false,
4088 "Command parser error, iir 0x%08x",
4091 for_each_pipe(dev_priv, pipe) {
4092 int reg = PIPESTAT(pipe);
4093 pipe_stats[pipe] = I915_READ(reg);
4096 * Clear the PIPE*STAT regs before the IIR
4098 if (pipe_stats[pipe] & 0x8000ffff)
4099 I915_WRITE(reg, pipe_stats[pipe]);
4101 spin_unlock(&dev_priv->irq_lock);
4103 I915_WRITE16(IIR, iir & ~flip_mask);
4104 new_iir = I915_READ16(IIR); /* Flush posted writes */
4106 i915_update_dri1_breadcrumb(dev);
4108 if (iir & I915_USER_INTERRUPT)
4109 notify_ring(dev, &dev_priv->ring[RCS]);
4111 for_each_pipe(dev_priv, pipe) {
4116 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4117 i8xx_handle_vblank(dev, plane, pipe, iir))
4118 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4120 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4121 i9xx_pipe_crc_irq_handler(dev, pipe);
4123 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4124 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4125 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4134 static void i8xx_irq_uninstall(struct drm_device * dev)
4136 struct drm_i915_private *dev_priv = dev->dev_private;
4139 for_each_pipe(dev_priv, pipe) {
4140 /* Clear enable bits; then clear status bits */
4141 I915_WRITE(PIPESTAT(pipe), 0);
4142 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4144 I915_WRITE16(IMR, 0xffff);
4145 I915_WRITE16(IER, 0x0);
4146 I915_WRITE16(IIR, I915_READ16(IIR));
4149 static void i915_irq_preinstall(struct drm_device * dev)
4151 struct drm_i915_private *dev_priv = dev->dev_private;
4154 if (I915_HAS_HOTPLUG(dev)) {
4155 I915_WRITE(PORT_HOTPLUG_EN, 0);
4156 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4159 I915_WRITE16(HWSTAM, 0xeffe);
4160 for_each_pipe(dev_priv, pipe)
4161 I915_WRITE(PIPESTAT(pipe), 0);
4162 I915_WRITE(IMR, 0xffffffff);
4163 I915_WRITE(IER, 0x0);
4167 static int i915_irq_postinstall(struct drm_device *dev)
4169 struct drm_i915_private *dev_priv = dev->dev_private;
4172 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4174 /* Unmask the interrupts that we always want on. */
4175 dev_priv->irq_mask =
4176 ~(I915_ASLE_INTERRUPT |
4177 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4178 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4179 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4180 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4181 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        enable_mask =
                I915_ASLE_INTERRUPT |
4185 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4186 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4187 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4188 I915_USER_INTERRUPT;
4190 if (I915_HAS_HOTPLUG(dev)) {
4191 I915_WRITE(PORT_HOTPLUG_EN, 0);
4192 POSTING_READ(PORT_HOTPLUG_EN);
4194 /* Enable in IER... */
4195 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4196 /* and unmask in IMR */
4197 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4200 I915_WRITE(IMR, dev_priv->irq_mask);
4201 I915_WRITE(IER, enable_mask);
4204 i915_enable_asle_pipestat(dev);
4206 /* Interrupt setup is already guaranteed to be single-threaded, this is
4207 * just to make the assert_spin_locked check happy. */
4208 spin_lock_irq(&dev_priv->irq_lock);
4209 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4210 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4211 spin_unlock_irq(&dev_priv->irq_lock);
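/*
 * A rough model of how the two masks programmed above interact,
 * following the comments in this file (hardware details beyond that
 * are an assumption): IMR gates which conditions get latched into IIR,
 * while IER gates which latched bits actually raise the CPU interrupt:
 *
 *	IIR <- event & ~IMR
 *	irq <- (IIR & IER) != 0
 *
 * The flip-pending bits are thus unmasked in IMR, so they are visible
 * to i915_handle_vblank() via IIR, yet left out of enable_mask so that
 * a pending flip never raises an interrupt by itself.
 */
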
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

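/*
 * Timeline sketch of the MSI race described in the comment above,
 * assuming two interrupt sources A and B:
 *
 *	IIR = A			-> MSI fires (zero to nonzero edge)
 *	handler samples iir = A
 *	B asserts		-> IIR = A | B, no new edge, no new MSI
 *	handler clears A	-> new_iir = B
 *
 * Without re-reading IIR and looping on new_iir, B would sit latched
 * in IIR forever: IIR never returns to zero, so no further MSI edge
 * would ever be generated.
 */
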
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

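/*
 * Example of the value written above, assuming only the CRT pin is
 * marked HPD_ENABLED on a non-G4X part (and ignoring unrelated bits
 * preserved by the read-modify-write):
 *
 *	hotplug_en = CRT_HOTPLUG_INT_EN | CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 *
 * i.e. one enable bit per pin taken from hpd_mask_i915[] plus the CRT
 * detection parameters, which are programmed just once to avoid the
 * spurious hotplug event mentioned in the comment above.
 */
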
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}

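/*
 * Rough life cycle of a storm-disabled hpd pin, as handled above: the
 * irq storm detection elsewhere in this file marks the pin HPD_DISABLED
 * and falls back to polling; this delayed work later marks the pin
 * HPD_ENABLED again, restores each affected connector's preferred
 * ->polled mode and reprograms the hardware through ->hpd_irq_setup(),
 * all under irq_lock so it cannot race the interrupt handlers.
 */
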
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

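/*
 * A minimal sketch of how the vtable set up above is consumed,
 * assuming the standard drm_irq_install() flow of this era:
 *
 *	drm_irq_install(dev, irq)
 *		-> dev->driver->irq_preinstall(dev)
 *		-> request_irq(irq, dev->driver->irq_handler, ...)
 *		-> dev->driver->irq_postinstall(dev)
 *
 * intel_irq_init() therefore only selects the per-platform
 * implementations; nothing touches the hardware until
 * intel_irq_install() runs.
 */
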
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

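/*
 * Sketch of the intended ordering per the kerneldocs in this file (the
 * exact call sites in the load and resume paths are an assumption):
 *
 *	intel_irq_init(dev_priv);	vtables, work items, timers
 *	intel_irq_install(dev_priv);	hardware interrupts enabled
 *	intel_hpd_init(dev_priv);	hotplug enabled last
 */
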
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}

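/*
 * These two helpers are meant to be used pairwise from the runtime pm
 * and system suspend/resume paths, roughly (call sites assumed):
 *
 *	suspend:	intel_runtime_pm_disable_interrupts(dev_priv);
 *	resume:		intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Re-enabling reuses the full irq_preinstall/irq_postinstall reset so
 * the hardware comes back in the same interrupt state as driver load.
 */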