/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
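/*
 * Note the ordering above: IMR is written first to mask everything, IER is
 * then disabled, and only then is IIR acked (twice, since the hardware can
 * queue a second event behind the one being cleared). The posting reads
 * flush each write before the next step. GEN5_IRQ_RESET below follows the
 * same sequence for the single-instance register sets.
 */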
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
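/*
 * Init is the mirror image of the reset macros above: assert that IIR is
 * already clean, enable the desired sources in IER, and only then unmask
 * them in IMR.
 */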
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
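/*
 * On gen8+ the PM interrupts live in GT interrupt register bank 2 instead
 * of the dedicated GEN6_PM* registers, so the three helpers above select
 * IIR/IMR/IER by generation and let the callers stay generation-agnostic.
 */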
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
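/*
 * PIPESTAT keeps status bits in the low 16 bits and the corresponding
 * enable bits 16 positions higher, which is why status_mask << 16 is the
 * default enable mask above and in i915_{enable,disable}_pipestat() below;
 * the VLV sprite flip-done bits are the odd ones out and get remapped
 * explicitly.
 */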
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
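/*
 * Rough illustration of the fixup above, with made-up numbers: for a mode
 * with htotal = 1000 pixels, hsync_start = 900 and vbl_start = 480 lines,
 * vbl_start becomes 480 * 1000 - (1000 - 900) = 479900 pixels. Once the
 * pixel counter passes that point the (pixel >= vbl_start) term adds one,
 * so the cooked counter increments at start of vblank instead of at start
 * of active like the raw hardware frame counter does.
 */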
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
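/*
 * Storm detection further below counts interrupts per pin: more than
 * HPD_STORM_THRESHOLD events inside one HPD_STORM_DETECT_PERIOD (1s)
 * window marks the pin HPD_MARK_DISABLED, and the hotplug work then
 * switches that connector over to polling.
 */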
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);
			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
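/*
 * The CRC entries form a ring protected by pipe_crc->lock; CIRC_SPACE()
 * performs the full check above, and the "& (INTEL_PIPE_CRC_ENTRIES_NR - 1)"
 * masking is what wraps head, which assumes INTEL_PIPE_CRC_ENTRIES_NR is a
 * power of two.
 */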
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
2098 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2100 struct drm_i915_private *dev_priv = dev->dev_private;
2103 if (de_iir & DE_ERR_INT_IVB)
2104 ivb_err_int_handler(dev);
2106 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2107 dp_aux_irq_handler(dev);
2109 if (de_iir & DE_GSE_IVB)
2110 intel_opregion_asle_intr(dev);
2112 for_each_pipe(dev_priv, pipe) {
2113 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2114 intel_pipe_handle_vblank(dev, pipe))
2115 intel_check_page_flip(dev, pipe);
2117 /* plane/pipes map 1:1 on ilk+ */
2118 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2119 intel_prepare_page_flip(dev, pipe);
2120 intel_finish_page_flip_plane(dev, pipe);
2121 }
2122 }
2124 /* check event from PCH */
2125 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2126 u32 pch_iir = I915_READ(SDEIIR);
2128 cpt_irq_handler(dev, pch_iir);
2130 /* clear PCH hotplug event before clear CPU irq */
2131 I915_WRITE(SDEIIR, pch_iir);
2135 /*
2136 * To handle irqs with the minimum potential races with fresh interrupts, we:
2137 * 1 - Disable Master Interrupt Control.
2138 * 2 - Find the source(s) of the interrupt.
2139 * 3 - Clear the Interrupt Identity bits (IIR).
2140 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2141 * 5 - Re-enable Master Interrupt Control.
2142 */
2143 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2145 struct drm_device *dev = arg;
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2148 irqreturn_t ret = IRQ_NONE;
2150 /* We get interrupts on unclaimed registers, so check for this before we
2151 * do any I915_{READ,WRITE}. */
2152 intel_uncore_check_errors(dev);
2154 /* disable master interrupt before clearing iir */
2155 de_ier = I915_READ(DEIER);
2156 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2157 POSTING_READ(DEIER);
2159 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2160 * interrupts will be stored on its back queue, and then we'll be
2161 * able to process them after we restore SDEIER (as soon as we restore
2162 * it, we'll get an interrupt if SDEIIR still has something to process
2163 * due to its back queue). */
2164 if (!HAS_PCH_NOP(dev)) {
2165 sde_ier = I915_READ(SDEIER);
2166 I915_WRITE(SDEIER, 0);
2167 POSTING_READ(SDEIER);
2170 /* Find, clear, then process each source of interrupt */
2172 gt_iir = I915_READ(GTIIR);
2173 if (gt_iir) {
2174 I915_WRITE(GTIIR, gt_iir);
2175 ret = IRQ_HANDLED;
2176 if (INTEL_INFO(dev)->gen >= 6)
2177 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2178 else
2179 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2180 }
2182 de_iir = I915_READ(DEIIR);
2183 if (de_iir) {
2184 I915_WRITE(DEIIR, de_iir);
2185 ret = IRQ_HANDLED;
2186 if (INTEL_INFO(dev)->gen >= 7)
2187 ivb_display_irq_handler(dev, de_iir);
2188 else
2189 ilk_display_irq_handler(dev, de_iir);
2190 }
2192 if (INTEL_INFO(dev)->gen >= 6) {
2193 u32 pm_iir = I915_READ(GEN6_PMIIR);
2194 if (pm_iir) {
2195 I915_WRITE(GEN6_PMIIR, pm_iir);
2196 ret = IRQ_HANDLED;
2197 gen6_rps_irq_handler(dev_priv, pm_iir);
2198 }
2199 }
2201 I915_WRITE(DEIER, de_ier);
2202 POSTING_READ(DEIER);
2203 if (!HAS_PCH_NOP(dev)) {
2204 I915_WRITE(SDEIER, sde_ier);
2205 POSTING_READ(SDEIER);
2206 }
2208 return ret;
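/*
 * Editor's sketch (not in the original source): the five-step sequence the
 * comment above describes, reduced to its skeleton for one interrupt domain.
 * handle() is a placeholder for the per-source fan-out done by the real
 * handler; everything else uses registers visible in this file.
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	// 1: master off
 *	POSTING_READ(DEIER);
 *	de_iir = I915_READ(DEIIR);				// 2: find source(s)
 *	if (de_iir) {
 *		I915_WRITE(DEIIR, de_iir);			// 3: clear IIR
 *		handle(de_iir);					// 4: process
 *		ret = IRQ_HANDLED;
 *	}
 *	I915_WRITE(DEIER, de_ier);				// 5: master back on
 *	POSTING_READ(DEIER);
 */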
2211 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2213 struct drm_device *dev = arg;
2214 struct drm_i915_private *dev_priv = dev->dev_private;
2216 irqreturn_t ret = IRQ_NONE;
2220 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2221 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2222 if (!master_ctl)
2223 return IRQ_NONE;
2225 I915_WRITE(GEN8_MASTER_IRQ, 0);
2226 POSTING_READ(GEN8_MASTER_IRQ);
2228 /* Find, clear, then process each source of interrupt */
2230 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2232 if (master_ctl & GEN8_DE_MISC_IRQ) {
2233 tmp = I915_READ(GEN8_DE_MISC_IIR);
2234 if (tmp) {
2235 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2236 ret = IRQ_HANDLED;
2237 if (tmp & GEN8_DE_MISC_GSE)
2238 intel_opregion_asle_intr(dev);
2239 else
2240 DRM_ERROR("Unexpected DE Misc interrupt\n");
2241 }
2242 else
2243 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2246 if (master_ctl & GEN8_DE_PORT_IRQ) {
2247 tmp = I915_READ(GEN8_DE_PORT_IIR);
2248 if (tmp) {
2249 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2250 ret = IRQ_HANDLED;
2251 if (tmp & GEN8_AUX_CHANNEL_A)
2252 dp_aux_irq_handler(dev);
2253 else
2254 DRM_ERROR("Unexpected DE Port interrupt\n");
2255 }
2256 else
2257 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2260 for_each_pipe(dev_priv, pipe) {
2261 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2263 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2264 continue;
2266 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2267 if (pipe_iir) {
2268 ret = IRQ_HANDLED;
2269 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2271 if (pipe_iir & GEN8_PIPE_VBLANK &&
2272 intel_pipe_handle_vblank(dev, pipe))
2273 intel_check_page_flip(dev, pipe);
2275 if (IS_GEN9(dev))
2276 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2277 else
2278 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2280 if (flip_done) {
2281 intel_prepare_page_flip(dev, pipe);
2282 intel_finish_page_flip_plane(dev, pipe);
2283 }
2285 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2286 hsw_pipe_crc_irq_handler(dev, pipe);
2288 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2289 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2290 pipe);
2293 if (IS_GEN9(dev))
2294 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2295 else
2296 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2298 if (fault_errors)
2299 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2300 pipe_name(pipe),
2301 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2302 } else
2303 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2304 }
2306 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2307 /*
2308 * FIXME(BDW): Assume for now that the new interrupt handling
2309 * scheme also closed the SDE interrupt handling race we've seen
2310 * on older pch-split platforms. But this needs testing.
2311 */
2312 u32 pch_iir = I915_READ(SDEIIR);
2313 if (pch_iir) {
2314 I915_WRITE(SDEIIR, pch_iir);
2315 ret = IRQ_HANDLED;
2316 cpt_irq_handler(dev, pch_iir);
2317 } else
2318 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2319 }
2322 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2323 POSTING_READ(GEN8_MASTER_IRQ);
2325 return ret;
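/*
 * Editor's sketch (not in the original source): on GEN8 the master IRQ
 * register acts as a first-level IIR, where each set bit selects a
 * second-level IIR bank that is read, cleared and handled on its own.
 * service() is a hypothetical helper standing in for that read/clear/handle
 * sequence; the "master control interrupt lied" errors above fire when a
 * selected bank unexpectedly reads back zero.
 *
 *	master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 *	I915_WRITE(GEN8_MASTER_IRQ, 0);			// gate everything
 *	if (master_ctl & GEN8_DE_MISC_IRQ)
 *		service(GEN8_DE_MISC_IIR);
 *	for_each_pipe(dev_priv, pipe)
 *		if (master_ctl & GEN8_DE_PIPE_IRQ(pipe))
 *			service(GEN8_DE_PIPE_IIR(pipe));
 *	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 *	POSTING_READ(GEN8_MASTER_IRQ);
 */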
2328 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2329 bool reset_completed)
2331 struct intel_engine_cs *ring;
2334 /*
2335 * Notify all waiters for GPU completion events that reset state has
2336 * been changed, and that they need to restart their wait after
2337 * checking for potential errors (and bail out to drop locks if there is
2338 * a gpu reset pending so that i915_error_work_func can acquire them).
2339 */
2341 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2342 for_each_ring(ring, dev_priv, i)
2343 wake_up_all(&ring->irq_queue);
2345 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2346 wake_up_all(&dev_priv->pending_flip_queue);
2348 /*
2349 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2350 * reset state is cleared.
2351 */
2352 if (reset_completed)
2353 wake_up_all(&dev_priv->gpu_error.reset_queue);
2356 /**
2357 * i915_error_work_func - do process context error handling work
2358 * @work: work struct
2360 * Fire an error uevent so userspace can see that a hang or error
2361 * occurred.
2362 */
2363 static void i915_error_work_func(struct work_struct *work)
2365 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2366 work);
2367 struct drm_i915_private *dev_priv =
2368 container_of(error, struct drm_i915_private, gpu_error);
2369 struct drm_device *dev = dev_priv->dev;
2370 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2371 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2372 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2375 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2377 /*
2378 * Note that there's only one work item which does gpu resets, so we
2379 * need not worry about concurrent gpu resets potentially incrementing
2380 * error->reset_counter twice. We only need to take care of another
2381 * racing irq/hangcheck declaring the gpu dead for a second time. A
2382 * quick check for that is good enough: schedule_work ensures the
2383 * correct ordering between hang detection and this work item, and since
2384 * the reset in-progress bit is only ever set by code outside of this
2385 * work we don't need to worry about any other races.
2386 */
2387 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2388 DRM_DEBUG_DRIVER("resetting chip\n");
2389 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2390 reset_event);
2392 /*
2393 * In most cases it's guaranteed that we get here with an RPM
2394 * reference held, for example because there is a pending GPU
2395 * request that won't finish until the reset is done. This
2396 * isn't the case at least when we get here by doing a
2397 * simulated reset via debugfs, so get an RPM reference.
2398 */
2399 intel_runtime_pm_get(dev_priv);
2400 /*
2401 * All state reset _must_ be completed before we update the
2402 * reset counter, for otherwise waiters might miss the reset
2403 * pending state and not properly drop locks, resulting in
2404 * deadlocks with the reset work.
2405 */
2406 ret = i915_reset(dev);
2408 intel_display_handle_reset(dev);
2410 intel_runtime_pm_put(dev_priv);
2412 if (ret == 0) {
2413 /*
2414 * After all the gem state is reset, increment the reset
2415 * counter and wake up everyone waiting for the reset to
2416 * complete.
2417 *
2418 * Since unlock operations are a one-sided barrier only,
2419 * we need to insert a barrier here to order any seqno
2420 * updates before
2421 * the counter increment.
2422 */
2423 smp_mb__before_atomic();
2424 atomic_inc(&dev_priv->gpu_error.reset_counter);
2426 kobject_uevent_env(&dev->primary->kdev->kobj,
2427 KOBJ_CHANGE, reset_done_event);
2428 } else {
2429 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2430 }
2432 /*
2433 * Note: The wake_up also serves as a memory barrier so that
2434 * waiters see the updated value of the reset counter atomic_t.
2435 */
2436 i915_error_wake_up(dev_priv, true);
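/*
 * Editor's note (illustration only): the reset_counter encoding that the
 * work function above relies on - per this driver's i915_drv.h - keeps a
 * reset-in-progress flag in the low bit and the terminal I915_WEDGED flag in
 * a high bit, so waiters can classify the state with plain atomic reads:
 *
 *	if (atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG)
 *		back off, drop locks, wait on gpu_error.reset_queue
 *	if (atomic_read(&error->reset_counter) & I915_WEDGED)
 *		give up: the reset failed and the GPU is wedged
 *
 * The atomic_inc() above is what moves the counter past the in-progress
 * state once i915_reset() has succeeded.
 */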
2440 static void i915_report_and_clear_eir(struct drm_device *dev)
2442 struct drm_i915_private *dev_priv = dev->dev_private;
2443 uint32_t instdone[I915_NUM_INSTDONE_REG];
2444 u32 eir = I915_READ(EIR);
2445 int pipe, i;
2447 if (!eir)
2448 return;
2450 pr_err("render error detected, EIR: 0x%08x\n", eir);
2452 i915_get_extra_instdone(dev, instdone);
2454 if (IS_G4X(dev)) {
2455 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2456 u32 ipeir = I915_READ(IPEIR_I965);
2458 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2459 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2460 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2461 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2462 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2463 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2464 I915_WRITE(IPEIR_I965, ipeir);
2465 POSTING_READ(IPEIR_I965);
2467 if (eir & GM45_ERROR_PAGE_TABLE) {
2468 u32 pgtbl_err = I915_READ(PGTBL_ER);
2469 pr_err("page table error\n");
2470 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2471 I915_WRITE(PGTBL_ER, pgtbl_err);
2472 POSTING_READ(PGTBL_ER);
2473 }
2474 }
2476 if (!IS_GEN2(dev)) {
2477 if (eir & I915_ERROR_PAGE_TABLE) {
2478 u32 pgtbl_err = I915_READ(PGTBL_ER);
2479 pr_err("page table error\n");
2480 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2481 I915_WRITE(PGTBL_ER, pgtbl_err);
2482 POSTING_READ(PGTBL_ER);
2486 if (eir & I915_ERROR_MEMORY_REFRESH) {
2487 pr_err("memory refresh error:\n");
2488 for_each_pipe(dev_priv, pipe)
2489 pr_err("pipe %c stat: 0x%08x\n",
2490 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2491 /* pipestat has already been acked */
2493 if (eir & I915_ERROR_INSTRUCTION) {
2494 pr_err("instruction error\n");
2495 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2496 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2497 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2498 if (INTEL_INFO(dev)->gen < 4) {
2499 u32 ipeir = I915_READ(IPEIR);
2501 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2502 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2503 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2504 I915_WRITE(IPEIR, ipeir);
2505 POSTING_READ(IPEIR);
2506 } else {
2507 u32 ipeir = I915_READ(IPEIR_I965);
2509 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2510 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2511 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2512 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2513 I915_WRITE(IPEIR_I965, ipeir);
2514 POSTING_READ(IPEIR_I965);
2518 I915_WRITE(EIR, eir);
2519 POSTING_READ(EIR);
2520 eir = I915_READ(EIR);
2521 if (eir) {
2522 /*
2523 * some errors might have become stuck,
2524 * mask them.
2525 */
2526 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2527 I915_WRITE(EMR, I915_READ(EMR) | eir);
2528 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2532 /**
2533 * i915_handle_error - handle an error interrupt
2534 * @dev: drm device
2535 *
2536 * Do some basic checking of register state at error interrupt time and
2537 * dump it to the syslog. Also call i915_capture_error_state() to make
2538 * sure we get a record and make it available in debugfs. Fire a uevent
2539 * so userspace knows something bad happened (should trigger collection
2540 * of a ring dump etc.).
2541 */
2542 void i915_handle_error(struct drm_device *dev, bool wedged,
2543 const char *fmt, ...)
2545 struct drm_i915_private *dev_priv = dev->dev_private;
2549 va_start(args, fmt);
2550 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2551 va_end(args);
2553 i915_capture_error_state(dev, wedged, error_msg);
2554 i915_report_and_clear_eir(dev);
2556 if (wedged) {
2557 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2558 &dev_priv->gpu_error.reset_counter);
2560 /*
2561 * Wakeup waiting processes so that the reset work function
2562 * i915_error_work_func doesn't deadlock trying to grab various
2563 * locks. By bumping the reset counter first, the woken
2564 * processes will see a reset in progress and back off,
2565 * releasing their locks and then wait for the reset completion.
2566 * We must do this for _all_ gpu waiters that might hold locks
2567 * that the reset work needs to acquire.
2569 * Note: The wake_up serves as the required memory barrier to
2570 * ensure that the waiters see the updated value of the reset
2571 * counter.
2572 */
2573 i915_error_wake_up(dev_priv, false);
2574 }
2576 /*
2577 * Our reset work can grab modeset locks (since it needs to reset the
2578 * state of outstanding pageflips). Hence it must not be run on our own
2579 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2580 * code will deadlock.
2581 */
2582 schedule_work(&dev_priv->gpu_error.work);
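/*
 * Editor's usage note (not in the original source): callers pass
 * wedged=false for errors that only need capturing and wedged=true to
 * request a full GPU reset; the printf-style tail becomes the captured
 * error message. Both forms appear later in this file:
 *
 *	i915_handle_error(dev, false, "Command parser error, iir 0x%08x", iir);
 *	i915_handle_error(dev, true, "Ring hung");
 */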
2585 /* Called from drm generic code, passed 'crtc' which
2586 * we use as a pipe index
2587 */
2588 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2590 struct drm_i915_private *dev_priv = dev->dev_private;
2591 unsigned long irqflags;
2593 if (!i915_pipe_enabled(dev, pipe))
2594 return -EINVAL;
2596 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2597 if (INTEL_INFO(dev)->gen >= 4)
2598 i915_enable_pipestat(dev_priv, pipe,
2599 PIPE_START_VBLANK_INTERRUPT_STATUS);
2600 else
2601 i915_enable_pipestat(dev_priv, pipe,
2602 PIPE_VBLANK_INTERRUPT_STATUS);
2603 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2608 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2610 struct drm_i915_private *dev_priv = dev->dev_private;
2611 unsigned long irqflags;
2612 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2613 DE_PIPE_VBLANK(pipe);
2615 if (!i915_pipe_enabled(dev, pipe))
2616 return -EINVAL;
2618 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2619 ironlake_enable_display_irq(dev_priv, bit);
2620 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2625 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2627 struct drm_i915_private *dev_priv = dev->dev_private;
2628 unsigned long irqflags;
2630 if (!i915_pipe_enabled(dev, pipe))
2631 return -EINVAL;
2633 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2634 i915_enable_pipestat(dev_priv, pipe,
2635 PIPE_START_VBLANK_INTERRUPT_STATUS);
2636 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2641 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2643 struct drm_i915_private *dev_priv = dev->dev_private;
2644 unsigned long irqflags;
2646 if (!i915_pipe_enabled(dev, pipe))
2647 return -EINVAL;
2649 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2650 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2651 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2652 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2653 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2657 /* Called from drm generic code, passed 'crtc' which
2658 * we use as a pipe index
2659 */
2660 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2662 struct drm_i915_private *dev_priv = dev->dev_private;
2663 unsigned long irqflags;
2665 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2666 i915_disable_pipestat(dev_priv, pipe,
2667 PIPE_VBLANK_INTERRUPT_STATUS |
2668 PIPE_START_VBLANK_INTERRUPT_STATUS);
2669 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2672 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2674 struct drm_i915_private *dev_priv = dev->dev_private;
2675 unsigned long irqflags;
2676 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2677 DE_PIPE_VBLANK(pipe);
2679 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2680 ironlake_disable_display_irq(dev_priv, bit);
2681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2684 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2686 struct drm_i915_private *dev_priv = dev->dev_private;
2687 unsigned long irqflags;
2689 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2690 i915_disable_pipestat(dev_priv, pipe,
2691 PIPE_START_VBLANK_INTERRUPT_STATUS);
2692 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2695 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 unsigned long irqflags;
2700 if (!i915_pipe_enabled(dev, pipe))
2701 return;
2703 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2704 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2705 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2706 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2707 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
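/*
 * Editor's note (illustration only): the enable/disable pairs above differ
 * mainly in where the vblank mask lives per generation - PIPESTAT enables on
 * gen2-4 and VLV, DEIMR bits on ILK-HSW, and a per-pipe GEN8_DE_PIPE_IMR on
 * gen8+. The gen8 toggle is a plain IMR read-modify-write under irq_lock:
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;	// unmask
 *	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
 *	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */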
2710 static u32
2711 ring_last_seqno(struct intel_engine_cs *ring)
2713 return list_entry(ring->request_list.prev,
2714 struct drm_i915_gem_request, list)->seqno;
2717 static bool
2718 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2720 return (list_empty(&ring->request_list) ||
2721 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2724 static bool
2725 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2727 if (INTEL_INFO(dev)->gen >= 8) {
2728 return (ipehr >> 23) == 0x1c;
2729 } else {
2730 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2731 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2732 MI_SEMAPHORE_REGISTER);
2736 static struct intel_engine_cs *
2737 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2739 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2740 struct intel_engine_cs *signaller;
2743 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2744 for_each_ring(signaller, dev_priv, i) {
2745 if (ring == signaller)
2746 continue;
2748 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2749 return signaller;
2750 }
2751 } else {
2752 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2754 for_each_ring(signaller, dev_priv, i) {
2755 if (ring == signaller)
2756 continue;
2758 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2759 return signaller;
2760 }
2761 }
2763 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2764 ring->id, ipehr, offset);
2766 return NULL;
2769 static struct intel_engine_cs *
2770 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2772 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2773 u32 cmd, ipehr, head;
2777 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2778 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2779 return NULL;
2781 /*
2782 * HEAD is likely pointing to the dword after the actual command,
2783 * so scan backwards until we find the MBOX. But limit it to just 3
2784 * or 4 dwords depending on the semaphore wait command size.
2785 * Note that we don't care about ACTHD here since that might
2786 * point at a batch, and semaphores are always emitted into the
2787 * ringbuffer itself.
2788 */
2789 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2790 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2792 for (i = backwards; i; --i) {
2793 /*
2794 * Be paranoid and presume the hw has gone off into the wild -
2795 * our ring is smaller than what the hardware (and hence
2796 * HEAD_ADDR) allows. Also handles wrap-around.
2797 */
2798 head &= ring->buffer->size - 1;
2800 /* This here seems to blow up */
2801 cmd = ioread32(ring->buffer->virtual_start + head);
2802 if (cmd == ipehr)
2803 break;
2805 head -= 4;
2806 }
2808 if (!i)
2809 return NULL;
2811 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2812 if (INTEL_INFO(ring->dev)->gen >= 8) {
2813 offset = ioread32(ring->buffer->virtual_start + head + 12);
2814 offset <<= 32;
2815 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2817 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
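/*
 * Editor's sketch of the ring dwords decoded above (the layout this code
 * assumes, not an authoritative hardware reference):
 *
 *	gen6/7: [MI_SEMAPHORE_MBOX | COMPARE | REGISTER]   <- matches ipehr
 *	        [seqno - 1]                                <- head + 4
 *	        [mbox register]                            <- head + 8
 *
 *	gen8+:  [MI semaphore wait opcode]                 <- matches ipehr
 *	        [seqno - 1]                                <- head + 4
 *	        [signal offset, low dword]                 <- head + 8
 *	        [signal offset, high dword]                <- head + 12
 *
 * which is why *seqno gets the +1 and why the gen8 path stitches a 64-bit
 * offset together from two 32-bit reads.
 */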
2820 static int semaphore_passed(struct intel_engine_cs *ring)
2822 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2823 struct intel_engine_cs *signaller;
2826 ring->hangcheck.deadlock++;
2828 signaller = semaphore_waits_for(ring, &seqno);
2829 if (signaller == NULL)
2830 return -1;
2832 /* Prevent pathological recursion due to driver bugs */
2833 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2834 return -1;
2836 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2837 return 1;
2839 /* cursory check for an unkickable deadlock */
2840 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2841 semaphore_passed(signaller) < 0)
2842 return -1;
2844 return 0;
2847 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2849 struct intel_engine_cs *ring;
2852 for_each_ring(ring, dev_priv, i)
2853 ring->hangcheck.deadlock = 0;
2856 static enum intel_ring_hangcheck_action
2857 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2859 struct drm_device *dev = ring->dev;
2860 struct drm_i915_private *dev_priv = dev->dev_private;
2863 if (acthd != ring->hangcheck.acthd) {
2864 if (acthd > ring->hangcheck.max_acthd) {
2865 ring->hangcheck.max_acthd = acthd;
2866 return HANGCHECK_ACTIVE;
2869 return HANGCHECK_ACTIVE_LOOP;
2870 }
2872 if (IS_GEN2(dev))
2873 return HANGCHECK_HUNG;
2875 /* Is the chip hanging on a WAIT_FOR_EVENT?
2876 * If so we can simply poke the RB_WAIT bit
2877 * and break the hang. This should work on
2878 * all but the second generation chipsets.
2879 */
2880 tmp = I915_READ_CTL(ring);
2881 if (tmp & RING_WAIT) {
2882 i915_handle_error(dev, false,
2883 "Kicking stuck wait on %s",
2884 ring->name);
2885 I915_WRITE_CTL(ring, tmp);
2886 return HANGCHECK_KICK;
2889 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2890 switch (semaphore_passed(ring)) {
2891 default:
2892 return HANGCHECK_HUNG;
2893 case 1:
2894 i915_handle_error(dev, false,
2895 "Kicking stuck semaphore on %s",
2896 ring->name);
2897 I915_WRITE_CTL(ring, tmp);
2898 return HANGCHECK_KICK;
2899 case 0:
2900 return HANGCHECK_WAIT;
2901 }
2902 }
2904 return HANGCHECK_HUNG;
2907 /*
2908 * This is called when the chip hasn't reported back with completed
2909 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2910 * if there is no progress, the hangcheck score for that ring is increased.
2911 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2912 * we kick the ring. If we see no progress on three subsequent calls
2913 * we assume chip is wedged and try to fix it by resetting the chip.
2914 */
2915 static void i915_hangcheck_elapsed(unsigned long data)
2917 struct drm_device *dev = (struct drm_device *)data;
2918 struct drm_i915_private *dev_priv = dev->dev_private;
2919 struct intel_engine_cs *ring;
2921 int busy_count = 0, rings_hung = 0;
2922 bool stuck[I915_NUM_RINGS] = { 0 };
2927 if (!i915.enable_hangcheck)
2928 return;
2930 for_each_ring(ring, dev_priv, i) {
2935 semaphore_clear_deadlocks(dev_priv);
2937 seqno = ring->get_seqno(ring, false);
2938 acthd = intel_ring_get_active_head(ring);
2940 if (ring->hangcheck.seqno == seqno) {
2941 if (ring_idle(ring, seqno)) {
2942 ring->hangcheck.action = HANGCHECK_IDLE;
2944 if (waitqueue_active(&ring->irq_queue)) {
2945 /* Issue a wake-up to catch stuck h/w. */
2946 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2947 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2948 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2949 ring->name);
2950 else
2951 DRM_INFO("Fake missed irq on %s\n",
2952 ring->name);
2953 wake_up_all(&ring->irq_queue);
2955 /* Safeguard against driver failure */
2956 ring->hangcheck.score += BUSY;
2960 /* We always increment the hangcheck score
2961 * if the ring is busy and still processing
2962 * the same request, so that no single request
2963 * can run indefinitely (such as a chain of
2964 * batches). The only time we do not increment
2965 * the hangcheck score on this ring is if this
2966 * ring is in a legitimate wait for another
2967 * ring. In that case the waiting ring is a
2968 * victim and we want to be sure we catch the
2969 * right culprit. Then every time we do kick
2970 * the ring, add a small increment to the
2971 * score so that we can catch a batch that is
2972 * being repeatedly kicked and so responsible
2973 * for stalling the machine.
2974 */
2975 ring->hangcheck.action = ring_stuck(ring,
2976 acthd);
2978 switch (ring->hangcheck.action) {
2979 case HANGCHECK_IDLE:
2980 case HANGCHECK_WAIT:
2981 case HANGCHECK_ACTIVE:
2982 break;
2983 case HANGCHECK_ACTIVE_LOOP:
2984 ring->hangcheck.score += BUSY;
2985 break;
2986 case HANGCHECK_KICK:
2987 ring->hangcheck.score += KICK;
2988 break;
2989 case HANGCHECK_HUNG:
2990 ring->hangcheck.score += HUNG;
2991 stuck[i] = true;
2992 break;
2993 }
2994 }
2995 } else {
2996 ring->hangcheck.action = HANGCHECK_ACTIVE;
2998 /* Gradually reduce the count so that we catch DoS
2999 * attempts across multiple batches.
3000 */
3001 if (ring->hangcheck.score > 0)
3002 ring->hangcheck.score--;
3004 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3007 ring->hangcheck.seqno = seqno;
3008 ring->hangcheck.acthd = acthd;
3012 for_each_ring(ring, dev_priv, i) {
3013 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3014 DRM_INFO("%s on %s\n",
3015 stuck[i] ? "stuck" : "no progress",
3016 ring->name);
3017 rings_hung++;
3018 }
3019 }
3021 if (rings_hung)
3022 return i915_handle_error(dev, true, "Ring hung");
3025 /* Reset timer in case chip hangs without another request
3026 * being added */
3027 i915_queue_hangcheck(dev);
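/*
 * Editor's note (illustration only): the scoring above accumulates small
 * per-action increments (BUSY, KICK, HUNG - defined earlier in this file)
 * against the single HANGCHECK_SCORE_RING_HUNG threshold. A ring that needs
 * repeated kicking therefore converges on a reset:
 *
 *	score += KICK;		// stuck wait kicked this sample
 *	score += KICK;		// still stuck on the next sample
 *	...			// eventually >= HANGCHECK_SCORE_RING_HUNG
 *
 * while a ring making progress decays back through the score-- path, so a
 * one-off stall never reaches the threshold.
 */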
3030 void i915_queue_hangcheck(struct drm_device *dev)
3032 struct drm_i915_private *dev_priv = dev->dev_private;
3033 if (!i915.enable_hangcheck)
3034 return;
3036 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3037 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3040 static void ibx_irq_reset(struct drm_device *dev)
3042 struct drm_i915_private *dev_priv = dev->dev_private;
3044 if (HAS_PCH_NOP(dev))
3045 return;
3047 GEN5_IRQ_RESET(SDE);
3049 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3050 I915_WRITE(SERR_INT, 0xffffffff);
3053 /*
3054 * SDEIER is also touched by the interrupt handler to work around missed PCH
3055 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3056 * instead we unconditionally enable all PCH interrupt sources here, but then
3057 * only unmask them as needed with SDEIMR.
3059 * This function needs to be called before interrupts are enabled.
3060 */
3061 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3063 struct drm_i915_private *dev_priv = dev->dev_private;
3065 if (HAS_PCH_NOP(dev))
3066 return;
3068 WARN_ON(I915_READ(SDEIER) != 0);
3069 I915_WRITE(SDEIER, 0xffffffff);
3070 POSTING_READ(SDEIER);
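/*
 * Editor's sketch (not in the original source): the resulting PCH interrupt
 * setup leaves IER fully open and does all masking through IMR, so the
 * interrupt handler can safely toggle SDEIER for its missed-interrupt
 * workaround without losing configuration:
 *
 *	I915_WRITE(SDEIER, 0xffffffff);		// all sources always routed
 *	I915_WRITE(SDEIMR, ~mask);		// visibility controlled here
 *
 * ibx_irq_postinstall() below supplies the actual mask.
 */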
3073 static void gen5_gt_irq_reset(struct drm_device *dev)
3075 struct drm_i915_private *dev_priv = dev->dev_private;
3077 GEN5_IRQ_RESET(GT);
3078 if (INTEL_INFO(dev)->gen >= 6)
3079 GEN5_IRQ_RESET(GEN6_PM);
3084 static void ironlake_irq_reset(struct drm_device *dev)
3086 struct drm_i915_private *dev_priv = dev->dev_private;
3088 I915_WRITE(HWSTAM, 0xffffffff);
3090 GEN5_IRQ_RESET(DE);
3091 if (IS_GEN7(dev))
3092 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3094 gen5_gt_irq_reset(dev);
3096 ibx_irq_reset(dev);
3099 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3103 I915_WRITE(PORT_HOTPLUG_EN, 0);
3104 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3106 for_each_pipe(dev_priv, pipe)
3107 I915_WRITE(PIPESTAT(pipe), 0xffff);
3109 GEN5_IRQ_RESET(VLV_);
3112 static void valleyview_irq_preinstall(struct drm_device *dev)
3114 struct drm_i915_private *dev_priv = dev->dev_private;
3116 /* VLV magic */
3117 I915_WRITE(VLV_IMR, 0);
3118 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3119 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3120 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3122 gen5_gt_irq_reset(dev);
3124 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3126 vlv_display_irq_reset(dev_priv);
3129 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3131 GEN8_IRQ_RESET_NDX(GT, 0);
3132 GEN8_IRQ_RESET_NDX(GT, 1);
3133 GEN8_IRQ_RESET_NDX(GT, 2);
3134 GEN8_IRQ_RESET_NDX(GT, 3);
3137 static void gen8_irq_reset(struct drm_device *dev)
3139 struct drm_i915_private *dev_priv = dev->dev_private;
3142 I915_WRITE(GEN8_MASTER_IRQ, 0);
3143 POSTING_READ(GEN8_MASTER_IRQ);
3145 gen8_gt_irq_reset(dev_priv);
3147 for_each_pipe(dev_priv, pipe)
3148 if (intel_display_power_is_enabled(dev_priv,
3149 POWER_DOMAIN_PIPE(pipe)))
3150 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3152 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3153 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3154 GEN5_IRQ_RESET(GEN8_PCU_);
3156 ibx_irq_reset(dev);
3159 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3161 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3163 spin_lock_irq(&dev_priv->irq_lock);
3164 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3165 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3166 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3167 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3168 spin_unlock_irq(&dev_priv->irq_lock);
3171 static void cherryview_irq_preinstall(struct drm_device *dev)
3173 struct drm_i915_private *dev_priv = dev->dev_private;
3175 I915_WRITE(GEN8_MASTER_IRQ, 0);
3176 POSTING_READ(GEN8_MASTER_IRQ);
3178 gen8_gt_irq_reset(dev_priv);
3180 GEN5_IRQ_RESET(GEN8_PCU_);
3182 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3184 vlv_display_irq_reset(dev_priv);
3187 static void ibx_hpd_irq_setup(struct drm_device *dev)
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 struct intel_encoder *intel_encoder;
3191 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3193 if (HAS_PCH_IBX(dev)) {
3194 hotplug_irqs = SDE_HOTPLUG_MASK;
3195 for_each_intel_encoder(dev, intel_encoder)
3196 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3197 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3198 } else {
3199 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3200 for_each_intel_encoder(dev, intel_encoder)
3201 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3202 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3205 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3207 /*
3208 * Enable digital hotplug on the PCH, and configure the DP short pulse
3209 * duration to 2ms (which is the minimum in the Display Port spec)
3211 * This register is the same on all known PCH chips.
3212 */
3213 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3214 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3215 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3216 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3217 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3218 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3221 static void ibx_irq_postinstall(struct drm_device *dev)
3223 struct drm_i915_private *dev_priv = dev->dev_private;
3226 if (HAS_PCH_NOP(dev))
3227 return;
3229 if (HAS_PCH_IBX(dev))
3230 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3231 else
3232 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3234 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3235 I915_WRITE(SDEIMR, ~mask);
3238 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3240 struct drm_i915_private *dev_priv = dev->dev_private;
3241 u32 pm_irqs, gt_irqs;
3243 pm_irqs = gt_irqs = 0;
3245 dev_priv->gt_irq_mask = ~0;
3246 if (HAS_L3_DPF(dev)) {
3247 /* L3 parity interrupt is always unmasked. */
3248 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3249 gt_irqs |= GT_PARITY_ERROR(dev);
3252 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3253 if (IS_GEN5(dev)) {
3254 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3255 ILK_BSD_USER_INTERRUPT;
3256 } else {
3257 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3258 }
3260 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3262 if (INTEL_INFO(dev)->gen >= 6) {
3263 pm_irqs |= dev_priv->pm_rps_events;
3265 if (HAS_VEBOX(dev))
3266 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3268 dev_priv->pm_irq_mask = 0xffffffff;
3269 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3273 static int ironlake_irq_postinstall(struct drm_device *dev)
3275 struct drm_i915_private *dev_priv = dev->dev_private;
3276 u32 display_mask, extra_mask;
3278 if (INTEL_INFO(dev)->gen >= 7) {
3279 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3280 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3281 DE_PLANEB_FLIP_DONE_IVB |
3282 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3283 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3284 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3286 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3287 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3288 DE_AUX_CHANNEL_A |
3289 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3290 DE_POISON);
3291 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3292 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3295 dev_priv->irq_mask = ~display_mask;
3297 I915_WRITE(HWSTAM, 0xeffe);
3299 ibx_irq_pre_postinstall(dev);
3301 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3303 gen5_gt_irq_postinstall(dev);
3305 ibx_irq_postinstall(dev);
3307 if (IS_IRONLAKE_M(dev)) {
3308 /* Enable PCU event interrupts
3310 * spinlocking not required here for correctness since interrupt
3311 * setup is guaranteed to run in single-threaded context. But we
3312 * need it to make the assert_spin_locked happy. */
3313 spin_lock_irq(&dev_priv->irq_lock);
3314 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3315 spin_unlock_irq(&dev_priv->irq_lock);
3321 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3327 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3328 PIPE_FIFO_UNDERRUN_STATUS;
3330 for_each_pipe(dev_priv, pipe)
3331 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3332 POSTING_READ(PIPESTAT(PIPE_A));
3334 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3335 PIPE_CRC_DONE_INTERRUPT_STATUS;
3337 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3338 for_each_pipe(dev_priv, pipe)
3339 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3341 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3342 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3343 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3344 if (IS_CHERRYVIEW(dev_priv))
3345 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3346 dev_priv->irq_mask &= ~iir_mask;
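/* Editor's note: the back-to-back VLV_IIR writes below are presumably the
 * usual two-event paranoia - IIR can latch a second edge while the first is
 * being cleared - mirroring the double clear in the IRQ reset macros. */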
3348 I915_WRITE(VLV_IIR, iir_mask);
3349 I915_WRITE(VLV_IIR, iir_mask);
3350 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3351 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3352 POSTING_READ(VLV_IMR);
3355 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3361 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3362 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3363 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3364 if (IS_CHERRYVIEW(dev_priv))
3365 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3367 dev_priv->irq_mask |= iir_mask;
3368 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3369 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3370 I915_WRITE(VLV_IIR, iir_mask);
3371 I915_WRITE(VLV_IIR, iir_mask);
3372 POSTING_READ(VLV_IIR);
3374 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3375 PIPE_CRC_DONE_INTERRUPT_STATUS;
3377 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3378 for_each_pipe(dev_priv, pipe)
3379 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3381 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3382 PIPE_FIFO_UNDERRUN_STATUS;
3384 for_each_pipe(dev_priv, pipe)
3385 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3386 POSTING_READ(PIPESTAT(PIPE_A));
3389 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3391 assert_spin_locked(&dev_priv->irq_lock);
3393 if (dev_priv->display_irqs_enabled)
3394 return;
3396 dev_priv->display_irqs_enabled = true;
3398 if (intel_irqs_enabled(dev_priv))
3399 valleyview_display_irqs_install(dev_priv);
3402 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3404 assert_spin_locked(&dev_priv->irq_lock);
3406 if (!dev_priv->display_irqs_enabled)
3407 return;
3409 dev_priv->display_irqs_enabled = false;
3411 if (intel_irqs_enabled(dev_priv))
3412 valleyview_display_irqs_uninstall(dev_priv);
3415 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3417 dev_priv->irq_mask = ~0;
3419 I915_WRITE(PORT_HOTPLUG_EN, 0);
3420 POSTING_READ(PORT_HOTPLUG_EN);
3422 I915_WRITE(VLV_IIR, 0xffffffff);
3423 I915_WRITE(VLV_IIR, 0xffffffff);
3424 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3425 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3426 POSTING_READ(VLV_IMR);
3428 /* Interrupt setup is already guaranteed to be single-threaded, this is
3429 * just to make the assert_spin_locked check happy. */
3430 spin_lock_irq(&dev_priv->irq_lock);
3431 if (dev_priv->display_irqs_enabled)
3432 valleyview_display_irqs_install(dev_priv);
3433 spin_unlock_irq(&dev_priv->irq_lock);
3436 static int valleyview_irq_postinstall(struct drm_device *dev)
3438 struct drm_i915_private *dev_priv = dev->dev_private;
3440 vlv_display_irq_postinstall(dev_priv);
3442 gen5_gt_irq_postinstall(dev);
3444 /* ack & enable invalid PTE error interrupts */
3445 #if 0 /* FIXME: add support to irq handler for checking these bits */
3446 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3447 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3448 #endif
3450 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3455 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3457 /* These are interrupts we'll toggle with the ring mask register */
3458 uint32_t gt_interrupts[] = {
3459 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3460 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3461 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3462 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3463 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3464 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3465 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3466 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3467 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3468 0,
3469 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3470 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3471 };
3473 dev_priv->pm_irq_mask = 0xffffffff;
3474 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3475 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3476 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3477 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3480 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3482 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3483 uint32_t de_pipe_enables;
3486 if (IS_GEN9(dev_priv))
3487 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3488 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3490 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3491 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3493 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3494 GEN8_PIPE_FIFO_UNDERRUN;
3496 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3497 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3498 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3500 for_each_pipe(dev_priv, pipe)
3501 if (intel_display_power_is_enabled(dev_priv,
3502 POWER_DOMAIN_PIPE(pipe)))
3503 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3504 dev_priv->de_irq_mask[pipe],
3505 de_pipe_enables);
3507 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3510 static int gen8_irq_postinstall(struct drm_device *dev)
3512 struct drm_i915_private *dev_priv = dev->dev_private;
3514 ibx_irq_pre_postinstall(dev);
3516 gen8_gt_irq_postinstall(dev_priv);
3517 gen8_de_irq_postinstall(dev_priv);
3519 ibx_irq_postinstall(dev);
3521 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3522 POSTING_READ(GEN8_MASTER_IRQ);
3527 static int cherryview_irq_postinstall(struct drm_device *dev)
3529 struct drm_i915_private *dev_priv = dev->dev_private;
3530 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3531 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3532 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3533 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3534 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3535 PIPE_CRC_DONE_INTERRUPT_STATUS;
3538 /*
3539 * Leave vblank interrupts masked initially. enable/disable will
3540 * toggle them based on usage.
3541 */
3542 dev_priv->irq_mask = ~enable_mask;
3544 for_each_pipe(dev_priv, pipe)
3545 I915_WRITE(PIPESTAT(pipe), 0xffff);
3547 spin_lock_irq(&dev_priv->irq_lock);
3548 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3549 for_each_pipe(dev_priv, pipe)
3550 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3551 spin_unlock_irq(&dev_priv->irq_lock);
3553 I915_WRITE(VLV_IIR, 0xffffffff);
3554 I915_WRITE(VLV_IIR, 0xffffffff);
3555 I915_WRITE(VLV_IER, enable_mask);
3556 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3557 POSTING_READ(VLV_IMR);
3559 gen8_gt_irq_postinstall(dev_priv);
3561 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3562 POSTING_READ(GEN8_MASTER_IRQ);
3567 static void gen8_irq_uninstall(struct drm_device *dev)
3569 struct drm_i915_private *dev_priv = dev->dev_private;
3571 if (!dev_priv)
3572 return;
3574 gen8_irq_reset(dev);
3577 static void valleyview_irq_uninstall(struct drm_device *dev)
3579 struct drm_i915_private *dev_priv = dev->dev_private;
3581 if (!dev_priv)
3582 return;
3584 I915_WRITE(VLV_MASTER_IER, 0);
3586 gen5_gt_irq_reset(dev);
3588 I915_WRITE(HWSTAM, 0xffffffff);
3590 /* Interrupt setup is already guaranteed to be single-threaded, this is
3591 * just to make the assert_spin_locked check happy. */
3592 spin_lock_irq(&dev_priv->irq_lock);
3593 if (dev_priv->display_irqs_enabled)
3594 valleyview_display_irqs_uninstall(dev_priv);
3595 spin_unlock_irq(&dev_priv->irq_lock);
3597 vlv_display_irq_reset(dev_priv);
3599 dev_priv->irq_mask = 0;
3602 static void cherryview_irq_uninstall(struct drm_device *dev)
3604 struct drm_i915_private *dev_priv = dev->dev_private;
3606 if (!dev_priv)
3607 return;
3610 I915_WRITE(GEN8_MASTER_IRQ, 0);
3611 POSTING_READ(GEN8_MASTER_IRQ);
3613 gen8_gt_irq_reset(dev_priv);
3615 GEN5_IRQ_RESET(GEN8_PCU_);
3617 I915_WRITE(PORT_HOTPLUG_EN, 0);
3618 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3620 for_each_pipe(dev_priv, pipe)
3621 I915_WRITE(PIPESTAT(pipe), 0xffff);
3623 GEN5_IRQ_RESET(VLV_);
3626 static void ironlake_irq_uninstall(struct drm_device *dev)
3628 struct drm_i915_private *dev_priv = dev->dev_private;
3630 if (!dev_priv)
3631 return;
3633 ironlake_irq_reset(dev);
3636 static void i8xx_irq_preinstall(struct drm_device * dev)
3638 struct drm_i915_private *dev_priv = dev->dev_private;
3641 for_each_pipe(dev_priv, pipe)
3642 I915_WRITE(PIPESTAT(pipe), 0);
3643 I915_WRITE16(IMR, 0xffff);
3644 I915_WRITE16(IER, 0x0);
3645 POSTING_READ16(IER);
3648 static int i8xx_irq_postinstall(struct drm_device *dev)
3650 struct drm_i915_private *dev_priv = dev->dev_private;
3652 I915_WRITE16(EMR,
3653 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3655 /* Unmask the interrupts that we always want on. */
3656 dev_priv->irq_mask =
3657 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3658 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3659 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3660 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3661 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3662 I915_WRITE16(IMR, dev_priv->irq_mask);
3664 I915_WRITE16(IER,
3665 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3666 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3667 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3668 I915_USER_INTERRUPT);
3669 POSTING_READ16(IER);
3671 /* Interrupt setup is already guaranteed to be single-threaded, this is
3672 * just to make the assert_spin_locked check happy. */
3673 spin_lock_irq(&dev_priv->irq_lock);
3674 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3675 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3676 spin_unlock_irq(&dev_priv->irq_lock);
3681 /*
3682 * Returns true when a page flip has completed.
3683 */
3684 static bool i8xx_handle_vblank(struct drm_device *dev,
3685 int plane, int pipe, u32 iir)
3687 struct drm_i915_private *dev_priv = dev->dev_private;
3688 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3690 if (!intel_pipe_handle_vblank(dev, pipe))
3691 return false;
3693 if ((iir & flip_pending) == 0)
3694 goto check_page_flip;
3696 intel_prepare_page_flip(dev, plane);
3698 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3699 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3700 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3701 * the flip is completed (no longer pending). Since this doesn't raise
3702 * an interrupt per se, we watch for the change at vblank.
3703 */
3704 if (I915_READ16(ISR) & flip_pending)
3705 goto check_page_flip;
3707 intel_finish_page_flip(dev, pipe);
3708 return true;
3710 check_page_flip:
3711 intel_check_page_flip(dev, pipe);
3712 return false;
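/*
 * Editor's sketch of the timeline tested above (illustration only):
 *
 *	MI_DISPLAY_FLIP queued  -> ISR pending-flip bit rises to 1
 *	flip completes          -> pending-flip drops to 0, with no interrupt
 *	next vblank interrupt   -> this code samples ISR: still 1 means the
 *	                           flip is outstanding, 0 means finish it
 *
 * so the vblank interrupt doubles as the polling edge for flip completion.
 */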
3715 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3717 struct drm_device *dev = arg;
3718 struct drm_i915_private *dev_priv = dev->dev_private;
3723 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3724 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3726 iir = I915_READ16(IIR);
3727 if (iir == 0)
3728 return IRQ_NONE;
3730 while (iir & ~flip_mask) {
3731 /* Can't rely on pipestat interrupt bit in iir as it might
3732 * have been cleared after the pipestat interrupt was received.
3733 * It doesn't set the bit in iir again, but it still produces
3734 * interrupts (for non-MSI).
3735 */
3736 spin_lock(&dev_priv->irq_lock);
3737 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3738 i915_handle_error(dev, false,
3739 "Command parser error, iir 0x%08x",
3740 iir);
3742 for_each_pipe(dev_priv, pipe) {
3743 int reg = PIPESTAT(pipe);
3744 pipe_stats[pipe] = I915_READ(reg);
3746 /*
3747 * Clear the PIPE*STAT regs before the IIR
3748 */
3749 if (pipe_stats[pipe] & 0x8000ffff)
3750 I915_WRITE(reg, pipe_stats[pipe]);
3752 spin_unlock(&dev_priv->irq_lock);
3754 I915_WRITE16(IIR, iir & ~flip_mask);
3755 new_iir = I915_READ16(IIR); /* Flush posted writes */
3757 i915_update_dri1_breadcrumb(dev);
3759 if (iir & I915_USER_INTERRUPT)
3760 notify_ring(dev, &dev_priv->ring[RCS]);
3762 for_each_pipe(dev_priv, pipe) {
3763 int plane = pipe;
3764 if (HAS_FBC(dev))
3765 plane = !plane;
3767 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3768 i8xx_handle_vblank(dev, plane, pipe, iir))
3769 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3771 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3772 i9xx_pipe_crc_irq_handler(dev, pipe);
3774 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3775 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3776 pipe);
3777 }
3779 iir = new_iir;
3780 }
3782 return IRQ_HANDLED;
3785 static void i8xx_irq_uninstall(struct drm_device * dev)
3787 struct drm_i915_private *dev_priv = dev->dev_private;
3790 for_each_pipe(dev_priv, pipe) {
3791 /* Clear enable bits; then clear status bits */
3792 I915_WRITE(PIPESTAT(pipe), 0);
3793 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3795 I915_WRITE16(IMR, 0xffff);
3796 I915_WRITE16(IER, 0x0);
3797 I915_WRITE16(IIR, I915_READ16(IIR));
3800 static void i915_irq_preinstall(struct drm_device * dev)
3802 struct drm_i915_private *dev_priv = dev->dev_private;
3805 if (I915_HAS_HOTPLUG(dev)) {
3806 I915_WRITE(PORT_HOTPLUG_EN, 0);
3807 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3810 I915_WRITE16(HWSTAM, 0xeffe);
3811 for_each_pipe(dev_priv, pipe)
3812 I915_WRITE(PIPESTAT(pipe), 0);
3813 I915_WRITE(IMR, 0xffffffff);
3814 I915_WRITE(IER, 0x0);
3818 static int i915_irq_postinstall(struct drm_device *dev)
3820 struct drm_i915_private *dev_priv = dev->dev_private;
3823 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3825 /* Unmask the interrupts that we always want on. */
3826 dev_priv->irq_mask =
3827 ~(I915_ASLE_INTERRUPT |
3828 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3829 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3830 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3831 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3832 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3835 I915_ASLE_INTERRUPT |
3836 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3837 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3838 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3839 I915_USER_INTERRUPT;
3841 if (I915_HAS_HOTPLUG(dev)) {
3842 I915_WRITE(PORT_HOTPLUG_EN, 0);
3843 POSTING_READ(PORT_HOTPLUG_EN);
3845 /* Enable in IER... */
3846 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3847 /* and unmask in IMR */
3848 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3851 I915_WRITE(IMR, dev_priv->irq_mask);
3852 I915_WRITE(IER, enable_mask);
3855 i915_enable_asle_pipestat(dev);
3857 /* Interrupt setup is already guaranteed to be single-threaded, this is
3858 * just to make the assert_spin_locked check happy. */
3859 spin_lock_irq(&dev_priv->irq_lock);
3860 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3861 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3862 spin_unlock_irq(&dev_priv->irq_lock);
3867 /*
3868 * Returns true when a page flip has completed.
3869 */
3870 static bool i915_handle_vblank(struct drm_device *dev,
3871 int plane, int pipe, u32 iir)
3873 struct drm_i915_private *dev_priv = dev->dev_private;
3874 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3876 if (!intel_pipe_handle_vblank(dev, pipe))
3877 return false;
3879 if ((iir & flip_pending) == 0)
3880 goto check_page_flip;
3882 intel_prepare_page_flip(dev, plane);
3884 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3885 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3886 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3887 * the flip is completed (no longer pending). Since this doesn't raise
3888 * an interrupt per se, we watch for the change at vblank.
3889 */
3890 if (I915_READ(ISR) & flip_pending)
3891 goto check_page_flip;
3893 intel_finish_page_flip(dev, pipe);
3894 return true;
3896 check_page_flip:
3897 intel_check_page_flip(dev, pipe);
3898 return false;
3901 static irqreturn_t i915_irq_handler(int irq, void *arg)
3903 struct drm_device *dev = arg;
3904 struct drm_i915_private *dev_priv = dev->dev_private;
3905 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3906 u32 flip_mask =
3907 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3908 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3909 int pipe, ret = IRQ_NONE;
3911 iir = I915_READ(IIR);
3912 do {
3913 bool irq_received = (iir & ~flip_mask) != 0;
3914 bool blc_event = false;
3916 /* Can't rely on pipestat interrupt bit in iir as it might
3917 * have been cleared after the pipestat interrupt was received.
3918 * It doesn't set the bit in iir again, but it still produces
3919 * interrupts (for non-MSI).
3920 */
3921 spin_lock(&dev_priv->irq_lock);
3922 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3923 i915_handle_error(dev, false,
3924 "Command parser error, iir 0x%08x",
3925 iir);
3927 for_each_pipe(dev_priv, pipe) {
3928 int reg = PIPESTAT(pipe);
3929 pipe_stats[pipe] = I915_READ(reg);
3931 /* Clear the PIPE*STAT regs before the IIR */
3932 if (pipe_stats[pipe] & 0x8000ffff) {
3933 I915_WRITE(reg, pipe_stats[pipe]);
3934 irq_received = true;
3937 spin_unlock(&dev_priv->irq_lock);
3939 if (!irq_received)
3940 break;
3942 /* Consume port. Then clear IIR or we'll miss events */
3943 if (I915_HAS_HOTPLUG(dev) &&
3944 iir & I915_DISPLAY_PORT_INTERRUPT)
3945 i9xx_hpd_irq_handler(dev);
3947 I915_WRITE(IIR, iir & ~flip_mask);
3948 new_iir = I915_READ(IIR); /* Flush posted writes */
3950 if (iir & I915_USER_INTERRUPT)
3951 notify_ring(dev, &dev_priv->ring[RCS]);
3953 for_each_pipe(dev_priv, pipe) {
3954 int plane = pipe;
3955 if (HAS_FBC(dev))
3956 plane = !plane;
3958 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3959 i915_handle_vblank(dev, plane, pipe, iir))
3960 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3962 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3963 blc_event = true;
3965 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3966 i9xx_pipe_crc_irq_handler(dev, pipe);
3968 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3969 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3970 pipe);
3971 }
3973 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3974 intel_opregion_asle_intr(dev);
3976 /* With MSI, interrupts are only generated when iir
3977 * transitions from zero to nonzero. If another bit got
3978 * set while we were handling the existing iir bits, then
3979 * we would never get another interrupt.
3981 * This is fine on non-MSI as well, as if we hit this path
3982 * we avoid exiting the interrupt handler only to generate
3985 * Note that for MSI this could cause a stray interrupt report
3986 * if an interrupt landed in the time between writing IIR and
3987 * the posting read. This should be rare enough to never
3988 * trigger the 99% of 100,000 interrupts test for disabling
3989 * stray interrupts.
3990 */
3991 ret = IRQ_HANDLED;
3992 iir = new_iir;
3993 } while (iir & ~flip_mask);
3995 i915_update_dri1_breadcrumb(dev);
3997 return ret;
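/*
 * Editor's sketch (not in the original source): the loop shape that the MSI
 * comment above motivates - the next iteration's IIR is re-derived from the
 * value read back after the clear, so a bit that popped up mid-handler is
 * not dropped:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir & ~flip_mask);
 *		new_iir = I915_READ(IIR);	// flush + catch stragglers
 *		... handle iir ...
 *		iir = new_iir;
 *	} while (iir & ~flip_mask);
 */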
4000 static void i915_irq_uninstall(struct drm_device * dev)
4002 struct drm_i915_private *dev_priv = dev->dev_private;
4005 if (I915_HAS_HOTPLUG(dev)) {
4006 I915_WRITE(PORT_HOTPLUG_EN, 0);
4007 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4010 I915_WRITE16(HWSTAM, 0xffff);
4011 for_each_pipe(dev_priv, pipe) {
4012 /* Clear enable bits; then clear status bits */
4013 I915_WRITE(PIPESTAT(pipe), 0);
4014 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4016 I915_WRITE(IMR, 0xffffffff);
4017 I915_WRITE(IER, 0x0);
4019 I915_WRITE(IIR, I915_READ(IIR));
4022 static void i965_irq_preinstall(struct drm_device * dev)
4024 struct drm_i915_private *dev_priv = dev->dev_private;
4027 I915_WRITE(PORT_HOTPLUG_EN, 0);
4028 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4030 I915_WRITE(HWSTAM, 0xeffe);
4031 for_each_pipe(dev_priv, pipe)
4032 I915_WRITE(PIPESTAT(pipe), 0);
4033 I915_WRITE(IMR, 0xffffffff);
4034 I915_WRITE(IER, 0x0);
4038 static int i965_irq_postinstall(struct drm_device *dev)
4040 struct drm_i915_private *dev_priv = dev->dev_private;
4044 /* Unmask the interrupts that we always want on. */
4045 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4046 I915_DISPLAY_PORT_INTERRUPT |
4047 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4048 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4049 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4050 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4051 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4053 enable_mask = ~dev_priv->irq_mask;
4054 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4055 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4056 enable_mask |= I915_USER_INTERRUPT;
4058 if (IS_G4X(dev))
4059 enable_mask |= I915_BSD_USER_INTERRUPT;
4061 /* Interrupt setup is already guaranteed to be single-threaded, this is
4062 * just to make the assert_spin_locked check happy. */
4063 spin_lock_irq(&dev_priv->irq_lock);
4064 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4065 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4066 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4067 spin_unlock_irq(&dev_priv->irq_lock);
4069 /*
4070 * Enable some error detection, note the instruction error mask
4071 * bit is reserved, so we leave it masked.
4072 */
4073 if (IS_G4X(dev)) {
4074 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4075 GM45_ERROR_MEM_PRIV |
4076 GM45_ERROR_CP_PRIV |
4077 I915_ERROR_MEMORY_REFRESH);
4078 } else {
4079 error_mask = ~(I915_ERROR_PAGE_TABLE |
4080 I915_ERROR_MEMORY_REFRESH);
4081 }
4082 I915_WRITE(EMR, error_mask);
4084 I915_WRITE(IMR, dev_priv->irq_mask);
4085 I915_WRITE(IER, enable_mask);
4088 I915_WRITE(PORT_HOTPLUG_EN, 0);
4089 POSTING_READ(PORT_HOTPLUG_EN);
4091 i915_enable_asle_pipestat(dev);
4096 static void i915_hpd_irq_setup(struct drm_device *dev)
4098 struct drm_i915_private *dev_priv = dev->dev_private;
4099 struct intel_encoder *intel_encoder;
4102 assert_spin_locked(&dev_priv->irq_lock);
4104 if (I915_HAS_HOTPLUG(dev)) {
4105 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4106 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4107 /* Note HDMI and DP share hotplug bits */
4108 /* enable bits are the same for all generations */
4109 for_each_intel_encoder(dev, intel_encoder)
4110 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4111 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4112 /* Programming the CRT detection parameters tends
4113 to generate a spurious hotplug event about three
4114 seconds later. So just do it once.
4115 */
4116 if (IS_G4X(dev))
4117 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4118 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4119 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4121 /* Ignore TV since it's buggy */
4122 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4126 static irqreturn_t i965_irq_handler(int irq, void *arg)
4128 struct drm_device *dev = arg;
4129 struct drm_i915_private *dev_priv = dev->dev_private;
4131 u32 pipe_stats[I915_MAX_PIPES];
4132 int ret = IRQ_NONE, pipe;
4133 u32 flip_mask =
4134 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4135 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4137 iir = I915_READ(IIR);
4139 for (;;) {
4140 bool irq_received = (iir & ~flip_mask) != 0;
4141 bool blc_event = false;
4143 /* Can't rely on pipestat interrupt bit in iir as it might
4144 * have been cleared after the pipestat interrupt was received.
4145 * It doesn't set the bit in iir again, but it still produces
4146 * interrupts (for non-MSI).
4147 */
4148 spin_lock(&dev_priv->irq_lock);
4149 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4150 i915_handle_error(dev, false,
4151 "Command parser error, iir 0x%08x",
4152 iir);
4154 for_each_pipe(dev_priv, pipe) {
4155 int reg = PIPESTAT(pipe);
4156 pipe_stats[pipe] = I915_READ(reg);
4158 /*
4159 * Clear the PIPE*STAT regs before the IIR
4160 */
4161 if (pipe_stats[pipe] & 0x8000ffff) {
4162 I915_WRITE(reg, pipe_stats[pipe]);
4163 irq_received = true;
4166 spin_unlock(&dev_priv->irq_lock);
4168 if (!irq_received)
4169 break;
4171 ret = IRQ_HANDLED;
4173 /* Consume port. Then clear IIR or we'll miss events */
4174 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4175 i9xx_hpd_irq_handler(dev);
4177 I915_WRITE(IIR, iir & ~flip_mask);
4178 new_iir = I915_READ(IIR); /* Flush posted writes */
4180 if (iir & I915_USER_INTERRUPT)
4181 notify_ring(dev, &dev_priv->ring[RCS]);
4182 if (iir & I915_BSD_USER_INTERRUPT)
4183 notify_ring(dev, &dev_priv->ring[VCS]);
4185 for_each_pipe(dev_priv, pipe) {
4186 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4187 i915_handle_vblank(dev, pipe, pipe, iir))
4188 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4190 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4193 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4194 i9xx_pipe_crc_irq_handler(dev, pipe);
4196 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4197 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4200 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4201 intel_opregion_asle_intr(dev);
4203 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4204 gmbus_irq_handler(dev);
4206 /* With MSI, interrupts are only generated when iir
4207 * transitions from zero to nonzero. If another bit got
4208 * set while we were handling the existing iir bits, then
4209 * we would never get another interrupt.
4211 * This is fine on non-MSI as well, as if we hit this path
4212 * we avoid exiting the interrupt handler only to generate
4215 * Note that for MSI this could cause a stray interrupt report
4216 * if an interrupt landed in the time between writing IIR and
4217 * the posting read. This should be rare enough to never
4218 * trigger the 99% of 100,000 interrupts test for disabling
4224 i915_update_dri1_breadcrumb(dev);
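/*
 * The MSI note above, reduced to the core pattern (no new names): acking
 * IIR and immediately re-reading it both flushes the posted write and picks
 * up any bits raised while the current batch was being handled, so the loop
 * only exits once IIR is genuinely quiescent:
 *
 *	I915_WRITE(IIR, iir & ~flip_mask);
 *	new_iir = I915_READ(IIR);	(flush posted write, catch new bits)
 *	iir = new_iir;			(nonzero -> handle another pass)
 */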
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
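/*
 * How this work item is intended to be driven, in sketch form (the storm
 * detection lives in the hpd irq handlers earlier in this file; the two
 * minute delay is illustrative): on a detected storm the pin is marked
 * disabled and reenabling is deferred to this handler:
 *
 *	dev_priv->hpd_stats[pin].hpd_mark = HPD_DISABLED;
 *	mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
 *			 msecs_to_jiffies(2 * 60 * 1000));
 */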
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
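/*
 * Worked example for the counter widths chosen above (values from this
 * function, arithmetic only): with max_vblank_count = 0xffffff the DRM core
 * treats the 24-bit hardware frame counter as wrapping every 2^24 vblanks,
 * i.e. roughly every 77 hours at 60 Hz (0xffffff / 60 / 3600 ~= 77.7), while
 * gen2's max_vblank_count = 0 tells the core there is no usable hardware
 * counter at all and vblank sequence numbers come from counting interrupts.
 */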
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
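/*
 * Connector poll modes after this function, in sketch form (DRM core flags,
 * no new names): a connector with a working HPD pin is only re-probed when
 * the hardware raises a hotplug interrupt, while one that keeps
 * intel_connector->polled set falls back to the core's periodic polling:
 *
 *	connector->polled = DRM_CONNECTOR_POLL_HPD;	(event driven)
 *	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
 *			    DRM_CONNECTOR_POLL_DISCONNECT;	(timer based)
 */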
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
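/*
 * A minimal sketch of the intended load ordering, pieced together from the
 * kerneldoc above and below (the call sites themselves live elsewhere in
 * the driver):
 *
 *	intel_irq_init(dev_priv);		(vtables, work items, timers)
 *	ret = intel_irq_install(dev_priv);	(hardware interrupt enabled)
 *	if (ret)
 *		return ret;
 *	intel_hpd_init(dev_priv);		(hotplug last, once irqs work)
 */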
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
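/*
 * These two helpers are meant to be used as a pair. A sketch of the runtime
 * suspend/resume usage (the surrounding suspend/resume functions are
 * illustrative, not defined in this file):
 *
 *	runtime suspend:
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		... power the device down ...
 *
 *	runtime resume:
 *		... power the device up ...
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 */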