/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
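
/*
 * The tables below map an HPD pin number to the per-platform hotplug
 * enable/status bit for that pin, e.g. hpd_ibx[HPD_CRT] is the south
 * display engine bit that fires when the CRT detect pin toggles behind
 * an IBX PCH. The interrupt handlers AND a hotplug trigger register
 * value against these entries (see hotplug_irq_storm_detect() below)
 * to work out which pin actually fired.
 */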
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
/* For display hotplug interrupt */
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);

ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
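
/*
 * PIPESTAT packs the interrupt enable bits in its high 16 bits and the
 * corresponding status bits in its low 16 bits, which is why the helpers
 * below mask the read value with 0x7fff0000 and write mask | (mask >> 16)
 * to enable an event and ack any stale status in a single register write.
 */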
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);

i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)

	I915_WRITE(reg, pipestat);
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
i915_pipe_enabled(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
201 * High & low register fields aren't synchronized, so make sure
202 * we get a low value that's stable across two reads of the high
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
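
/*
 * gm45 and later parts expose the frame counter in a single dedicated
 * register, so the paired high/low read loop above isn't needed and the
 * counter can be read in one go.
 */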
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));

	return I915_READ(reg);
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;

	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));

	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;

		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * counts.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

		ret |= DRM_SCANOUTPOS_INVBL;
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     struct timeval *vblank_time,
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
		DRM_ERROR("Invalid crtc %d\n", pipe);

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
					  | DRM_CONNECTOR_POLL_DISCONNECT;
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors
	 */
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
		if (intel_encoder->hot_plug)
			intel_encoder->hot_plug(intel_encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
static void ironlake_handle_rps_change(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);
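	/*
	 * Note the inverted sense of the clamps below: a numerically
	 * smaller delay presumably means a higher frequency, so max_delay
	 * is the smallest value new_delay may take. A busy GPU therefore
	 * decrements cur_delay and clamps upwards against max_delay, and
	 * an idle one does the opposite against min_delay.
	 */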
	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));

static void gen6_pm_rps_work(struct work_struct *work)
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
		new_delay = dev_priv->rps.cur_delay - 1;
	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
static void ivybridge_handle_parity_error(struct drm_device *dev)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */
601 spin_lock_irqsave(&dev_priv->rps.lock, flags);
602 dev_priv->rps.pm_iir |= pm_iir;
603 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
604 POSTING_READ(GEN6_PMIMR);
605 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
607 queue_work(dev_priv->wq, &dev_priv->rps.work);
610 #define HPD_STORM_DETECT_PERIOD 1000
611 #define HPD_STORM_THRESHOLD 5
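
/*
 * A pin is declared "stormy" when, roughly, more than HPD_STORM_THRESHOLD
 * hotplug interrupts arrive within HPD_STORM_DETECT_PERIOD ms of the
 * first one (think of a flaky cable bouncing the detect line many times
 * a second). The helper below only does the bookkeeping; its callers
 * mask the offending pin and fall back to output polling via
 * i915_hotplug_work_func() above.
 */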
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,

	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);

			dev_priv->hpd_stats[i].hpd_cnt++;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
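
/*
 * Typical usage from an interrupt handler, matching the PCH and VLV
 * paths below: if storm detection just disabled a pin, reprogram the
 * hotplug enables so the stormy pin stops interrupting, then kick the
 * hotplug work to sort out connector state:
 *
 *	if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
 *		ibx_hpd_irq_setup(dev);
 *	queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 */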
static void gmbus_irq_handler(struct drm_device *dev)
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);

static void dp_aux_irq_handler(struct drm_device *dev)
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(VLV_IIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (gt_iir == 0 && pm_iir == 0 && iir == 0)

	snb_gt_irq_handler(dev, dev_priv, gt_iir);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for_each_pipe(pipe) {
		int reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & 0x8000ffff) {
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe %c underrun\n",
			I915_WRITE(reg, pipe_stats[pipe]);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);

	/* Consume port. Then clear IIR or we'll miss events */
	if (iir & I915_DISPLAY_PORT_INTERRUPT) {
		u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
		if (hotplug_trigger) {
			if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
				i915_hpd_irq_setup(dev);
			queue_work(dev_priv->wq,
				   &dev_priv->hotplug_work);

		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		I915_READ(PORT_HOTPLUG_STAT);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);
	I915_WRITE(VLV_IIR, iir);
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 I915_READ(FDI_RX_IIR(pipe)));
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);

	de_iir = I915_READ(DEIIR);
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);
			/* clear PCH hotplug event before clearing CPU irq */
			I915_WRITE(SDEIIR, pch_iir);

		I915_WRITE(DEIIR, de_iir);

	pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);

	I915_WRITE(DEIER, de_ier);

	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))

		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
			ibx_irq_handler(dev, pch_iir);
		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
	struct intel_ring_buffer *ring;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);
		/*
		 * After all the gem state is reset, increment the reset
		 * counter and wake up everyone waiting for the reset to
		 * complete.
		 *
		 * Since unlock operations are a one-sided barrier only,
		 * we need to insert a barrier here to order any seqno
		 * updates before
		 * the counter increment.
		 */
		smp_mb__before_atomic_inc();
		atomic_inc(&dev_priv->gpu_error.reset_counter);

		kobject_uevent_env(&dev->primary->kdev.kobj,
				   KOBJ_CHANGE, reset_done_event);

		atomic_set(&error->reset_counter, I915_WEDGED);
		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
		instdone[0] = I915_READ(INSTDONE);

		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);

		WARN_ONCE(1, "Unsupported platform\n");
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
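
/*
 * Note the per-generation INSTDONE layout captured above: a single
 * INSTDONE register on the oldest parts, INSTDONE_I965 plus INSTDONE1
 * in the middle generations, and four separate registers on gen7,
 * which is presumably why I915_NUM_INSTDONE_REG must be at least four.
 */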
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
	struct drm_i915_error_object *dst;

	if (src == NULL || src->pages == NULL)

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);

			drm_clflush_pages(&page, 1);

		local_irq_restore(flags);

		reloc_offset += PAGE_SIZE;
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

		kfree(dst->pages[i]);

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size >> PAGE_SHIFT)
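
/*
 * Convenience wrapper that captures an object in its entirety by deriving
 * the page count from the object size, e.g. a 16 KiB batch buffer gives
 * 16384 >> PAGE_SHIFT == 4 pages with the usual 4 KiB page size.
 */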
i915_error_object_free(struct drm_i915_error_object *obj)

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

i915_error_state_free(struct kref *error_ref)
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);

	kfree(error->active_bo);
	kfree(error->overlay);

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;

	if (obj->pin_count > 0)
	if (obj->user_pin_count > 0)
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)

		capture_bo(err++, obj);

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (INTEL_INFO(dev)->gen) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));

		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));

		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
	struct drm_i915_gem_object *obj;

	if (!ring->get_seqno)

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)

		if (i915_seqno_passed(seqno, obj->last_read_seqno))

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		list_for_each_entry(request, &ring->request_list, list)

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count * sizeof(struct drm_i915_error_request),
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;

		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
/**
 * i915_capture_error_state - capture an error record for later analysis
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
		if (error->active_bo)
			error->active_bo + error->active_bo_count;

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;

	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
void i915_destroy_error_state(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

		kref_put(&error->ref, i915_error_state_free);

#define i915_capture_error_state(x)
static void i915_report_and_clear_eir(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
		u32 ipeir = I915_READ(IPEIR_I965);

		pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
		pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
		pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
		I915_WRITE(IPEIR_I965, ipeir);
		POSTING_READ(IPEIR_I965);

	if (eir & GM45_ERROR_PAGE_TABLE) {
		u32 pgtbl_err = I915_READ(PGTBL_ER);
		pr_err("page table error\n");
		pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
		I915_WRITE(PGTBL_ER, pgtbl_err);
		POSTING_READ(PGTBL_ER);

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */

	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);

	I915_WRITE(EIR, eir);

	eir = I915_READ(EIR);
	/* some errors might have become stuck, so mask them */
	DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
	I915_WRITE(EMR, I915_READ(EMR) | eir);
	I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
/**
 * i915_handle_error - handle an error interrupt
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter.
		 */
		i915_error_wake_up(dev_priv, false);
	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);

static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

ring_last_seqno(struct intel_ring_buffer *ring)
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
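
/*
 * A ring counts as idle for hangcheck purposes when its request list is
 * empty or the hardware has already signalled the seqno of the last
 * emitted request; ring_last_seqno() above pulls that value off the tail
 * of ring->request_list.
 */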
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
			wake_up_all(&ring->irq_queue);

static bool semaphore_passed(struct intel_ring_buffer *ring)
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	struct intel_ring_buffer *signaller;
	u32 cmd, ipehr, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit the scan to
	 * the three dwords before ACTHD to avoid going too far.
	 */
	acthd_min = max((int)acthd - 3 * 4, 0);
		cmd = ioread32(ring->virtual_start + acthd);

		if (acthd < acthd_min)

	signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
	return i915_seqno_passed(signaller->get_seqno(signaller, false),
				 ioread32(ring->virtual_start + acthd + 4) + 1);

static bool kick_ring(struct intel_ring_buffer *ring)
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
		I915_WRITE_CTL(ring, tmp);

	if (INTEL_INFO(dev)->gen >= 6 &&
	    tmp & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(ring)) {
		DRM_ERROR("Kicking stuck semaphore on %s\n",
		I915_WRITE_CTL(ring, tmp);
static bool i915_hangcheck_hung(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;

	if (!i915_enable_hangcheck)

	memset(acthd, 0, sizeof(acthd));
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);

	/* If all work is done then ACTHD clearly hasn't advanced. */
		if (i915_hangcheck_hung(dev))

		dev_priv->gpu_error.hangcheck_count = 0;

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))

		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2127 /* Reset timer case chip hangs without another request being added */
2128 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2129 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
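/* From here on: the per-platform IRQ setup and teardown hooks. The
 * preinstall hooks mask and clear everything while interrupts are still
 * off, the postinstall hooks unmask the sources we always want, and the
 * uninstall hooks return the registers to their masked state. Note the
 * recurring pattern of writing an IIR register's contents back to
 * itself: the interrupt identity registers are write-one-to-clear.
 */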
2134 static void ironlake_irq_preinstall(struct drm_device *dev)
2135 {
2136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2138 atomic_set(&dev_priv->irq_received, 0);
2140 I915_WRITE(HWSTAM, 0xeffe);
2142 /* XXX hotplug from PCH */
2144 I915_WRITE(DEIMR, 0xffffffff);
2145 I915_WRITE(DEIER, 0x0);
2146 POSTING_READ(DEIER);
2148 /* and GT */
2149 I915_WRITE(GTIMR, 0xffffffff);
2150 I915_WRITE(GTIER, 0x0);
2151 POSTING_READ(GTIER);
2153 if (HAS_PCH_NOP(dev))
2154 return;
2156 /* south display irq */
2157 I915_WRITE(SDEIMR, 0xffffffff);
2158 /*
2159 * SDEIER is also touched by the interrupt handler to work around missed
2160 * PCH interrupts. Hence we can't update it after the interrupt handler
2161 * is enabled - instead we unconditionally enable all PCH interrupt
2162 * sources here, but then only unmask them as needed with SDEIMR.
2163 */
2164 I915_WRITE(SDEIER, 0xffffffff);
2165 POSTING_READ(SDEIER);
2166 }
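/* GTIIR is cleared twice in the function below; the IIR registers are
 * double buffered, so a second event can be latched behind the one
 * being cleared, and clearing twice presumably flushes both.
 */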
2168 static void valleyview_irq_preinstall(struct drm_device *dev)
2169 {
2170 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2171 int pipe;
2173 atomic_set(&dev_priv->irq_received, 0);
2175 /* VLV magic */
2176 I915_WRITE(VLV_IMR, 0);
2177 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2178 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2179 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2181 /* and GT */
2182 I915_WRITE(GTIIR, I915_READ(GTIIR));
2183 I915_WRITE(GTIIR, I915_READ(GTIIR));
2184 I915_WRITE(GTIMR, 0xffffffff);
2185 I915_WRITE(GTIER, 0x0);
2186 POSTING_READ(GTIER);
2188 I915_WRITE(DPINVGTT, 0xff);
2190 I915_WRITE(PORT_HOTPLUG_EN, 0);
2191 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2192 for_each_pipe(pipe)
2193 I915_WRITE(PIPESTAT(pipe), 0xffff);
2194 I915_WRITE(VLV_IIR, 0xffffffff);
2195 I915_WRITE(VLV_IMR, 0xffffffff);
2196 I915_WRITE(VLV_IER, 0x0);
2197 POSTING_READ(VLV_IER);
2198 }
2200 static void ibx_hpd_irq_setup(struct drm_device *dev)
2201 {
2202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2203 struct drm_mode_config *mode_config = &dev->mode_config;
2204 struct intel_encoder *intel_encoder;
2205 u32 mask = ~I915_READ(SDEIMR);
2206 u32 hotplug;
2208 if (HAS_PCH_IBX(dev)) {
2209 mask &= ~SDE_HOTPLUG_MASK;
2210 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2211 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2212 mask |= hpd_ibx[intel_encoder->hpd_pin];
2213 } else {
2214 mask &= ~SDE_HOTPLUG_MASK_CPT;
2215 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2216 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2217 mask |= hpd_cpt[intel_encoder->hpd_pin];
2218 }
2220 I915_WRITE(SDEIMR, ~mask);
2222 /*
2223 * Enable digital hotplug on the PCH, and configure the DP short pulse
2224 * duration to 2ms (which is the minimum in the Display Port spec)
2225 *
2226 * This register is the same on all known PCH chips.
2227 */
2228 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2229 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2230 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2231 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2232 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2233 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2234 }
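/* SDEIMR is a mask register: a set bit blocks the interrupt. The write
 * of ~mask above therefore unmasks exactly the hotplug bits collected
 * from the encoders still marked HPD_ENABLED, and keeps everything else
 * masked.
 */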
2236 static void ibx_irq_postinstall(struct drm_device *dev)
2237 {
2238 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2239 u32 mask;
2241 if (HAS_PCH_IBX(dev))
2242 mask = SDE_GMBUS | SDE_AUX_MASK;
2243 else
2244 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2246 if (HAS_PCH_NOP(dev))
2247 return;
2249 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2250 I915_WRITE(SDEIMR, ~mask);
2251 }
2253 static int ironlake_irq_postinstall(struct drm_device *dev)
2254 {
2255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2256 /* enable the kinds of interrupts that are always enabled */
2257 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2258 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2259 DE_AUX_CHANNEL_A;
2260 u32 render_irqs;
2262 dev_priv->irq_mask = ~display_mask;
2264 /* should always be able to generate irqs */
2265 I915_WRITE(DEIIR, I915_READ(DEIIR));
2266 I915_WRITE(DEIMR, dev_priv->irq_mask);
2267 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2268 POSTING_READ(DEIER);
2270 dev_priv->gt_irq_mask = ~0;
2272 I915_WRITE(GTIIR, I915_READ(GTIIR));
2273 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2275 if (IS_GEN6(dev))
2276 render_irqs =
2277 GT_USER_INTERRUPT |
2278 GEN6_BSD_USER_INTERRUPT |
2279 GEN6_BLITTER_USER_INTERRUPT;
2280 else
2281 render_irqs =
2282 GT_USER_INTERRUPT |
2283 GT_PIPE_NOTIFY |
2284 GT_BSD_USER_INTERRUPT;
2285 I915_WRITE(GTIER, render_irqs);
2286 POSTING_READ(GTIER);
2288 ibx_irq_postinstall(dev);
2290 if (IS_IRONLAKE_M(dev)) {
2291 /* Clear & enable PCU event interrupts */
2292 I915_WRITE(DEIIR, DE_PCU_EVENT);
2293 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2294 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2295 }
2297 return 0;
2298 }
2300 static int ivybridge_irq_postinstall(struct drm_device *dev)
2301 {
2302 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2303 /* enable the kinds of interrupts that are always enabled */
2304 u32 display_mask =
2305 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2306 DE_PLANEC_FLIP_DONE_IVB |
2307 DE_PLANEB_FLIP_DONE_IVB |
2308 DE_PLANEA_FLIP_DONE_IVB |
2309 DE_AUX_CHANNEL_A_IVB;
2310 u32 render_irqs;
2312 dev_priv->irq_mask = ~display_mask;
2314 /* should always be able to generate irqs */
2315 I915_WRITE(DEIIR, I915_READ(DEIIR));
2316 I915_WRITE(DEIMR, dev_priv->irq_mask);
2317 I915_WRITE(DEIER,
2318 display_mask |
2319 DE_PIPEC_VBLANK_IVB |
2320 DE_PIPEB_VBLANK_IVB |
2321 DE_PIPEA_VBLANK_IVB);
2322 POSTING_READ(DEIER);
2324 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2326 I915_WRITE(GTIIR, I915_READ(GTIIR));
2327 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2329 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2330 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2331 I915_WRITE(GTIER, render_irqs);
2332 POSTING_READ(GTIER);
2334 ibx_irq_postinstall(dev);
2336 return 0;
2337 }
2339 static int valleyview_irq_postinstall(struct drm_device *dev)
2340 {
2341 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2342 u32 enable_mask;
2343 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2344 u32 render_irqs;
2345 u16 msid;
2347 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2348 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2349 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2350 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2351 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2353 /*
2354 * Leave vblank interrupts masked initially. enable/disable will
2355 * toggle them based on usage.
2356 */
2357 dev_priv->irq_mask = (~enable_mask) |
2358 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2359 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2361 /* Hack for broken MSIs on VLV */
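/* (Assumption, not spelled out in this file: 0xfee00000 is the
 * architectural x86 MSI address window, so config offset 0x94 looks
 * like the device's MSI address register and 0x98 its MSI data
 * register; the exact meaning of bit 14 set below is undocumented
 * here.)
 */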
2362 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2363 pci_read_config_word(dev->pdev, 0x98, &msid);
2364 msid &= 0xff; /* mask out delivery bits */
2365 msid |= (1<<14);
2366 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2368 I915_WRITE(PORT_HOTPLUG_EN, 0);
2369 POSTING_READ(PORT_HOTPLUG_EN);
2371 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2372 I915_WRITE(VLV_IER, enable_mask);
2373 I915_WRITE(VLV_IIR, 0xffffffff);
2374 I915_WRITE(PIPESTAT(0), 0xffff);
2375 I915_WRITE(PIPESTAT(1), 0xffff);
2376 POSTING_READ(VLV_IER);
2378 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2379 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2380 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2382 I915_WRITE(VLV_IIR, 0xffffffff);
2383 I915_WRITE(VLV_IIR, 0xffffffff);
2385 I915_WRITE(GTIIR, I915_READ(GTIIR));
2386 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2388 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2389 GEN6_BLITTER_USER_INTERRUPT;
2390 I915_WRITE(GTIER, render_irqs);
2391 POSTING_READ(GTIER);
2393 /* ack & enable invalid PTE error interrupts */
2394 #if 0 /* FIXME: add support to irq handler for checking these bits */
2395 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2396 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2397 #endif
2399 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2401 return 0;
2402 }
2404 static void valleyview_irq_uninstall(struct drm_device *dev)
2405 {
2406 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2407 int pipe;
2409 if (!dev_priv)
2410 return;
2412 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2414 for_each_pipe(pipe)
2415 I915_WRITE(PIPESTAT(pipe), 0xffff);
2417 I915_WRITE(HWSTAM, 0xffffffff);
2418 I915_WRITE(PORT_HOTPLUG_EN, 0);
2419 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2420 for_each_pipe(pipe)
2421 I915_WRITE(PIPESTAT(pipe), 0xffff);
2422 I915_WRITE(VLV_IIR, 0xffffffff);
2423 I915_WRITE(VLV_IMR, 0xffffffff);
2424 I915_WRITE(VLV_IER, 0x0);
2425 POSTING_READ(VLV_IER);
2426 }
2428 static void ironlake_irq_uninstall(struct drm_device *dev)
2429 {
2430 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2432 if (!dev_priv)
2433 return;
2435 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2437 I915_WRITE(HWSTAM, 0xffffffff);
2439 I915_WRITE(DEIMR, 0xffffffff);
2440 I915_WRITE(DEIER, 0x0);
2441 I915_WRITE(DEIIR, I915_READ(DEIIR));
2443 I915_WRITE(GTIMR, 0xffffffff);
2444 I915_WRITE(GTIER, 0x0);
2445 I915_WRITE(GTIIR, I915_READ(GTIIR));
2447 if (HAS_PCH_NOP(dev))
2448 return;
2450 I915_WRITE(SDEIMR, 0xffffffff);
2451 I915_WRITE(SDEIER, 0x0);
2452 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2453 }
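/* The gen2 (i8xx) paths below use the 16-bit register accessors
 * (I915_WRITE16 and friends); on those chips the IMR/IER/IIR registers
 * are only 16 bits wide.
 */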
2455 static void i8xx_irq_preinstall(struct drm_device * dev)
2456 {
2457 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2458 int pipe;
2460 atomic_set(&dev_priv->irq_received, 0);
2462 for_each_pipe(pipe)
2463 I915_WRITE(PIPESTAT(pipe), 0);
2464 I915_WRITE16(IMR, 0xffff);
2465 I915_WRITE16(IER, 0x0);
2466 POSTING_READ16(IER);
2467 }
2469 static int i8xx_irq_postinstall(struct drm_device *dev)
2470 {
2471 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2473 I915_WRITE16(EMR,
2474 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2476 /* Unmask the interrupts that we always want on. */
2477 dev_priv->irq_mask =
2478 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2479 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2480 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2481 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2482 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2483 I915_WRITE16(IMR, dev_priv->irq_mask);
2485 I915_WRITE16(IER,
2486 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2487 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2488 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2489 I915_USER_INTERRUPT);
2490 POSTING_READ16(IER);
2492 return 0;
2493 }
2495 /*
2496 * Returns true when a page flip has completed.
2497 */
2498 static bool i8xx_handle_vblank(struct drm_device *dev,
2499 int pipe, u16 iir)
2500 {
2501 drm_i915_private_t *dev_priv = dev->dev_private;
2502 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2504 if (!drm_handle_vblank(dev, pipe))
2505 return false;
2507 if ((iir & flip_pending) == 0)
2508 return false;
2510 intel_prepare_page_flip(dev, pipe);
2512 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2513 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
2514 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2515 * the flip is completed (no longer pending). Since this doesn't raise
2516 * an interrupt per se, we watch for the change at vblank.
2517 */
2518 if (I915_READ16(ISR) & flip_pending)
2519 return false;
2521 intel_finish_page_flip(dev, pipe);
2523 return true;
2524 }
2526 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2527 {
2528 struct drm_device *dev = (struct drm_device *) arg;
2529 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2530 u16 iir, new_iir;
2531 u32 pipe_stats[2];
2532 unsigned long irqflags;
2533 int pipe;
2535 u16 flip_mask =
2536 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2537 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2539 atomic_inc(&dev_priv->irq_received);
2541 iir = I915_READ16(IIR);
2542 if (iir == 0)
2543 return IRQ_NONE;
2545 while (iir & ~flip_mask) {
2546 /* Can't rely on pipestat interrupt bit in iir as it might
2547 * have been cleared after the pipestat interrupt was received.
2548 * It doesn't set the bit in iir again, but it still produces
2549 * interrupts (for non-MSI).
2550 */
2551 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2552 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2553 i915_handle_error(dev, false);
2555 for_each_pipe(pipe) {
2556 int reg = PIPESTAT(pipe);
2557 pipe_stats[pipe] = I915_READ(reg);
2559 /*
2560 * Clear the PIPE*STAT regs before the IIR
2561 */
2562 if (pipe_stats[pipe] & 0x8000ffff) {
2563 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2564 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2565 pipe_name(pipe));
2566 I915_WRITE(reg, pipe_stats[pipe]);
2567 }
2568 }
2570 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2572 I915_WRITE16(IIR, iir & ~flip_mask);
2573 new_iir = I915_READ16(IIR); /* Flush posted writes */
2575 i915_update_dri1_breadcrumb(dev);
2577 if (iir & I915_USER_INTERRUPT)
2578 notify_ring(dev, &dev_priv->ring[RCS]);
2580 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2581 i8xx_handle_vblank(dev, 0, iir))
2582 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2584 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2585 i8xx_handle_vblank(dev, 1, iir))
2586 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2588 iir = new_iir;
2589 }
2591 return IRQ_HANDLED;
2592 }
2594 static void i8xx_irq_uninstall(struct drm_device * dev)
2595 {
2596 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2597 int pipe;
2599 for_each_pipe(pipe) {
2600 /* Clear enable bits; then clear status bits */
2601 I915_WRITE(PIPESTAT(pipe), 0);
2602 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2603 }
2604 I915_WRITE16(IMR, 0xffff);
2605 I915_WRITE16(IER, 0x0);
2606 I915_WRITE16(IIR, I915_READ16(IIR));
2607 }
2609 static void i915_irq_preinstall(struct drm_device * dev)
2610 {
2611 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2612 int pipe;
2614 atomic_set(&dev_priv->irq_received, 0);
2616 if (I915_HAS_HOTPLUG(dev)) {
2617 I915_WRITE(PORT_HOTPLUG_EN, 0);
2618 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2619 }
2621 I915_WRITE16(HWSTAM, 0xeffe);
2622 for_each_pipe(pipe)
2623 I915_WRITE(PIPESTAT(pipe), 0);
2624 I915_WRITE(IMR, 0xffffffff);
2625 I915_WRITE(IER, 0x0);
2626 POSTING_READ(IER);
2627 }
2629 static int i915_irq_postinstall(struct drm_device *dev)
2630 {
2631 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2632 u32 enable_mask;
2634 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2636 /* Unmask the interrupts that we always want on. */
2637 dev_priv->irq_mask =
2638 ~(I915_ASLE_INTERRUPT |
2639 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2640 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2641 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2642 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2643 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2645 enable_mask =
2646 I915_ASLE_INTERRUPT |
2647 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2648 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2649 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2650 I915_USER_INTERRUPT;
2652 if (I915_HAS_HOTPLUG(dev)) {
2653 I915_WRITE(PORT_HOTPLUG_EN, 0);
2654 POSTING_READ(PORT_HOTPLUG_EN);
2656 /* Enable in IER... */
2657 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2658 /* and unmask in IMR */
2659 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2660 }
2662 I915_WRITE(IMR, dev_priv->irq_mask);
2663 I915_WRITE(IER, enable_mask);
2664 POSTING_READ(IER);
2666 intel_opregion_enable_asle(dev);
2668 return 0;
2669 }
2671 /*
2672 * Returns true when a page flip has completed.
2673 */
2674 static bool i915_handle_vblank(struct drm_device *dev,
2675 int plane, int pipe, u32 iir)
2676 {
2677 drm_i915_private_t *dev_priv = dev->dev_private;
2678 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2680 if (!drm_handle_vblank(dev, pipe))
2681 return false;
2683 if ((iir & flip_pending) == 0)
2684 return false;
2686 intel_prepare_page_flip(dev, plane);
2688 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2689 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
2690 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2691 * the flip is completed (no longer pending). Since this doesn't raise
2692 * an interrupt per se, we watch for the change at vblank.
2693 */
2694 if (I915_READ(ISR) & flip_pending)
2695 return false;
2697 intel_finish_page_flip(dev, pipe);
2699 return true;
2700 }
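/* The handler below loops until IIR has no unmasked bits left: each
 * pass snapshots and clears the PIPESTAT registers under the irq lock,
 * clears IIR (with a posting read to flush), and only then acts on the
 * snapshot, so events that arrive mid-handler re-assert IIR and are
 * picked up by the next iteration rather than being lost.
 */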
2702 static irqreturn_t i915_irq_handler(int irq, void *arg)
2703 {
2704 struct drm_device *dev = (struct drm_device *) arg;
2705 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2706 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2707 unsigned long irqflags;
2708 u32 flip_mask =
2709 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2710 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2711 int pipe, ret = IRQ_NONE;
2713 atomic_inc(&dev_priv->irq_received);
2715 iir = I915_READ(IIR);
2716 do {
2717 bool irq_received = (iir & ~flip_mask) != 0;
2718 bool blc_event = false;
2720 /* Can't rely on pipestat interrupt bit in iir as it might
2721 * have been cleared after the pipestat interrupt was received.
2722 * It doesn't set the bit in iir again, but it still produces
2723 * interrupts (for non-MSI).
2724 */
2725 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2727 i915_handle_error(dev, false);
2729 for_each_pipe(pipe) {
2730 int reg = PIPESTAT(pipe);
2731 pipe_stats[pipe] = I915_READ(reg);
2733 /* Clear the PIPE*STAT regs before the IIR */
2734 if (pipe_stats[pipe] & 0x8000ffff) {
2735 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2736 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2737 pipe_name(pipe));
2738 I915_WRITE(reg, pipe_stats[pipe]);
2739 irq_received = true;
2740 }
2741 }
2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2744 if (!irq_received)
2745 break;
2747 /* Consume port. Then clear IIR or we'll miss events */
2748 if ((I915_HAS_HOTPLUG(dev)) &&
2749 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2750 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2751 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2753 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2754 hotplug_status);
2755 if (hotplug_trigger) {
2756 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
2757 i915_hpd_irq_setup(dev);
2758 queue_work(dev_priv->wq,
2759 &dev_priv->hotplug_work);
2760 }
2761 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2762 POSTING_READ(PORT_HOTPLUG_STAT);
2763 }
2765 I915_WRITE(IIR, iir & ~flip_mask);
2766 new_iir = I915_READ(IIR); /* Flush posted writes */
2768 if (iir & I915_USER_INTERRUPT)
2769 notify_ring(dev, &dev_priv->ring[RCS]);
2771 for_each_pipe(pipe) {
2772 int plane = pipe;
2773 if (IS_MOBILE(dev))
2774 plane = !plane;
2776 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2777 i915_handle_vblank(dev, plane, pipe, iir))
2778 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2780 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2781 blc_event = true;
2782 }
2784 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2785 intel_opregion_asle_intr(dev);
2787 /* With MSI, interrupts are only generated when iir
2788 * transitions from zero to nonzero. If another bit got
2789 * set while we were handling the existing iir bits, then
2790 * we would never get another interrupt.
2791 *
2792 * This is fine on non-MSI as well, as if we hit this path
2793 * we avoid exiting the interrupt handler only to generate
2794 * another one.
2795 *
2796 * Note that for MSI this could cause a stray interrupt report
2797 * if an interrupt landed in the time between writing IIR and
2798 * the posting read. This should be rare enough to never
2799 * trigger the 99% of 100,000 interrupts test for disabling
2800 * stray interrupts.
2801 */
2802 ret = IRQ_HANDLED;
2803 iir = new_iir;
2804 } while (iir & ~flip_mask);
2806 i915_update_dri1_breadcrumb(dev);
2808 return ret;
2809 }
2811 static void i915_irq_uninstall(struct drm_device * dev)
2812 {
2813 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2814 int pipe;
2816 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2818 if (I915_HAS_HOTPLUG(dev)) {
2819 I915_WRITE(PORT_HOTPLUG_EN, 0);
2820 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2821 }
2823 I915_WRITE16(HWSTAM, 0xffff);
2824 for_each_pipe(pipe) {
2825 /* Clear enable bits; then clear status bits */
2826 I915_WRITE(PIPESTAT(pipe), 0);
2827 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2828 }
2829 I915_WRITE(IMR, 0xffffffff);
2830 I915_WRITE(IER, 0x0);
2832 I915_WRITE(IIR, I915_READ(IIR));
2833 }
2835 static void i965_irq_preinstall(struct drm_device * dev)
2836 {
2837 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2838 int pipe;
2840 atomic_set(&dev_priv->irq_received, 0);
2842 I915_WRITE(PORT_HOTPLUG_EN, 0);
2843 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2845 I915_WRITE(HWSTAM, 0xeffe);
2846 for_each_pipe(pipe)
2847 I915_WRITE(PIPESTAT(pipe), 0);
2848 I915_WRITE(IMR, 0xffffffff);
2849 I915_WRITE(IER, 0x0);
2850 POSTING_READ(IER);
2851 }
2853 static int i965_irq_postinstall(struct drm_device *dev)
2854 {
2855 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2856 u32 enable_mask;
2857 u32 error_mask;
2859 /* Unmask the interrupts that we always want on. */
2860 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2861 I915_DISPLAY_PORT_INTERRUPT |
2862 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2863 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2864 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2865 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2866 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2868 enable_mask = ~dev_priv->irq_mask;
2869 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2870 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2871 enable_mask |= I915_USER_INTERRUPT;
2873 if (IS_G4X(dev))
2874 enable_mask |= I915_BSD_USER_INTERRUPT;
2876 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2878 /*
2879 * Enable some error detection, note the instruction error mask
2880 * bit is reserved, so we leave it masked.
2881 */
2882 if (IS_G4X(dev)) {
2883 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2884 GM45_ERROR_MEM_PRIV |
2885 GM45_ERROR_CP_PRIV |
2886 I915_ERROR_MEMORY_REFRESH);
2887 } else {
2888 error_mask = ~(I915_ERROR_PAGE_TABLE |
2889 I915_ERROR_MEMORY_REFRESH);
2890 }
2891 I915_WRITE(EMR, error_mask);
2893 I915_WRITE(IMR, dev_priv->irq_mask);
2894 I915_WRITE(IER, enable_mask);
2895 POSTING_READ(IER);
2897 I915_WRITE(PORT_HOTPLUG_EN, 0);
2898 POSTING_READ(PORT_HOTPLUG_EN);
2900 intel_opregion_enable_asle(dev);
2902 return 0;
2903 }
2905 static void i915_hpd_irq_setup(struct drm_device *dev)
2906 {
2907 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2908 struct drm_mode_config *mode_config = &dev->mode_config;
2909 struct intel_encoder *intel_encoder;
2910 u32 hotplug_en;
2912 if (I915_HAS_HOTPLUG(dev)) {
2913 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2914 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2915 /* Note HDMI and DP share hotplug bits */
2916 /* enable bits are the same for all generations */
2917 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2918 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2919 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2920 /* Programming the CRT detection parameters tends
2921 to generate a spurious hotplug event about three
2922 seconds later. So just do it once.
2923 */
2924 if (IS_G4X(dev))
2925 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2926 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2927 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2929 /* Ignore TV since it's buggy */
2930 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2931 }
2932 }
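/* Both legacy display handlers feed hotplug interrupts through
 * hotplug_irq_storm_detect(); when a pin generates an interrupt storm
 * it is marked HPD_DISABLED and hpd_irq_setup() is re-run, which drops
 * the pin from the hardware enable mask until the reenable timer
 * further down restores it.
 */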
2934 static irqreturn_t i965_irq_handler(int irq, void *arg)
2935 {
2936 struct drm_device *dev = (struct drm_device *) arg;
2937 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2938 u32 iir, new_iir;
2939 u32 pipe_stats[I915_MAX_PIPES];
2940 unsigned long irqflags;
2941 int irq_received;
2942 int ret = IRQ_NONE, pipe;
2943 u32 flip_mask =
2944 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2945 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2947 atomic_inc(&dev_priv->irq_received);
2949 iir = I915_READ(IIR);
2951 for (;;) {
2952 bool blc_event = false;
2954 irq_received = (iir & ~flip_mask) != 0;
2956 /* Can't rely on pipestat interrupt bit in iir as it might
2957 * have been cleared after the pipestat interrupt was received.
2958 * It doesn't set the bit in iir again, but it still produces
2959 * interrupts (for non-MSI).
2960 */
2961 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2962 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2963 i915_handle_error(dev, false);
2965 for_each_pipe(pipe) {
2966 int reg = PIPESTAT(pipe);
2967 pipe_stats[pipe] = I915_READ(reg);
2969 /*
2970 * Clear the PIPE*STAT regs before the IIR
2971 */
2972 if (pipe_stats[pipe] & 0x8000ffff) {
2973 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2974 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2975 pipe_name(pipe));
2976 I915_WRITE(reg, pipe_stats[pipe]);
2977 irq_received = 1;
2978 }
2979 }
2980 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2982 if (!irq_received)
2983 break;
2985 ret = IRQ_HANDLED;
2987 /* Consume port. Then clear IIR or we'll miss events */
2988 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2989 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2990 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2991 HOTPLUG_INT_STATUS_G4X :
2992 HOTPLUG_INT_STATUS_I915);
2994 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2995 hotplug_status);
2996 if (hotplug_trigger) {
2997 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
2998 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
2999 i915_hpd_irq_setup(dev);
3000 queue_work(dev_priv->wq,
3001 &dev_priv->hotplug_work);
3002 }
3003 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3004 I915_READ(PORT_HOTPLUG_STAT);
3005 }
3007 I915_WRITE(IIR, iir & ~flip_mask);
3008 new_iir = I915_READ(IIR); /* Flush posted writes */
3010 if (iir & I915_USER_INTERRUPT)
3011 notify_ring(dev, &dev_priv->ring[RCS]);
3012 if (iir & I915_BSD_USER_INTERRUPT)
3013 notify_ring(dev, &dev_priv->ring[VCS]);
3015 for_each_pipe(pipe) {
3016 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3017 i915_handle_vblank(dev, pipe, pipe, iir))
3018 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3020 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3021 blc_event = true;
3022 }
3025 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3026 intel_opregion_asle_intr(dev);
3028 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3029 gmbus_irq_handler(dev);
3031 /* With MSI, interrupts are only generated when iir
3032 * transitions from zero to nonzero. If another bit got
3033 * set while we were handling the existing iir bits, then
3034 * we would never get another interrupt.
3035 *
3036 * This is fine on non-MSI as well, as if we hit this path
3037 * we avoid exiting the interrupt handler only to generate
3038 * another one.
3039 *
3040 * Note that for MSI this could cause a stray interrupt report
3041 * if an interrupt landed in the time between writing IIR and
3042 * the posting read. This should be rare enough to never
3043 * trigger the 99% of 100,000 interrupts test for disabling
3044 * stray interrupts.
3045 */
3046 iir = new_iir;
3047 }
3049 i915_update_dri1_breadcrumb(dev);
3051 return ret;
3052 }
3054 static void i965_irq_uninstall(struct drm_device * dev)
3055 {
3056 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3057 int pipe;
3059 if (!dev_priv)
3060 return;
3062 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3064 I915_WRITE(PORT_HOTPLUG_EN, 0);
3065 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3067 I915_WRITE(HWSTAM, 0xffffffff);
3068 for_each_pipe(pipe)
3069 I915_WRITE(PIPESTAT(pipe), 0);
3070 I915_WRITE(IMR, 0xffffffff);
3071 I915_WRITE(IER, 0x0);
3073 for_each_pipe(pipe)
3074 I915_WRITE(PIPESTAT(pipe),
3075 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3076 I915_WRITE(IIR, I915_READ(IIR));
3077 }
3079 static void i915_reenable_hotplug_timer_func(unsigned long data)
3080 {
3081 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3082 struct drm_device *dev = dev_priv->dev;
3083 struct drm_mode_config *mode_config = &dev->mode_config;
3084 unsigned long irqflags;
3085 int i;
3087 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3088 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3089 struct drm_connector *connector;
3091 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3092 continue;
3094 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3096 list_for_each_entry(connector, &mode_config->connector_list, head) {
3097 struct intel_connector *intel_connector = to_intel_connector(connector);
3099 if (intel_connector->encoder->hpd_pin == i) {
3100 if (connector->polled != intel_connector->polled)
3101 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3102 drm_get_connector_name(connector));
3103 connector->polled = intel_connector->polled;
3104 if (!connector->polled)
3105 connector->polled = DRM_CONNECTOR_POLL_HPD;
3106 }
3107 }
3108 }
3109 if (dev_priv->display.hpd_irq_setup)
3110 dev_priv->display.hpd_irq_setup(dev);
3111 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3112 }
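/* intel_irq_init() wires up the per-chipset handler vtable and the work
 * items and timers used above; intel_hpd_init() at the end of the file
 * then (re)arms hotplug detection once the connector list exists.
 */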
3114 void intel_irq_init(struct drm_device *dev)
3115 {
3116 struct drm_i915_private *dev_priv = dev->dev_private;
3118 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3119 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3120 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3121 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3123 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3124 i915_hangcheck_elapsed,
3125 (unsigned long) dev);
3126 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3127 (unsigned long) dev_priv);
3129 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3131 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3132 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3133 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3134 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3135 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3136 }
3138 if (drm_core_check_feature(dev, DRIVER_MODESET))
3139 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3140 else
3141 dev->driver->get_vblank_timestamp = NULL;
3142 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3144 if (IS_VALLEYVIEW(dev)) {
3145 dev->driver->irq_handler = valleyview_irq_handler;
3146 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3147 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3148 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3149 dev->driver->enable_vblank = valleyview_enable_vblank;
3150 dev->driver->disable_vblank = valleyview_disable_vblank;
3151 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3152 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3153 /* Share pre & uninstall handlers with ILK/SNB */
3154 dev->driver->irq_handler = ivybridge_irq_handler;
3155 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3156 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3157 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3158 dev->driver->enable_vblank = ivybridge_enable_vblank;
3159 dev->driver->disable_vblank = ivybridge_disable_vblank;
3160 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3161 } else if (HAS_PCH_SPLIT(dev)) {
3162 dev->driver->irq_handler = ironlake_irq_handler;
3163 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3164 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3165 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3166 dev->driver->enable_vblank = ironlake_enable_vblank;
3167 dev->driver->disable_vblank = ironlake_disable_vblank;
3168 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3169 } else {
3170 if (INTEL_INFO(dev)->gen == 2) {
3171 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3172 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3173 dev->driver->irq_handler = i8xx_irq_handler;
3174 dev->driver->irq_uninstall = i8xx_irq_uninstall;
3175 } else if (INTEL_INFO(dev)->gen == 3) {
3176 dev->driver->irq_preinstall = i915_irq_preinstall;
3177 dev->driver->irq_postinstall = i915_irq_postinstall;
3178 dev->driver->irq_uninstall = i915_irq_uninstall;
3179 dev->driver->irq_handler = i915_irq_handler;
3180 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3181 } else {
3182 dev->driver->irq_preinstall = i965_irq_preinstall;
3183 dev->driver->irq_postinstall = i965_irq_postinstall;
3184 dev->driver->irq_uninstall = i965_irq_uninstall;
3185 dev->driver->irq_handler = i965_irq_handler;
3186 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3187 }
3188 dev->driver->enable_vblank = i915_enable_vblank;
3189 dev->driver->disable_vblank = i915_disable_vblank;
3190 }
3191 }
3193 void intel_hpd_init(struct drm_device *dev)
3194 {
3195 struct drm_i915_private *dev_priv = dev->dev_private;
3196 struct drm_mode_config *mode_config = &dev->mode_config;
3197 struct drm_connector *connector;
3198 int i;
3200 for (i = 1; i < HPD_NUM_PINS; i++) {
3201 dev_priv->hpd_stats[i].hpd_cnt = 0;
3202 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3203 }
3204 list_for_each_entry(connector, &mode_config->connector_list, head) {
3205 struct intel_connector *intel_connector = to_intel_connector(connector);
3206 connector->polled = intel_connector->polled;
3207 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3208 connector->polled = DRM_CONNECTOR_POLL_HPD;
3209 }
3210 if (dev_priv->display.hpd_irq_setup)
3211 dev_priv->display.hpd_irq_setup(dev);
3212 }