drm/i915: ring irq cleanups
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include "drmP.h"
34 #include "drm.h"
35 #include "i915_drm.h"
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 #define MAX_NOPID ((u32)~0)
41
42 /**
43  * Interrupts that are always left unmasked.
44  *
45  * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
46  * we leave them always unmasked in IMR and then control enabling them through
47  * PIPESTAT alone.
48  */
49 #define I915_INTERRUPT_ENABLE_FIX                       \
50         (I915_ASLE_INTERRUPT |                          \
51          I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |          \
52          I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |          \
53          I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |  \
54          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |  \
55          I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
56
57 /** Interrupts that we mask and unmask at runtime. */
58 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
59
60 #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
61                                  PIPE_VBLANK_INTERRUPT_STATUS)
62
63 #define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
64                                  PIPE_VBLANK_INTERRUPT_ENABLE)
65
66 #define DRM_I915_VBLANK_PIPE_ALL        (DRM_I915_VBLANK_PIPE_A | \
67                                          DRM_I915_VBLANK_PIPE_B)
68
69 /* For display hotplug interrupt */
70 static void
71 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
72 {
73         if ((dev_priv->irq_mask & mask) != 0) {
74                 dev_priv->irq_mask &= ~mask;
75                 I915_WRITE(DEIMR, dev_priv->irq_mask);
76                 POSTING_READ(DEIMR);
77         }
78 }
79
80 static inline void
81 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
82 {
83         if ((dev_priv->irq_mask & mask) != mask) {
84                 dev_priv->irq_mask |= mask;
85                 I915_WRITE(DEIMR, dev_priv->irq_mask);
86                 POSTING_READ(DEIMR);
87         }
88 }
89
90 void
91 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
92 {
93         if ((dev_priv->pipestat[pipe] & mask) != mask) {
94                 u32 reg = PIPESTAT(pipe);
95
96                 dev_priv->pipestat[pipe] |= mask;
97                 /* Enable the interrupt, clear any pending status */
98                 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
99                 POSTING_READ(reg);
100         }
101 }
102
103 void
104 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
105 {
106         if ((dev_priv->pipestat[pipe] & mask) != 0) {
107                 u32 reg = PIPESTAT(pipe);
108
109                 dev_priv->pipestat[pipe] &= ~mask;
110                 I915_WRITE(reg, dev_priv->pipestat[pipe]);
111                 POSTING_READ(reg);
112         }
113 }
114
115 /**
116  * intel_enable_asle - enable ASLE interrupt for OpRegion
117  */
void intel_enable_asle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        /* FIXME: opregion/asle for VLV */
        if (IS_VALLEYVIEW(dev))
                return;

        /* irq_lock serializes irq_mask / pipestat[] updates with the
         * interrupt handlers. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        if (HAS_PCH_SPLIT(dev))
                /* PCH-split platforms deliver ASLE via the GSE display irq. */
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        else {
                /* Pre-PCH: legacy backlight event on pipe B ... */
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
                /* ... and additionally on pipe A for gen4+. */
                if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
141
142 /**
143  * i915_pipe_enabled - check if a pipe is enabled
144  * @dev: DRM device
145  * @pipe: pipe to check
146  *
147  * Reading certain registers when the pipe is disabled can hang the chip.
148  * Use this routine to make sure the PLL is running and the pipe is active
149  * before reading such registers if unsure.
150  */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* Non-zero iff the pipe's PIPECONF enable bit is set. */
        return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
157
158 /* Called from drm generic code, passed a 'crtc', which
159  * we use as a pipe index
160  */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        /* Reading these registers with the pipe off can hang the chip,
         * so bail out early (see i915_pipe_enabled). */
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* The frame counter is split across two registers: the high bits
         * in PIPEFRAME, the low bits (plus pixel count) in PIPEFRAMEPIXEL. */
        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        /* Recombine the two halves into a single counter value. */
        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}
192
193 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
194 {
195         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196         int reg = PIPE_FRMCOUNT_GM45(pipe);
197
198         if (!i915_pipe_enabled(dev, pipe)) {
199                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
200                                  "pipe %c\n", pipe_name(pipe));
201                 return 0;
202         }
203
204         return I915_READ(reg);
205 }
206
/*
 * Report the current scanout position of @pipe via *vpos/*hpos and return
 * DRM_SCANOUTPOS_* status flags for the DRM core's vblank timestamping.
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;

        /* Registers below are unsafe to read with the pipe disabled. */
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(pipe));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}
272
/*
 * Compute a precise vblank timestamp for @pipe. Validates the pipe and
 * its crtc, then delegates to the DRM core helper which in turn uses
 * i915_get_crtc_scanoutpos().
 *
 * Returns 0 on success, -EINVAL for a bad pipe/crtc, -EBUSY if the crtc
 * is disabled.
 */
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        /* Reject out-of-range pipe indices from the DRM core. */
        if (pipe < 0 || pipe >= dev_priv->num_pipe) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}
303
304 /*
305  * Handle hotplug events outside the interrupt handler proper.
306  */
static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        /* mode_config->mutex protects the encoder list while we walk it. */
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        /* Give every encoder that implements hot_plug a chance to react. */
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}
327
328 static void i915_handle_rps_change(struct drm_device *dev)
329 {
330         drm_i915_private_t *dev_priv = dev->dev_private;
331         u32 busy_up, busy_down, max_avg, min_avg;
332         u8 new_delay = dev_priv->cur_delay;
333
334         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
335         busy_up = I915_READ(RCPREVBSYTUPAVG);
336         busy_down = I915_READ(RCPREVBSYTDNAVG);
337         max_avg = I915_READ(RCBMAXAVG);
338         min_avg = I915_READ(RCBMINAVG);
339
340         /* Handle RCS change request from hw */
341         if (busy_up > max_avg) {
342                 if (dev_priv->cur_delay != dev_priv->max_delay)
343                         new_delay = dev_priv->cur_delay - 1;
344                 if (new_delay < dev_priv->max_delay)
345                         new_delay = dev_priv->max_delay;
346         } else if (busy_down < min_avg) {
347                 if (dev_priv->cur_delay != dev_priv->min_delay)
348                         new_delay = dev_priv->cur_delay + 1;
349                 if (new_delay > dev_priv->min_delay)
350                         new_delay = dev_priv->min_delay;
351         }
352
353         if (ironlake_set_drps(dev, new_delay))
354                 dev_priv->cur_delay = new_delay;
355
356         return;
357 }
358
/*
 * Signal seqno completion on @ring: record the latest seqno, wake any
 * waiters, and reset the GPU hangcheck timer since the ring made
 * forward progress.
 */
static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno;

        /* Ring not initialized (no backing object) -- nothing to signal. */
        if (ring->obj == NULL)
                return;

        seqno = ring->get_seqno(ring);
        trace_i915_gem_request_complete(ring, seqno);

        /* Publish the completed seqno, then wake everyone waiting on it. */
        ring->irq_seqno = seqno;
        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
                /* Forward progress seen: rearm the hang detector. */
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer,
                          jiffies +
                          msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
        }
}
380
/*
 * Deferred (process-context) handler for gen6 PM/RPS interrupts queued
 * by the irq handlers: consume the accumulated pm_iir bits and step the
 * GPU frequency up or down by one notch.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps_work);
        u8 new_delay = dev_priv->cur_delay;
        u32 pm_iir, pm_imr;

        /* Snapshot and clear the deferred events under rps_lock, and
         * unmask PM interrupts again (the irq handler masked them in
         * GEN6_PMIMR when it queued us). */
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
        dev_priv->pm_iir = 0;
        /* NOTE(review): pm_imr is read but never used below -- confirm
         * whether the read is required for ordering or is simply dead. */
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps_lock);

        if (!pm_iir)
                return;

        mutex_lock(&dev_priv->dev->struct_mutex);
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                /* Step up one notch, clamped to max_delay. */
                if (dev_priv->cur_delay != dev_priv->max_delay)
                        new_delay = dev_priv->cur_delay + 1;
                if (new_delay > dev_priv->max_delay)
                        new_delay = dev_priv->max_delay;
        } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
                gen6_gt_force_wake_get(dev_priv);
                /* Step down one notch, clamped to min_delay. */
                if (dev_priv->cur_delay != dev_priv->min_delay)
                        new_delay = dev_priv->cur_delay - 1;
                if (new_delay < dev_priv->min_delay) {
                        new_delay = dev_priv->min_delay;
                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
                                   ((new_delay << 16) & 0x3f0000));
                } else {
                        /* Make sure we continue to get down interrupts
                         * until we hit the minimum frequency */
                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
                }
                gen6_gt_force_wake_put(dev_priv);
        }

        gen6_set_rps(dev_priv->dev, new_delay);
        dev_priv->cur_delay = new_delay;

        /*
         * rps_lock not held here because clearing is non-destructive. There is
         * an *extremely* unlikely race with gen6_rps_enable() that is prevented
         * by holding struct_mutex for the duration of the write.
         */
        mutex_unlock(&dev_priv->dev->struct_mutex);
}
432
433 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
434 {
435         struct drm_device *dev = (struct drm_device *) arg;
436         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
437         u32 iir, gt_iir, pm_iir;
438         irqreturn_t ret = IRQ_NONE;
439         unsigned long irqflags;
440         int pipe;
441         u32 pipe_stats[I915_MAX_PIPES];
442         u32 vblank_status;
443         int vblank = 0;
444         bool blc_event;
445
446         atomic_inc(&dev_priv->irq_received);
447
448         vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
449                 PIPE_VBLANK_INTERRUPT_STATUS;
450
451         while (true) {
452                 iir = I915_READ(VLV_IIR);
453                 gt_iir = I915_READ(GTIIR);
454                 pm_iir = I915_READ(GEN6_PMIIR);
455
456                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
457                         goto out;
458
459                 ret = IRQ_HANDLED;
460
461                 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
462                         notify_ring(dev, &dev_priv->ring[RCS]);
463                 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
464                         notify_ring(dev, &dev_priv->ring[VCS]);
465                 if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT)
466                         notify_ring(dev, &dev_priv->ring[BCS]);
467
468                 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
469                               GT_GEN6_BSD_CS_ERROR_INTERRUPT |
470                               GT_RENDER_CS_ERROR_INTERRUPT)) {
471                         DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
472                         i915_handle_error(dev, false);
473                 }
474
475                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
476                 for_each_pipe(pipe) {
477                         int reg = PIPESTAT(pipe);
478                         pipe_stats[pipe] = I915_READ(reg);
479
480                         /*
481                          * Clear the PIPE*STAT regs before the IIR
482                          */
483                         if (pipe_stats[pipe] & 0x8000ffff) {
484                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
485                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
486                                                          pipe_name(pipe));
487                                 I915_WRITE(reg, pipe_stats[pipe]);
488                         }
489                 }
490                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
491
492                 /* Consume port.  Then clear IIR or we'll miss events */
493                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
494                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
495
496                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
497                                          hotplug_status);
498                         if (hotplug_status & dev_priv->hotplug_supported_mask)
499                                 queue_work(dev_priv->wq,
500                                            &dev_priv->hotplug_work);
501
502                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
503                         I915_READ(PORT_HOTPLUG_STAT);
504                 }
505
506
507                 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
508                         drm_handle_vblank(dev, 0);
509                         vblank++;
510                         if (!dev_priv->flip_pending_is_done) {
511                                 intel_finish_page_flip(dev, 0);
512                         }
513                 }
514
515                 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
516                         drm_handle_vblank(dev, 1);
517                         vblank++;
518                         if (!dev_priv->flip_pending_is_done) {
519                                 intel_finish_page_flip(dev, 0);
520                         }
521                 }
522
523                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
524                         blc_event = true;
525
526                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
527                         unsigned long flags;
528                         spin_lock_irqsave(&dev_priv->rps_lock, flags);
529                         WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
530                         dev_priv->pm_iir |= pm_iir;
531                         I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
532                         POSTING_READ(GEN6_PMIMR);
533                         spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
534                         queue_work(dev_priv->wq, &dev_priv->rps_work);
535                 }
536
537                 I915_WRITE(GTIIR, gt_iir);
538                 I915_WRITE(GEN6_PMIIR, pm_iir);
539                 I915_WRITE(VLV_IIR, iir);
540         }
541
542 out:
543         return ret;
544 }
545
/*
 * Decode and log south-display (PCH) interrupt sources from SDEIIR.
 * Purely informational: all branches only emit debug/error messages.
 * Note: SDEIIR is read here but cleared by the caller afterwards.
 */
static void pch_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 pch_iir;
        int pipe;

        pch_iir = I915_READ(SDEIIR);

        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                                 SDE_AUDIO_POWER_SHIFT);

        if (pch_iir & SDE_GMBUS)
                DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        /* Dump the per-pipe FDI receiver status on any FDI event. */
        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
588
/*
 * Top-level interrupt handler for Ivybridge: snapshots DEIIR, GTIIR,
 * SDEIIR and GEN6_PMIIR with the master interrupt disabled, dispatches
 * ring/display/PCH/PM events, then clears the IIRs and re-enables the
 * master interrupt.
 */
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
        struct drm_i915_master_private *master_priv;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Snapshot all pending interrupt sources. */
        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pch_iir = I915_READ(SDEIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        /* Nothing pending: not our interrupt. */
        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
                goto done;

        ret = IRQ_HANDLED;

        /* Legacy (UMS) breadcrumb update into the sarea, if present. */
        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }

        /* GT interrupts: wake seqno waiters on the affected ring. */
        if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GEN6_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (de_iir & DE_GSE_IVB)
                intel_opregion_gse_intr(dev);

        /* Page-flip completion on either display plane. */
        if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        if (de_iir & DE_PIPEA_VBLANK_IVB)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK_IVB)
                drm_handle_vblank(dev, 1);

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT_IVB) {
                if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                pch_irq_handler(dev);
        }

        /* Defer PM (RPS) events to gen6_pm_rps_work; mask them in PMIMR
         * until the work item has consumed them. */
        if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
                unsigned long flags;
                spin_lock_irqsave(&dev_priv->rps_lock, flags);
                WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
                dev_priv->pm_iir |= pm_iir;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
                POSTING_READ(GEN6_PMIMR);
                spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
                queue_work(dev_priv->wq, &dev_priv->rps_work);
        }

        /* should clear PCH hotplug event before clear CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        /* Re-enable the master interrupt. */
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}
677
/*
 * Top-level interrupt handler for Ironlake/Sandybridge: same structure
 * as the Ivybridge handler, but with gen-dependent BSD-user bits and
 * the Ironlake PCU (DRPS) event. PM events are only consulted on gen6.
 */
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
        u32 hotplug_mask;
        struct drm_i915_master_private *master_priv;
        u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        /* Gen6 uses a different BSD-ring user-interrupt bit. */
        if (IS_GEN6(dev))
                bsd_usr_interrupt = GEN6_BSD_USER_INTERRUPT;

        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Snapshot all pending interrupt sources. */
        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pch_iir = I915_READ(SDEIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        /* Nothing pending (pm_iir only counts on gen6): not ours. */
        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
            (!IS_GEN6(dev) || pm_iir == 0))
                goto done;

        /* CPT PCHs use a different hotplug bit layout. */
        if (HAS_PCH_CPT(dev))
                hotplug_mask = SDE_HOTPLUG_MASK_CPT;
        else
                hotplug_mask = SDE_HOTPLUG_MASK;

        ret = IRQ_HANDLED;

        /* Legacy (UMS) breadcrumb update into the sarea, if present. */
        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }

        /* GT interrupts: wake seqno waiters on the affected ring. */
        if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & bsd_usr_interrupt)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (de_iir & DE_GSE)
                intel_opregion_gse_intr(dev);

        /* Page-flip completion on either display plane. */
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                if (pch_iir & hotplug_mask)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                pch_irq_handler(dev);
        }

        /* Ironlake DRPS: ack MEMINT status and re-evaluate the P-state. */
        if (de_iir & DE_PCU_EVENT) {
                I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
                i915_handle_rps_change(dev);
        }

        if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
                /*
                 * IIR bits should never already be set because IMR should
                 * prevent an interrupt from being shown in IIR. The warning
                 * displays a case where we've unsafely cleared
                 * dev_priv->pm_iir. Although missing an interrupt of the same
                 * type is not a problem, it displays a problem in the logic.
                 *
                 * The mask bit in IMR is cleared by rps_work.
                 */
                unsigned long flags;
                spin_lock_irqsave(&dev_priv->rps_lock, flags);
                WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
                dev_priv->pm_iir |= pm_iir;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
                POSTING_READ(GEN6_PMIMR);
                spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
                queue_work(dev_priv->wq, &dev_priv->rps_work);
        }

        /* should clear PCH hotplug event before clear CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        /* Re-enable the master interrupt. */
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}
791
792 /**
793  * i915_error_work_func - do process context error handling work
794  * @work: work struct
795  *
796  * Fire an error uevent so userspace can see that a hang or error
797  * was detected.
798  */
799 static void i915_error_work_func(struct work_struct *work)
800 {
801         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
802                                                     error_work);
803         struct drm_device *dev = dev_priv->dev;
804         char *error_event[] = { "ERROR=1", NULL };
805         char *reset_event[] = { "RESET=1", NULL };
806         char *reset_done_event[] = { "ERROR=0", NULL };
807
808         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
809
810         if (atomic_read(&dev_priv->mm.wedged)) {
811                 DRM_DEBUG_DRIVER("resetting chip\n");
812                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
813                 if (!i915_reset(dev, GRDOM_RENDER)) {
814                         atomic_set(&dev_priv->mm.wedged, 0);
815                         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
816                 }
817                 complete_all(&dev_priv->error_completion);
818         }
819 }
820
821 #ifdef CONFIG_DEBUG_FS
/* Snapshot the backing pages of @src into freshly allocated anonymous
 * pages so the error state survives userspace reusing the object.
 * Called during error capture, possibly from interrupt context, hence
 * GFP_ATOMIC throughout.  Returns NULL on any allocation failure or if
 * @src has no pages.  The caller owns the returned object and frees it
 * with i915_error_object_free().
 */
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	/* Header plus one page-pointer per copied page. */
	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		/* Disable irqs: both copy paths use atomic (per-cpu)
		 * mappings that must not be preempted or re-entered. */
		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			/* Flush CPU caches so we read what the GPU saw... */
			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			/* ...and flush again so no stale lines linger. */
			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	/* page indexes the allocation that failed; free everything before it. */
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
890
891 static void
892 i915_error_object_free(struct drm_i915_error_object *obj)
893 {
894         int page;
895
896         if (obj == NULL)
897                 return;
898
899         for (page = 0; page < obj->page_count; page++)
900                 kfree(obj->pages[page]);
901
902         kfree(obj);
903 }
904
905 static void
906 i915_error_state_free(struct drm_device *dev,
907                       struct drm_i915_error_state *error)
908 {
909         int i;
910
911         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
912                 i915_error_object_free(error->ring[i].batchbuffer);
913                 i915_error_object_free(error->ring[i].ringbuffer);
914                 kfree(error->ring[i].requests);
915         }
916
917         kfree(error->active_bo);
918         kfree(error->overlay);
919         kfree(error);
920 }
921
922 static u32 capture_bo_list(struct drm_i915_error_buffer *err,
923                            int count,
924                            struct list_head *head)
925 {
926         struct drm_i915_gem_object *obj;
927         int i = 0;
928
929         list_for_each_entry(obj, head, mm_list) {
930                 err->size = obj->base.size;
931                 err->name = obj->base.name;
932                 err->seqno = obj->last_rendering_seqno;
933                 err->gtt_offset = obj->gtt_offset;
934                 err->read_domains = obj->base.read_domains;
935                 err->write_domain = obj->base.write_domain;
936                 err->fence_reg = obj->fence_reg;
937                 err->pinned = 0;
938                 if (obj->pin_count > 0)
939                         err->pinned = 1;
940                 if (obj->user_pin_count > 0)
941                         err->pinned = -1;
942                 err->tiling = obj->tiling_mode;
943                 err->dirty = obj->dirty;
944                 err->purgeable = obj->madv != I915_MADV_WILLNEED;
945                 err->ring = obj->ring ? obj->ring->id : -1;
946                 err->cache_level = obj->cache_level;
947
948                 if (++i == count)
949                         break;
950
951                 err++;
952         }
953
954         return i;
955 }
956
/* Record the fence registers into the error state.  Register layout,
 * width and count vary by hardware generation. */
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		/* 945/G33-class parts have 8 extra fences at a second
		 * register block; the remaining 8 are read below. */
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through - gen3 shares the first 8 fence registers
		 * with gen2 */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}
}
986
987 static struct drm_i915_error_object *
988 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
989                              struct intel_ring_buffer *ring)
990 {
991         struct drm_i915_gem_object *obj;
992         u32 seqno;
993
994         if (!ring->get_seqno)
995                 return NULL;
996
997         seqno = ring->get_seqno(ring);
998         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
999                 if (obj->ring != ring)
1000                         continue;
1001
1002                 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
1003                         continue;
1004
1005                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1006                         continue;
1007
1008                 /* We need to copy these to an anonymous buffer as the simplest
1009                  * method to avoid being overwritten by userspace.
1010                  */
1011                 return i915_error_object_create(dev_priv, obj);
1012         }
1013
1014         return NULL;
1015 }
1016
/* Snapshot the per-ring error registers into @error.  Which registers
 * exist (and where) depends on the hardware generation. */
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* gen6+ adds fault and semaphore mailbox registers. */
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ has per-ring instruction/error registers. */
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			/* INSTDONE1/BB_ADDR only exist for the render ring. */
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		/* Pre-gen4 only has the single (render) register set. */
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	/* Also record the driver's software view of head/tail, which may
	 * disagree with the MMIO values after a hang. */
	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
1056
/* Capture per-ring state into @error: ring registers, a snapshot of the
 * suspect batchbuffer and ringbuffer contents, and the list of pending
 * requests.  Ring snapshots that fail to allocate are simply NULL. */
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		/* Skip rings the hardware/driver never initialized. */
		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		/* Two passes over the request list: count, then fill.
		 * NOTE(review): assumes the list does not change between
		 * the passes - confirm what serializes against request
		 * retirement during error capture. */
		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			/* Allocation failed; report zero requests rather
			 * than aborting the whole capture. */
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
1102
1103 /**
1104  * i915_capture_error_state - capture an error record for later analysis
1105  * @dev: drm device
1106  *
1107  * Should be called when an error is detected (either a hang or an error
1108  * interrupt) to capture error state from the time of the error.  Fills
1109  * out a structure which becomes available in debugfs for user level tools
1110  * to pick up.
1111  */
1112 static void i915_capture_error_state(struct drm_device *dev)
1113 {
1114         struct drm_i915_private *dev_priv = dev->dev_private;
1115         struct drm_i915_gem_object *obj;
1116         struct drm_i915_error_state *error;
1117         unsigned long flags;
1118         int i, pipe;
1119
1120         spin_lock_irqsave(&dev_priv->error_lock, flags);
1121         error = dev_priv->first_error;
1122         spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1123         if (error)
1124                 return;
1125
1126         /* Account for pipe specific data like PIPE*STAT */
1127         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1128         if (!error) {
1129                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1130                 return;
1131         }
1132
1133         DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1134                  dev->primary->index);
1135
1136         error->eir = I915_READ(EIR);
1137         error->pgtbl_er = I915_READ(PGTBL_ER);
1138         for_each_pipe(pipe)
1139                 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1140
1141         if (INTEL_INFO(dev)->gen >= 6) {
1142                 error->error = I915_READ(ERROR_GEN6);
1143                 error->done_reg = I915_READ(DONE_REG);
1144         }
1145
1146         i915_gem_record_fences(dev, error);
1147         i915_gem_record_rings(dev, error);
1148
1149         /* Record buffers on the active and pinned lists. */
1150         error->active_bo = NULL;
1151         error->pinned_bo = NULL;
1152
1153         i = 0;
1154         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1155                 i++;
1156         error->active_bo_count = i;
1157         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
1158                 i++;
1159         error->pinned_bo_count = i - error->active_bo_count;
1160
1161         error->active_bo = NULL;
1162         error->pinned_bo = NULL;
1163         if (i) {
1164                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1165                                            GFP_ATOMIC);
1166                 if (error->active_bo)
1167                         error->pinned_bo =
1168                                 error->active_bo + error->active_bo_count;
1169         }
1170
1171         if (error->active_bo)
1172                 error->active_bo_count =
1173                         capture_bo_list(error->active_bo,
1174                                         error->active_bo_count,
1175                                         &dev_priv->mm.active_list);
1176
1177         if (error->pinned_bo)
1178                 error->pinned_bo_count =
1179                         capture_bo_list(error->pinned_bo,
1180                                         error->pinned_bo_count,
1181                                         &dev_priv->mm.pinned_list);
1182
1183         do_gettimeofday(&error->time);
1184
1185         error->overlay = intel_overlay_capture_error_state(dev);
1186         error->display = intel_display_capture_error_state(dev);
1187
1188         spin_lock_irqsave(&dev_priv->error_lock, flags);
1189         if (dev_priv->first_error == NULL) {
1190                 dev_priv->first_error = error;
1191                 error = NULL;
1192         }
1193         spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1194
1195         if (error)
1196                 i915_error_state_free(dev, error);
1197 }
1198
/* Discard any captured error state (e.g. on driver teardown or when
 * userspace clears it via debugfs).  Safe to call when none exists. */
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Detach under the lock, free outside it. */
	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
1213 #else
1214 #define i915_capture_error_state(x)
1215 #endif
1216
/* Dump the Error Identity Register (and related error registers) to the
 * log and acknowledge them by writing the values back.  If any EIR bits
 * remain set after the clear, mask them in EMR so they cannot storm. */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			/* Writing the value back acknowledges the error. */
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		/* Register names/locations changed at gen4. */
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	/* Clear EIR and re-read it to see whether anything stuck. */
	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
1308
1309 /**
1310  * i915_handle_error - handle an error interrupt
1311  * @dev: drm device
1312  *
1313  * Do some basic checking of regsiter state at error interrupt time and
1314  * dump it to the syslog.  Also call i915_capture_error_state() to make
1315  * sure we get a record and make it available in debugfs.  Fire a uevent
1316  * so userspace knows something bad happened (should trigger collection
1317  * of a ring dump etc.).
1318  */
1319 void i915_handle_error(struct drm_device *dev, bool wedged)
1320 {
1321         struct drm_i915_private *dev_priv = dev->dev_private;
1322
1323         i915_capture_error_state(dev);
1324         i915_report_and_clear_eir(dev);
1325
1326         if (wedged) {
1327                 INIT_COMPLETION(dev_priv->error_completion);
1328                 atomic_set(&dev_priv->mm.wedged, 1);
1329
1330                 /*
1331                  * Wakeup waiting processes so they don't hang
1332                  */
1333                 wake_up_all(&dev_priv->ring[RCS].irq_queue);
1334                 if (HAS_BSD(dev))
1335                         wake_up_all(&dev_priv->ring[VCS].irq_queue);
1336                 if (HAS_BLT(dev))
1337                         wake_up_all(&dev_priv->ring[BCS].irq_queue);
1338         }
1339
1340         queue_work(dev_priv->wq, &dev_priv->error_work);
1341 }
1342
/* Detect a page flip whose completion interrupt was missed: if the
 * display base register already points at the new object, the flip
 * happened in hardware, so kick completion manually. */
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ programs a GTT surface address. */
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		/* Older parts program a linear byte offset, so add in the
		 * current x/y panning of the framebuffer. */
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
1385
/* Main interrupt handler for pre-Ironlake GPUs.  Loops acknowledging
 * IIR and PIPESTAT until no further interrupt sources remain pending
 * (required to avoid losing edge-triggered MSI interrupts). */
static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	/* The vblank status bit moved on gen4. */
	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		/* Update the legacy DRI1 breadcrumb in the sarea. */
		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & vblank_status &&
			    drm_handle_vblank(dev, pipe)) {
				vblank++;
				if (!dev_priv->flip_pending_is_done) {
					/* Flip completion is signalled by
					 * vblank on this hardware. */
					i915_pageflip_stall_check(dev, pipe);
					intel_finish_page_flip(dev, pipe);
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}


		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
1523
/* Emit a breadcrumb store plus user interrupt on the legacy (DRI1) ring.
 * Returns the breadcrumb value emitted.  Caller must hold the ring lock
 * (see i915_irq_emit). */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	/* Breadcrumbs are positive 31-bit values; wrap back to 1, never 0. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Store the counter at the breadcrumb index of the hw status page,
	 * then raise MI_USER_INTERRUPT so waiters get woken. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
1549
/* Wait (up to 3 seconds) for the hardware breadcrumb to reach @irq_nr on
 * the legacy (DRI1) path.  Returns 0 on success, -EBUSY on timeout. */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already passed - nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Prefer an interrupt-driven wait; if the ring's user interrupt
	 * cannot be enabled, fall back to polling the breadcrumb. */
	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
1583
1584 /* Needs the lock as it touches the ring.
1585  */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	/* Requires an initialized driver with a mapped legacy ring. */
	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* struct_mutex protects the ring while we emit the breadcrumb. */
	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Hand the emitted sequence number back to userspace. */
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
1611
1612 /* Doesn't need the hardware lock.
1613  */
1614 int i915_irq_wait(struct drm_device *dev, void *data,
1615                          struct drm_file *file_priv)
1616 {
1617         drm_i915_private_t *dev_priv = dev->dev_private;
1618         drm_i915_irq_wait_t *irqwait = data;
1619
1620         if (!dev_priv) {
1621                 DRM_ERROR("called with no initialization\n");
1622                 return -EINVAL;
1623         }
1624
1625         return i915_wait_irq(dev, irqwait->irq_seq);
1626 }
1627
1628 /* Called from drm generic code, passed 'crtc' which
1629  * we use as a pipe index
1630  */
1631 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1632 {
1633         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1634         unsigned long irqflags;
1635
1636         if (!i915_pipe_enabled(dev, pipe))
1637                 return -EINVAL;
1638
1639         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1640         if (INTEL_INFO(dev)->gen >= 4)
1641                 i915_enable_pipestat(dev_priv, pipe,
1642                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1643         else
1644                 i915_enable_pipestat(dev_priv, pipe,
1645                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1646
1647         /* maintain vblank delivery even in deep C-states */
1648         if (dev_priv->info->gen == 3)
1649                 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1650         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1651
1652         return 0;
1653 }
1654
1655 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1656 {
1657         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1658         unsigned long irqflags;
1659
1660         if (!i915_pipe_enabled(dev, pipe))
1661                 return -EINVAL;
1662
1663         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1664         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1665                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1666         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1667
1668         return 0;
1669 }
1670
1671 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1672 {
1673         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1674         unsigned long irqflags;
1675
1676         if (!i915_pipe_enabled(dev, pipe))
1677                 return -EINVAL;
1678
1679         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1680         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1681                                     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1682         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1683
1684         return 0;
1685 }
1686
1687 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1688 {
1689         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1690         unsigned long irqflags;
1691         u32 dpfl, imr;
1692
1693         if (!i915_pipe_enabled(dev, pipe))
1694                 return -EINVAL;
1695
1696         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1697         dpfl = I915_READ(VLV_DPFLIPSTAT);
1698         imr = I915_READ(VLV_IMR);
1699         if (pipe == 0) {
1700                 dpfl |= PIPEA_VBLANK_INT_EN;
1701                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1702         } else {
1703                 dpfl |= PIPEA_VBLANK_INT_EN;
1704                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1705         }
1706         I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1707         I915_WRITE(VLV_IMR, imr);
1708         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1709
1710         return 0;
1711 }
1712
1713 /* Called from drm generic code, passed 'crtc' which
1714  * we use as a pipe index
1715  */
1716 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1717 {
1718         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1719         unsigned long irqflags;
1720
1721         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1722         if (dev_priv->info->gen == 3)
1723                 I915_WRITE(INSTPM,
1724                            INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1725
1726         i915_disable_pipestat(dev_priv, pipe,
1727                               PIPE_VBLANK_INTERRUPT_ENABLE |
1728                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1729         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1730 }
1731
1732 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1733 {
1734         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1735         unsigned long irqflags;
1736
1737         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1738         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1739                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1740         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1741 }
1742
1743 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1744 {
1745         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1746         unsigned long irqflags;
1747
1748         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1749         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1750                                      DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1751         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1752 }
1753
1754 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1755 {
1756         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1757         unsigned long irqflags;
1758         u32 dpfl, imr;
1759
1760         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1761         dpfl = I915_READ(VLV_DPFLIPSTAT);
1762         imr = I915_READ(VLV_IMR);
1763         if (pipe == 0) {
1764                 dpfl &= ~PIPEA_VBLANK_INT_EN;
1765                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1766         } else {
1767                 dpfl &= ~PIPEB_VBLANK_INT_EN;
1768                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1769         }
1770         I915_WRITE(VLV_IMR, imr);
1771         I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1772         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1773 }
1774
1775
1776 /* Set the vblank monitor pipe
1777  */
1778 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1779                          struct drm_file *file_priv)
1780 {
1781         drm_i915_private_t *dev_priv = dev->dev_private;
1782
1783         if (!dev_priv) {
1784                 DRM_ERROR("called with no initialization\n");
1785                 return -EINVAL;
1786         }
1787
1788         return 0;
1789 }
1790
1791 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1792                          struct drm_file *file_priv)
1793 {
1794         drm_i915_private_t *dev_priv = dev->dev_private;
1795         drm_i915_vblank_pipe_t *pipe = data;
1796
1797         if (!dev_priv) {
1798                 DRM_ERROR("called with no initialization\n");
1799                 return -EINVAL;
1800         }
1801
1802         pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1803
1804         return 0;
1805 }
1806
1807 /**
1808  * Schedule buffer swap at given vertical blank.
1809  */
1810 int i915_vblank_swap(struct drm_device *dev, void *data,
1811                      struct drm_file *file_priv)
1812 {
1813         /* The delayed swap mechanism was fundamentally racy, and has been
1814          * removed.  The model was that the client requested a delayed flip/swap
1815          * from the kernel, then waited for vblank before continuing to perform
1816          * rendering.  The problem was that the kernel might wake the client
1817          * up before it dispatched the vblank swap (since the lock has to be
1818          * held while touching the ringbuffer), in which case the client would
1819          * clear and start the next frame before the swap occurred, and
1820          * flicker would occur in addition to likely missing the vblank.
1821          *
1822          * In the absence of this ioctl, userland falls back to a correct path
1823          * of waiting for a vblank, then dispatching the swap on its own.
1824          * Context switching to userland and back is plenty fast enough for
1825          * meeting the requirements of vblank swapping.
1826          */
1827         return -EINVAL;
1828 }
1829
1830 static u32
1831 ring_last_seqno(struct intel_ring_buffer *ring)
1832 {
1833         return list_entry(ring->request_list.prev,
1834                           struct drm_i915_gem_request, list)->seqno;
1835 }
1836
1837 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1838 {
1839         if (list_empty(&ring->request_list) ||
1840             i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1841                 /* Issue a wake-up to catch stuck h/w. */
1842                 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1843                         DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1844                                   ring->name,
1845                                   ring->waiting_seqno,
1846                                   ring->get_seqno(ring));
1847                         wake_up_all(&ring->irq_queue);
1848                         *err = true;
1849                 }
1850                 return true;
1851         }
1852         return false;
1853 }
1854
1855 static bool kick_ring(struct intel_ring_buffer *ring)
1856 {
1857         struct drm_device *dev = ring->dev;
1858         struct drm_i915_private *dev_priv = dev->dev_private;
1859         u32 tmp = I915_READ_CTL(ring);
1860         if (tmp & RING_WAIT) {
1861                 DRM_ERROR("Kicking stuck wait on %s\n",
1862                           ring->name);
1863                 I915_WRITE_CTL(ring, tmp);
1864                 return true;
1865         }
1866         return false;
1867 }
1868
1869 /**
1870  * This is called when the chip hasn't reported back with completed
1871  * batchbuffers in a long time. The first time this is called we simply record
1872  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1873  * again, we assume the chip is wedged and try to fix it.
1874  */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
	bool err = false;

	/* Hangcheck can be disabled entirely via module parameter. */
	if (!i915_enable_hangcheck)
		return;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		dev_priv->hangcheck_count = 0;
		/* err is set when a ring looked idle but still had
		 * sleepers — re-arm the timer to catch a missed IRQ.
		 */
		if (err)
			goto repeat;
		return;
	}

	/* Sample the per-generation "instruction done" registers. */
	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}
	/* Active head pointers; BSD/BLT only exist on some parts. */
	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
	acthd_bsd = HAS_BSD(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
	acthd_blt = HAS_BLT(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;

	/* No progress since the last sample: count towards a hang. */
	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_acthd_bsd == acthd_bsd &&
	    dev_priv->last_acthd_blt == acthd_blt &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		/* Two stalled samples in a row — declare a hang. */
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
			i915_handle_error(dev, true);

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang. This should work on
				 * all but the second generation chipsets.
				 */
				if (kick_ring(&dev_priv->ring[RCS]))
					goto repeat;

				if (HAS_BSD(dev) &&
				    kick_ring(&dev_priv->ring[VCS]))
					goto repeat;

				if (HAS_BLT(dev) &&
				    kick_ring(&dev_priv->ring[BCS]))
					goto repeat;
			}

			return;
		}
	} else {
		/* Progress was made: restart the detection window. */
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_acthd_bsd = acthd_bsd;
		dev_priv->last_acthd_blt = acthd_blt;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
1952
1953 /* drm_dma.h hooks
1954 */
/* Quiesce all interrupt sources (display engine, GT, and south/PCH)
 * before the IRQ handler is installed.
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	/* RPS work is only needed on parts with GEN6-style power management. */
	if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	/* Mask everything and disable display engine interrupts. */
	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
1984
/* Quiesce all Valleyview interrupt sources before installing the handler:
 * ring IMRs, GT, pipe status, hotplug, and the top-level VLV registers.
 */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* NOTE(review): an IS_GEN6() check inside the VLV path looks odd —
	 * presumably inherited from the gen6 preinstall; confirm intent.
	 */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		/* Workaround stalls observed on Sandy Bridge GPUs by
		 * making the blitter command streamer generate a
		 * write to the Hardware Status Page for
		 * MI_USER_INTERRUPT.  This appears to serialize the
		 * previous seqno write out before the interrupt
		 * happens.
		 */
		I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
		I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
	}

	/* and GT */
	/* GTIIR is cleared twice — presumably to flush sticky bits; kept as-is. */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	/* Clear hotplug and per-pipe status, then mask and disable the
	 * top-level VLV interrupt registers.
	 */
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
2031
2032 /*
2033  * Enable digital hotplug on the PCH, and configure the DP short pulse
2034  * duration to 2ms (which is the minimum in the Display Port spec)
2035  *
2036  * This register is the same on all known PCH chips.
2037  */
2038
2039 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
2040 {
2041         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2042         u32     hotplug;
2043
2044         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2045         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2046         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2047         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2048         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2049         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2050 }
2051
/* Program the Ironlake/Sandybridge interrupt registers for normal
 * operation: display engine, GT (ring user interrupts) and the PCH
 * hotplug sources.  Always returns 0.
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	/* Wait queues for the user-interrupt paths of each present ring. */
	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	/* Clear pending bits, unmask the always-on set, and enable vblanks
	 * in IER (they are gated by IMR until a client asks for them).
	 */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	/* GT interrupts start fully masked; ring code unmasks on demand. */
	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	/* Gen6 has per-ring user interrupts; older parts use the legacy
	 * render/BSD bits plus pipe notify.
	 */
	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* CPT PCHs use a different set of hotplug status bits. */
	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
2125
/* Ivybridge variant of the interrupt post-install: same structure as
 * Ironlake but with IVB display bit definitions and a CPT-only PCH.
 * Always returns 0.
 */
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
		DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	/* Wait queues for the user-interrupt paths of each present ring. */
	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	/* Clear pending bits, unmask the always-on set, and enable vblanks
	 * in IER (gated by IMR until requested).
	 */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB);
	POSTING_READ(DEIER);

	/* GT interrupts start fully masked; ring code unmasks on demand. */
	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* IVB is always paired with a CPT-style PCH. */
	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
2177
/* Program the Valleyview interrupt registers for normal operation.
 * Display interrupts go through VLV_IER/IMR, GT interrupts through
 * GTIER; hotplug support is still partially stubbed out.  Always
 * returns 0.
 */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 render_irqs;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u16 msid;

	/* Display port plus both pipes' vblanks are always enabled. */
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->irq_mask = ~enable_mask;


	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Hack for broken MSIs on VLV */
	/* NOTE(review): mixes dev_priv->dev->pdev and dev->pdev — same
	 * device, but the inconsistency is worth unifying eventually.
	 */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	/* Unmask and enable the display interrupts, clearing any stale
	 * pipe status first.
	 */
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* IIR is cleared twice — presumably to flush sticky bits; kept as-is. */
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		GT_GEN6_BLT_USER_INTERRUPT |
		GT_GEN6_BSD_USER_INTERRUPT |
		GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		GT_PIPE_NOTIFY |
		GT_RENDER_CS_ERROR_INTERRUPT |
		GT_SYNC_STATUS |
		GT_USER_INTERRUPT;

	dev_priv->gt_irq_mask = ~render_irqs;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0);
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
2267
/* Legacy (pre-Ironlake) pre-install: clear hotplug and pipe status and
 * mask/disable the single IMR/IER interrupt pair.
 */
static void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
2290
2291 /*
2292  * Must be called after intel_modeset_init or hotplug interrupts won't be
2293  * enabled correctly.
2294  */
static int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Program IMR before IER so nothing fires half-configured. */
	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
2370
/*
 * Uninstall hook for ValleyView: mask and disable every interrupt
 * source and acknowledge anything still pending, so a later install
 * starts from a quiet state.
 */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	/* Nothing to tear down if the driver never finished loading. */
	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	/* Quiesce the pipes first -- presumably acks pending status and
	 * drops the event enables; confirm against the PIPESTAT layout. */
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Ack pending hotplug events by writing the status back (W1C). */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	/* NOTE(review): PIPESTAT is cleared a second time here, apparently
	 * to catch events raised since the first pass -- confirm both
	 * passes are needed. */
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	/* Flush the disables out to the hardware. */
	POSTING_READ(VLV_IER);
}
2394
/*
 * Uninstall hook shared by ILK/SNB (and reused for IVB): mask, disable
 * and acknowledge each of the three interrupt groups in turn.
 */
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Nothing to tear down if the driver never finished loading. */
	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);

	/* Display engine: mask all, disable, ack pending (IIR is W1C). */
	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	/* GT (render) interrupts: same mask/disable/ack sequence. */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	/* South display engine (PCH) interrupts. */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
2418
/*
 * Uninstall hook for the legacy (pre-PCH) chipsets: disable hotplug
 * detection, mask and disable all interrupts, then acknowledge any
 * status still latched in PIPESTAT and IIR.
 */
static void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	/* Nothing to tear down if the driver never finished loading. */
	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Ack pending hotplug events by writing the status back. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	/* Drop all pipe event enables. */
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Write the latched status bits back to clear them -- the
	 * 0x8000ffff mask presumably selects the write-1-to-clear status
	 * bits while leaving the enable bits alone; confirm against the
	 * PIPESTAT register layout. */
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
2445
2446 void intel_irq_init(struct drm_device *dev)
2447 {
2448         dev->driver->get_vblank_counter = i915_get_vblank_counter;
2449         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2450         if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
2451             IS_VALLEYVIEW(dev)) {
2452                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2453                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2454         }
2455
2456         if (drm_core_check_feature(dev, DRIVER_MODESET))
2457                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2458         else
2459                 dev->driver->get_vblank_timestamp = NULL;
2460         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2461
2462         if (IS_VALLEYVIEW(dev)) {
2463                 dev->driver->irq_handler = valleyview_irq_handler;
2464                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2465                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2466                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2467                 dev->driver->enable_vblank = valleyview_enable_vblank;
2468                 dev->driver->disable_vblank = valleyview_disable_vblank;
2469         } else if (IS_IVYBRIDGE(dev)) {
2470                 /* Share pre & uninstall handlers with ILK/SNB */
2471                 dev->driver->irq_handler = ivybridge_irq_handler;
2472                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2473                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2474                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2475                 dev->driver->enable_vblank = ivybridge_enable_vblank;
2476                 dev->driver->disable_vblank = ivybridge_disable_vblank;
2477         } else if (HAS_PCH_SPLIT(dev)) {
2478                 dev->driver->irq_handler = ironlake_irq_handler;
2479                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2480                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2481                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2482                 dev->driver->enable_vblank = ironlake_enable_vblank;
2483                 dev->driver->disable_vblank = ironlake_disable_vblank;
2484         } else {
2485                 dev->driver->irq_preinstall = i915_driver_irq_preinstall;
2486                 dev->driver->irq_postinstall = i915_driver_irq_postinstall;
2487                 dev->driver->irq_uninstall = i915_driver_irq_uninstall;
2488                 dev->driver->irq_handler = i915_driver_irq_handler;
2489                 dev->driver->enable_vblank = i915_enable_vblank;
2490                 dev->driver->disable_vblank = i915_disable_vblank;
2491         }
2492 }