drivers/gpu/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38
39 static const u32 hpd_ibx[] = {
40         [HPD_CRT] = SDE_CRT_HOTPLUG,
41         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45 };
46
47 static const u32 hpd_cpt[] = {
48         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53 };
54
55 static const u32 hpd_mask_i915[] = {
56         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62 };
63
64 static const u32 hpd_status_gen4[] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71 };
72
73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
74         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
76         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
77         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static void ibx_hpd_irq_setup(struct drm_device *dev);
83 static void i915_hpd_irq_setup(struct drm_device *dev);
84
85 /* For display hotplug interrupt */
86 static void
87 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
88 {
89         if ((dev_priv->irq_mask & mask) != 0) {
90                 dev_priv->irq_mask &= ~mask;
91                 I915_WRITE(DEIMR, dev_priv->irq_mask);
92                 POSTING_READ(DEIMR);
93         }
94 }
95
96 static void
97 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
98 {
99         if ((dev_priv->irq_mask & mask) != mask) {
100                 dev_priv->irq_mask |= mask;
101                 I915_WRITE(DEIMR, dev_priv->irq_mask);
102                 POSTING_READ(DEIMR);
103         }
104 }
105
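/*
 * Enable the interrupt sources selected by @mask in PIPESTAT for @pipe.
 * @mask holds enable bits (upper half of the register); writing the
 * matching status bits (mask >> 16) also clears any event already pending.
 */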
106 void
107 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
108 {
109         u32 reg = PIPESTAT(pipe);
110         u32 pipestat = I915_READ(reg) & 0x7fff0000;
111
112         if ((pipestat & mask) == mask)
113                 return;
114
115         /* Enable the interrupt, clear any pending status */
116         pipestat |= mask | (mask >> 16);
117         I915_WRITE(reg, pipestat);
118         POSTING_READ(reg);
119 }
120
121 void
122 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
123 {
124         u32 reg = PIPESTAT(pipe);
125         u32 pipestat = I915_READ(reg) & 0x7fff0000;
126
127         if ((pipestat & mask) == 0)
128                 return;
129
130         pipestat &= ~mask;
131         I915_WRITE(reg, pipestat);
132         POSTING_READ(reg);
133 }
134
135 /**
136  * intel_enable_asle - enable ASLE interrupt for OpRegion
137  */
138 void intel_enable_asle(struct drm_device *dev)
139 {
140         drm_i915_private_t *dev_priv = dev->dev_private;
141         unsigned long irqflags;
142
143         /* FIXME: opregion/asle for VLV */
144         if (IS_VALLEYVIEW(dev))
145                 return;
146
147         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
148
149         if (HAS_PCH_SPLIT(dev))
150                 ironlake_enable_display_irq(dev_priv, DE_GSE);
151         else {
152                 i915_enable_pipestat(dev_priv, 1,
153                                      PIPE_LEGACY_BLC_EVENT_ENABLE);
154                 if (INTEL_INFO(dev)->gen >= 4)
155                         i915_enable_pipestat(dev_priv, 0,
156                                              PIPE_LEGACY_BLC_EVENT_ENABLE);
157         }
158
159         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
160 }
161
162 /**
163  * i915_pipe_enabled - check if a pipe is enabled
164  * @dev: DRM device
165  * @pipe: pipe to check
166  *
167  * Reading certain registers when the pipe is disabled can hang the chip.
168  * Use this routine to make sure the PLL is running and the pipe is active
169  * before reading such registers if unsure.
170  */
171 static int
172 i915_pipe_enabled(struct drm_device *dev, int pipe)
173 {
174         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
175         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
176                                                                       pipe);
177
178         return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
179 }
180
181 /* Called from drm generic code, passed a 'crtc', which
182  * we use as a pipe index
183  */
184 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
185 {
186         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
187         unsigned long high_frame;
188         unsigned long low_frame;
189         u32 high1, high2, low;
190
191         if (!i915_pipe_enabled(dev, pipe)) {
192                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
193                                 "pipe %c\n", pipe_name(pipe));
194                 return 0;
195         }
196
197         high_frame = PIPEFRAME(pipe);
198         low_frame = PIPEFRAMEPIXEL(pipe);
199
200         /*
201          * High & low register fields aren't synchronized, so make sure
202          * we get a low value that's stable across two reads of the high
203          * register.
204          */
205         do {
206                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
207                 low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
208                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
209         } while (high1 != high2);
210
211         high1 >>= PIPE_FRAME_HIGH_SHIFT;
212         low >>= PIPE_FRAME_LOW_SHIFT;
213         return (high1 << 8) | low;
214 }
215
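/*
 * g4x and later have a dedicated hardware frame counter, so the frame
 * number can be read directly instead of combining the high/low fields
 * of PIPEFRAME/PIPEFRAMEPIXEL.
 */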
216 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
217 {
218         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
219         int reg = PIPE_FRMCOUNT_GM45(pipe);
220
221         if (!i915_pipe_enabled(dev, pipe)) {
222                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
223                                  "pipe %c\n", pipe_name(pipe));
224                 return 0;
225         }
226
227         return I915_READ(reg);
228 }
229
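/*
 * Return the current scanout position of @pipe in *vpos/*hpos, plus a mask
 * of DRM_SCANOUTPOS_* flags indicating whether the readout is valid and
 * whether the position lies inside the vblank region.
 */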
230 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
231                              int *vpos, int *hpos)
232 {
233         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
234         u32 vbl = 0, position = 0;
235         int vbl_start, vbl_end, htotal, vtotal;
236         bool in_vbl = true;
237         int ret = 0;
238         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
239                                                                       pipe);
240
241         if (!i915_pipe_enabled(dev, pipe)) {
242                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
243                                  "pipe %c\n", pipe_name(pipe));
244                 return 0;
245         }
246
247         /* Get vtotal. */
248         vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
249
250         if (INTEL_INFO(dev)->gen >= 4) {
251                 /* No obvious pixelcount register. Only query vertical
252                  * scanout position from Display scan line register.
253                  */
254                 position = I915_READ(PIPEDSL(pipe));
255
256                 /* Decode into vertical scanout position. Don't have
257                  * horizontal scanout position.
258                  */
259                 *vpos = position & 0x1fff;
260                 *hpos = 0;
261         } else {
262                 /* Have access to pixelcount since start of frame.
263                  * We can split this into vertical and horizontal
264                  * scanout position.
265                  */
266                 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
267
268                 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
269                 *vpos = position / htotal;
270                 *hpos = position - (*vpos * htotal);
271         }
272
273         /* Query vblank area. */
274         vbl = I915_READ(VBLANK(cpu_transcoder));
275
276         /* Test position against vblank region. */
277         vbl_start = vbl & 0x1fff;
278         vbl_end = (vbl >> 16) & 0x1fff;
279
280         if ((*vpos < vbl_start) || (*vpos > vbl_end))
281                 in_vbl = false;
282
283         /* Inside "upper part" of vblank area? Apply corrective offset: */
284         if (in_vbl && (*vpos >= vbl_start))
285                 *vpos = *vpos - vtotal;
286
287         /* Readouts valid? */
288         if (vbl > 0)
289                 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
290
291         /* In vblank? */
292         if (in_vbl)
293                 ret |= DRM_SCANOUTPOS_INVBL;
294
295         return ret;
296 }
297
298 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
299                               int *max_error,
300                               struct timeval *vblank_time,
301                               unsigned flags)
302 {
303         struct drm_crtc *crtc;
304
305         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
306                 DRM_ERROR("Invalid crtc %d\n", pipe);
307                 return -EINVAL;
308         }
309
310         /* Get drm_crtc to timestamp: */
311         crtc = intel_get_crtc_for_pipe(dev, pipe);
312         if (crtc == NULL) {
313                 DRM_ERROR("Invalid crtc %d\n", pipe);
314                 return -EINVAL;
315         }
316
317         if (!crtc->enabled) {
318                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
319                 return -EBUSY;
320         }
321
322         /* Helper routine in DRM core does all the work: */
323         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
324                                                      vblank_time, flags,
325                                                      crtc);
326 }
327
328 /*
329  * Handle hotplug events outside the interrupt handler proper.
330  */
331 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
332
333 static void i915_hotplug_work_func(struct work_struct *work)
334 {
335         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
336                                                     hotplug_work);
337         struct drm_device *dev = dev_priv->dev;
338         struct drm_mode_config *mode_config = &dev->mode_config;
339         struct intel_connector *intel_connector;
340         struct intel_encoder *intel_encoder;
341         struct drm_connector *connector;
342         unsigned long irqflags;
343         bool hpd_disabled = false;
344
345         /* HPD irq before everything is fully set up. */
346         if (!dev_priv->enable_hotplug_processing)
347                 return;
348
349         mutex_lock(&mode_config->mutex);
350         DRM_DEBUG_KMS("running encoder hotplug functions\n");
351
352         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
353         list_for_each_entry(connector, &mode_config->connector_list, head) {
354                 intel_connector = to_intel_connector(connector);
355                 intel_encoder = intel_connector->encoder;
356                 if (intel_encoder->hpd_pin > HPD_NONE &&
357                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
358                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
359                         DRM_INFO("HPD interrupt storm detected on connector %s: "
360                                  "switching from hotplug detection to polling\n",
361                                 drm_get_connector_name(connector));
362                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
363                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
364                                 | DRM_CONNECTOR_POLL_DISCONNECT;
365                         hpd_disabled = true;
366                 }
367         }
368         /* If there were no outputs to poll, polling was disabled, so make
369          * sure it's re-enabled now that HPD is being disabled on some
370          * connectors. */
371         if (hpd_disabled) {
372                 drm_kms_helper_poll_enable(dev);
373                 mod_timer(&dev_priv->hotplug_reenable_timer,
374                           jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
375         }
376
377         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
378
379         list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
380                 if (intel_encoder->hot_plug)
381                         intel_encoder->hot_plug(intel_encoder);
382
383         mutex_unlock(&mode_config->mutex);
384
385         /* Just fire off a uevent and let userspace tell us what to do */
386         drm_helper_hpd_irq_event(dev);
387 }
388
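/*
 * Respond to a MEMINT_EVAL_CHG event on Ironlake: compare the busy up/down
 * counters against the hardware averages and step the DRPS delay one notch
 * in the indicated direction, clamped to the ips min/max limits.
 */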
389 static void ironlake_handle_rps_change(struct drm_device *dev)
390 {
391         drm_i915_private_t *dev_priv = dev->dev_private;
392         u32 busy_up, busy_down, max_avg, min_avg;
393         u8 new_delay;
394         unsigned long flags;
395
396         spin_lock_irqsave(&mchdev_lock, flags);
397
398         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
399
400         new_delay = dev_priv->ips.cur_delay;
401
402         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
403         busy_up = I915_READ(RCPREVBSYTUPAVG);
404         busy_down = I915_READ(RCPREVBSYTDNAVG);
405         max_avg = I915_READ(RCBMAXAVG);
406         min_avg = I915_READ(RCBMINAVG);
407
408         /* Handle RPS change request from hw */
409         if (busy_up > max_avg) {
410                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
411                         new_delay = dev_priv->ips.cur_delay - 1;
412                 if (new_delay < dev_priv->ips.max_delay)
413                         new_delay = dev_priv->ips.max_delay;
414         } else if (busy_down < min_avg) {
415                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
416                         new_delay = dev_priv->ips.cur_delay + 1;
417                 if (new_delay > dev_priv->ips.min_delay)
418                         new_delay = dev_priv->ips.min_delay;
419         }
420
421         if (ironlake_set_drps(dev, new_delay))
422                 dev_priv->ips.cur_delay = new_delay;
423
424         spin_unlock_irqrestore(&mchdev_lock, flags);
425
426         return;
427 }
428
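/*
 * A ring reported a completed request: wake anyone sleeping on its irq_queue
 * and re-arm the hangcheck timer, since the GPU is evidently making progress.
 */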
429 static void notify_ring(struct drm_device *dev,
430                         struct intel_ring_buffer *ring)
431 {
432         struct drm_i915_private *dev_priv = dev->dev_private;
433
434         if (ring->obj == NULL)
435                 return;
436
437         trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
438
439         wake_up_all(&ring->irq_queue);
440         if (i915_enable_hangcheck) {
441                 dev_priv->gpu_error.hangcheck_count = 0;
442                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
443                           round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
444         }
445 }
446
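/*
 * Bottom half for gen6+ RPS (turbo) interrupts: pick up the pm_iir bits
 * stashed by the interrupt handler, clear PMIMR, and move the RPS frequency
 * one step up or down within the min/max limits.
 */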
447 static void gen6_pm_rps_work(struct work_struct *work)
448 {
449         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
450                                                     rps.work);
451         u32 pm_iir, pm_imr;
452         u8 new_delay;
453
454         spin_lock_irq(&dev_priv->rps.lock);
455         pm_iir = dev_priv->rps.pm_iir;
456         dev_priv->rps.pm_iir = 0;
457         pm_imr = I915_READ(GEN6_PMIMR);
458         I915_WRITE(GEN6_PMIMR, 0);
459         spin_unlock_irq(&dev_priv->rps.lock);
460
461         if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
462                 return;
463
464         mutex_lock(&dev_priv->rps.hw_lock);
465
466         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
467                 new_delay = dev_priv->rps.cur_delay + 1;
468         else
469                 new_delay = dev_priv->rps.cur_delay - 1;
470
471         /* sysfs frequency interfaces may have snuck in while servicing the
472          * interrupt
473          */
474         if (!(new_delay > dev_priv->rps.max_delay ||
475               new_delay < dev_priv->rps.min_delay)) {
476                 gen6_set_rps(dev_priv->dev, new_delay);
477         }
478
479         mutex_unlock(&dev_priv->rps.hw_lock);
480 }
481
482
483 /**
484  * ivybridge_parity_work - Workqueue called when a parity error interrupt
485  * occurred.
486  * @work: workqueue struct
487  *
488  * Doesn't actually do anything except notify userspace. As a consequence of
489  * this event, userspace should try to remap the bad rows since statistically
490  * the same row is more likely to go bad again.
491  */
492 static void ivybridge_parity_work(struct work_struct *work)
493 {
494         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
495                                                     l3_parity.error_work);
496         u32 error_status, row, bank, subbank;
497         char *parity_event[5];
498         uint32_t misccpctl;
499         unsigned long flags;
500
501         /* We must turn off DOP level clock gating to access the L3 registers.
502          * In order to prevent a get/put style interface, acquire struct mutex
503          * any time we access those registers.
504          */
505         mutex_lock(&dev_priv->dev->struct_mutex);
506
507         misccpctl = I915_READ(GEN7_MISCCPCTL);
508         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
509         POSTING_READ(GEN7_MISCCPCTL);
510
511         error_status = I915_READ(GEN7_L3CDERRST1);
512         row = GEN7_PARITY_ERROR_ROW(error_status);
513         bank = GEN7_PARITY_ERROR_BANK(error_status);
514         subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
515
516         I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
517                                     GEN7_L3CDERRST1_ENABLE);
518         POSTING_READ(GEN7_L3CDERRST1);
519
520         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
521
522         spin_lock_irqsave(&dev_priv->irq_lock, flags);
523         dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
524         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
525         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
526
527         mutex_unlock(&dev_priv->dev->struct_mutex);
528
529         parity_event[0] = "L3_PARITY_ERROR=1";
530         parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
531         parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
532         parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
533         parity_event[4] = NULL;
534
535         kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
536                            KOBJ_CHANGE, parity_event);
537
538         DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
539                   row, bank, subbank);
540
541         kfree(parity_event[3]);
542         kfree(parity_event[2]);
543         kfree(parity_event[1]);
544 }
545
546 static void ivybridge_handle_parity_error(struct drm_device *dev)
547 {
548         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
549         unsigned long flags;
550
551         if (!HAS_L3_GPU_CACHE(dev))
552                 return;
553
554         spin_lock_irqsave(&dev_priv->irq_lock, flags);
555         dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
556         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
557         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
558
559         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
560 }
561
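/*
 * Demultiplex SNB/IVB GT interrupts: notify the render/BSD/blitter rings on
 * user interrupts, report command streamer errors through i915_handle_error()
 * and kick off the L3 parity error work where applicable.
 */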
562 static void snb_gt_irq_handler(struct drm_device *dev,
563                                struct drm_i915_private *dev_priv,
564                                u32 gt_iir)
565 {
566
567         if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
568                       GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
569                 notify_ring(dev, &dev_priv->ring[RCS]);
570         if (gt_iir & GEN6_BSD_USER_INTERRUPT)
571                 notify_ring(dev, &dev_priv->ring[VCS]);
572         if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
573                 notify_ring(dev, &dev_priv->ring[BCS]);
574
575         if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
576                       GT_GEN6_BSD_CS_ERROR_INTERRUPT |
577                       GT_RENDER_CS_ERROR_INTERRUPT)) {
578                 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
579                 i915_handle_error(dev, false);
580         }
581
582         if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
583                 ivybridge_handle_parity_error(dev);
584 }
585
586 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
587                                 u32 pm_iir)
588 {
589         unsigned long flags;
590
591         /*
592          * IIR bits should never already be set because IMR should
593          * prevent an interrupt from being shown in IIR. The warning
594          * displays a case where we've unsafely cleared
595          * dev_priv->rps.pm_iir. Although missing an interrupt of the same
596          * type is not a problem, it indicates a problem in the logic.
597          *
598          * The mask bit in IMR is cleared by dev_priv->rps.work.
599          */
600
601         spin_lock_irqsave(&dev_priv->rps.lock, flags);
602         dev_priv->rps.pm_iir |= pm_iir;
603         I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
604         POSTING_READ(GEN6_PMIMR);
605         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
606
607         queue_work(dev_priv->wq, &dev_priv->rps.work);
608 }
609
610 #define HPD_STORM_DETECT_PERIOD 1000
611 #define HPD_STORM_THRESHOLD 5
612
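/*
 * Count hotplug interrupts per pin and mark a pin HPD_MARK_DISABLED once it
 * fires more than HPD_STORM_THRESHOLD times within HPD_STORM_DETECT_PERIOD ms.
 * Returns true if any pin was disabled, so the caller can re-program the
 * hotplug interrupt registers.
 */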
613 static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
614                                             u32 hotplug_trigger,
615                                             const u32 *hpd)
616 {
617         drm_i915_private_t *dev_priv = dev->dev_private;
618         unsigned long irqflags;
619         int i;
620         bool ret = false;
621
622         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
623
624         for (i = 1; i < HPD_NUM_PINS; i++) {
625
626                 if (!(hpd[i] & hotplug_trigger) ||
627                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
628                         continue;
629
630                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
631                                    dev_priv->hpd_stats[i].hpd_last_jiffies
632                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
633                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
634                         dev_priv->hpd_stats[i].hpd_cnt = 0;
635                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
636                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
637                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
638                         ret = true;
639                 } else {
640                         dev_priv->hpd_stats[i].hpd_cnt++;
641                 }
642         }
643
644         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
645
646         return ret;
647 }
648
649 static void gmbus_irq_handler(struct drm_device *dev)
650 {
651         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
652
653         wake_up_all(&dev_priv->gmbus_wait_queue);
654 }
655
656 static void dp_aux_irq_handler(struct drm_device *dev)
657 {
658         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
659
660         wake_up_all(&dev_priv->gmbus_wait_queue);
661 }
662
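/*
 * Top-level interrupt handler for Valleyview: loop until VLV_IIR, GTIIR and
 * GEN6_PMIIR are all clear, handling GT, pipe, hotplug, GMBUS and RPS events
 * along the way.
 */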
663 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
664 {
665         struct drm_device *dev = (struct drm_device *) arg;
666         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
667         u32 iir, gt_iir, pm_iir;
668         irqreturn_t ret = IRQ_NONE;
669         unsigned long irqflags;
670         int pipe;
671         u32 pipe_stats[I915_MAX_PIPES];
672
673         atomic_inc(&dev_priv->irq_received);
674
675         while (true) {
676                 iir = I915_READ(VLV_IIR);
677                 gt_iir = I915_READ(GTIIR);
678                 pm_iir = I915_READ(GEN6_PMIIR);
679
680                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
681                         goto out;
682
683                 ret = IRQ_HANDLED;
684
685                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
686
687                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
688                 for_each_pipe(pipe) {
689                         int reg = PIPESTAT(pipe);
690                         pipe_stats[pipe] = I915_READ(reg);
691
692                         /*
693                          * Clear the PIPE*STAT regs before the IIR
694                          */
695                         if (pipe_stats[pipe] & 0x8000ffff) {
696                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
697                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
698                                                          pipe_name(pipe));
699                                 I915_WRITE(reg, pipe_stats[pipe]);
700                         }
701                 }
702                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
703
704                 for_each_pipe(pipe) {
705                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
706                                 drm_handle_vblank(dev, pipe);
707
708                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
709                                 intel_prepare_page_flip(dev, pipe);
710                                 intel_finish_page_flip(dev, pipe);
711                         }
712                 }
713
714                 /* Consume port.  Then clear IIR or we'll miss events */
715                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
716                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
717                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
718
719                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
720                                          hotplug_status);
721                         if (hotplug_trigger) {
722                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
723                                         i915_hpd_irq_setup(dev);
724                                 queue_work(dev_priv->wq,
725                                            &dev_priv->hotplug_work);
726                         }
727                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
728                         I915_READ(PORT_HOTPLUG_STAT);
729                 }
730
731                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
732                         gmbus_irq_handler(dev);
733
734                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
735                         gen6_queue_rps_work(dev_priv, pm_iir);
736
737                 I915_WRITE(GTIIR, gt_iir);
738                 I915_WRITE(GEN6_PMIIR, pm_iir);
739                 I915_WRITE(VLV_IIR, iir);
740         }
741
742 out:
743         return ret;
744 }
745
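/*
 * South display engine (PCH) interrupt handler for IBX: hotplug, AUX, GMBUS,
 * audio and FDI/transcoder error bits.
 */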
746 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
747 {
748         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
749         int pipe;
750         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
751
752         if (hotplug_trigger) {
753                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
754                         ibx_hpd_irq_setup(dev);
755                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
756         }
757         if (pch_iir & SDE_AUDIO_POWER_MASK)
758                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
759                                  (pch_iir & SDE_AUDIO_POWER_MASK) >>
760                                  SDE_AUDIO_POWER_SHIFT);
761
762         if (pch_iir & SDE_AUX_MASK)
763                 dp_aux_irq_handler(dev);
764
765         if (pch_iir & SDE_GMBUS)
766                 gmbus_irq_handler(dev);
767
768         if (pch_iir & SDE_AUDIO_HDCP_MASK)
769                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
770
771         if (pch_iir & SDE_AUDIO_TRANS_MASK)
772                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
773
774         if (pch_iir & SDE_POISON)
775                 DRM_ERROR("PCH poison interrupt\n");
776
777         if (pch_iir & SDE_FDI_MASK)
778                 for_each_pipe(pipe)
779                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
780                                          pipe_name(pipe),
781                                          I915_READ(FDI_RX_IIR(pipe)));
782
783         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
784                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
785
786         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
787                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
788
789         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
790                 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
791         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
792                 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
793 }
794
795 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
796 {
797         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
798         int pipe;
799         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
800
801         if (hotplug_trigger) {
802                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
803                         ibx_hpd_irq_setup(dev);
804                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
805         }
806         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
807                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
808                                  (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
809                                  SDE_AUDIO_POWER_SHIFT_CPT);
810
811         if (pch_iir & SDE_AUX_MASK_CPT)
812                 dp_aux_irq_handler(dev);
813
814         if (pch_iir & SDE_GMBUS_CPT)
815                 gmbus_irq_handler(dev);
816
817         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
818                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
819
820         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
821                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
822
823         if (pch_iir & SDE_FDI_MASK_CPT)
824                 for_each_pipe(pipe)
825                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
826                                          pipe_name(pipe),
827                                          I915_READ(FDI_RX_IIR(pipe)));
828 }
829
830 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
831 {
832         struct drm_device *dev = (struct drm_device *) arg;
833         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
834         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
835         irqreturn_t ret = IRQ_NONE;
836         int i;
837
838         atomic_inc(&dev_priv->irq_received);
839
840         /* disable master interrupt before clearing iir  */
841         de_ier = I915_READ(DEIER);
842         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
843
844         /* Disable south interrupts. We'll only write to SDEIIR once, so further
845          * interrupts will be stored on its back queue, and then we'll be
846          * able to process them after we restore SDEIER (as soon as we restore
847          * it, we'll get an interrupt if SDEIIR still has something to process
848          * due to its back queue). */
849         if (!HAS_PCH_NOP(dev)) {
850                 sde_ier = I915_READ(SDEIER);
851                 I915_WRITE(SDEIER, 0);
852                 POSTING_READ(SDEIER);
853         }
854
855         gt_iir = I915_READ(GTIIR);
856         if (gt_iir) {
857                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
858                 I915_WRITE(GTIIR, gt_iir);
859                 ret = IRQ_HANDLED;
860         }
861
862         de_iir = I915_READ(DEIIR);
863         if (de_iir) {
864                 if (de_iir & DE_AUX_CHANNEL_A_IVB)
865                         dp_aux_irq_handler(dev);
866
867                 if (de_iir & DE_GSE_IVB)
868                         intel_opregion_gse_intr(dev);
869
870                 for (i = 0; i < 3; i++) {
871                         if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
872                                 drm_handle_vblank(dev, i);
873                         if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
874                                 intel_prepare_page_flip(dev, i);
875                                 intel_finish_page_flip_plane(dev, i);
876                         }
877                 }
878
879                 /* check event from PCH */
880                 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
881                         u32 pch_iir = I915_READ(SDEIIR);
882
883                         cpt_irq_handler(dev, pch_iir);
884
885                         /* clear PCH hotplug event before clearing CPU irq */
886                         I915_WRITE(SDEIIR, pch_iir);
887                 }
888
889                 I915_WRITE(DEIIR, de_iir);
890                 ret = IRQ_HANDLED;
891         }
892
893         pm_iir = I915_READ(GEN6_PMIIR);
894         if (pm_iir) {
895                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
896                         gen6_queue_rps_work(dev_priv, pm_iir);
897                 I915_WRITE(GEN6_PMIIR, pm_iir);
898                 ret = IRQ_HANDLED;
899         }
900
901         I915_WRITE(DEIER, de_ier);
902         POSTING_READ(DEIER);
903         if (!HAS_PCH_NOP(dev)) {
904                 I915_WRITE(SDEIER, sde_ier);
905                 POSTING_READ(SDEIER);
906         }
907
908         return ret;
909 }
910
911 static void ilk_gt_irq_handler(struct drm_device *dev,
912                                struct drm_i915_private *dev_priv,
913                                u32 gt_iir)
914 {
915         if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
916                 notify_ring(dev, &dev_priv->ring[RCS]);
917         if (gt_iir & GT_BSD_USER_INTERRUPT)
918                 notify_ring(dev, &dev_priv->ring[VCS]);
919 }
920
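/*
 * Top-level interrupt handler for Ironlake/Sandybridge. The master and south
 * interrupt enables are dropped while DEIIR, GTIIR and (on gen6) PMIIR are
 * processed and cleared, then restored at the end.
 */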
921 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
922 {
923         struct drm_device *dev = (struct drm_device *) arg;
924         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
925         int ret = IRQ_NONE;
926         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
927
928         atomic_inc(&dev_priv->irq_received);
929
930         /* disable master interrupt before clearing iir  */
931         de_ier = I915_READ(DEIER);
932         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
933         POSTING_READ(DEIER);
934
935         /* Disable south interrupts. We'll only write to SDEIIR once, so further
936          * interrupts will be stored on its back queue, and then we'll be
937          * able to process them after we restore SDEIER (as soon as we restore
938          * it, we'll get an interrupt if SDEIIR still has something to process
939          * due to its back queue). */
940         sde_ier = I915_READ(SDEIER);
941         I915_WRITE(SDEIER, 0);
942         POSTING_READ(SDEIER);
943
944         de_iir = I915_READ(DEIIR);
945         gt_iir = I915_READ(GTIIR);
946         pm_iir = I915_READ(GEN6_PMIIR);
947
948         if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
949                 goto done;
950
951         ret = IRQ_HANDLED;
952
953         if (IS_GEN5(dev))
954                 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
955         else
956                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
957
958         if (de_iir & DE_AUX_CHANNEL_A)
959                 dp_aux_irq_handler(dev);
960
961         if (de_iir & DE_GSE)
962                 intel_opregion_gse_intr(dev);
963
964         if (de_iir & DE_PIPEA_VBLANK)
965                 drm_handle_vblank(dev, 0);
966
967         if (de_iir & DE_PIPEB_VBLANK)
968                 drm_handle_vblank(dev, 1);
969
970         if (de_iir & DE_PLANEA_FLIP_DONE) {
971                 intel_prepare_page_flip(dev, 0);
972                 intel_finish_page_flip_plane(dev, 0);
973         }
974
975         if (de_iir & DE_PLANEB_FLIP_DONE) {
976                 intel_prepare_page_flip(dev, 1);
977                 intel_finish_page_flip_plane(dev, 1);
978         }
979
980         /* check event from PCH */
981         if (de_iir & DE_PCH_EVENT) {
982                 u32 pch_iir = I915_READ(SDEIIR);
983
984                 if (HAS_PCH_CPT(dev))
985                         cpt_irq_handler(dev, pch_iir);
986                 else
987                         ibx_irq_handler(dev, pch_iir);
988
989                 /* should clear PCH hotplug event before clearing CPU irq */
990                 I915_WRITE(SDEIIR, pch_iir);
991         }
992
993         if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
994                 ironlake_handle_rps_change(dev);
995
996         if (IS_GEN6(dev) && (pm_iir & GEN6_PM_DEFERRED_EVENTS))
997                 gen6_queue_rps_work(dev_priv, pm_iir);
998
999         I915_WRITE(GTIIR, gt_iir);
1000         I915_WRITE(DEIIR, de_iir);
1001         I915_WRITE(GEN6_PMIIR, pm_iir);
1002
1003 done:
1004         I915_WRITE(DEIER, de_ier);
1005         POSTING_READ(DEIER);
1006         I915_WRITE(SDEIER, sde_ier);
1007         POSTING_READ(SDEIER);
1008
1009         return ret;
1010 }
1011
1012 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1013                                bool reset_completed)
1014 {
1015         struct intel_ring_buffer *ring;
1016         int i;
1017
1018         /*
1019          * Notify all waiters for GPU completion events that reset state has
1020          * been changed, and that they need to restart their wait after
1021          * checking for potential errors (and bail out to drop locks if there is
1022          * a gpu reset pending so that i915_error_work_func can acquire them).
1023          */
1024
1025         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1026         for_each_ring(ring, dev_priv, i)
1027                 wake_up_all(&ring->irq_queue);
1028
1029         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1030         wake_up_all(&dev_priv->pending_flip_queue);
1031
1032         /*
1033          * Signal tasks blocked in i915_gem_wait_for_error that the pending
1034          * reset state is cleared.
1035          */
1036         if (reset_completed)
1037                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1038 }
1039
1040 /**
1041  * i915_error_work_func - do process context error handling work
1042  * @work: work struct
1043  *
1044  * Fire an error uevent so userspace can see that a hang or error
1045  * was detected.
1046  */
1047 static void i915_error_work_func(struct work_struct *work)
1048 {
1049         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1050                                                     work);
1051         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1052                                                     gpu_error);
1053         struct drm_device *dev = dev_priv->dev;
1054         char *error_event[] = { "ERROR=1", NULL };
1055         char *reset_event[] = { "RESET=1", NULL };
1056         char *reset_done_event[] = { "ERROR=0", NULL };
1057         int ret;
1058
1059         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1060
1061         /*
1062          * Note that there's only one work item which does gpu resets, so we
1063          * need not worry about concurrent gpu resets potentially incrementing
1064          * error->reset_counter twice. We only need to take care of another
1065          * racing irq/hangcheck declaring the gpu dead for a second time. A
1066          * quick check for that is good enough: schedule_work ensures the
1067          * correct ordering between hang detection and this work item, and since
1068          * the reset in-progress bit is only ever set by code outside of this
1069          * work we don't need to worry about any other races.
1070          */
1071         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1072                 DRM_DEBUG_DRIVER("resetting chip\n");
1073                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1074                                    reset_event);
1075
1076                 /*
1077                  * All state reset _must_ be completed before we update the
1078                  * reset counter, for otherwise waiters might miss the reset
1079                  * pending state and not properly drop locks, resulting in
1080                  * deadlocks with the reset work.
1081                  */
1082                 ret = i915_reset(dev);
1083
1084                 intel_display_handle_reset(dev);
1085
1086                 if (ret == 0) {
1087                         /*
1088                          * After all the gem state is reset, increment the reset
1089                          * counter and wake up everyone waiting for the reset to
1090                          * complete.
1091                          *
1092                          * Since unlock operations are a one-sided barrier only,
1093                          * we need to insert a barrier here to order any seqno
1094                          * updates before
1095                          * the counter increment.
1096                          */
1097                         smp_mb__before_atomic_inc();
1098                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1099
1100                         kobject_uevent_env(&dev->primary->kdev.kobj,
1101                                            KOBJ_CHANGE, reset_done_event);
1102                 } else {
1103                         atomic_set(&error->reset_counter, I915_WEDGED);
1104                 }
1105
1106                 /*
1107                  * Note: The wake_up also serves as a memory barrier so that
1108                  * waiters see the updated value of the reset counter atomic_t.
1109                  */
1110                 i915_error_wake_up(dev_priv, true);
1111         }
1112 }
1113
1114 /* NB: callers rely on the memset below zeroing unused instdone slots */
1115 static void i915_get_extra_instdone(struct drm_device *dev,
1116                                     uint32_t *instdone)
1117 {
1118         struct drm_i915_private *dev_priv = dev->dev_private;
1119         memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1120
1121         switch (INTEL_INFO(dev)->gen) {
1122         case 2:
1123         case 3:
1124                 instdone[0] = I915_READ(INSTDONE);
1125                 break;
1126         case 4:
1127         case 5:
1128         case 6:
1129                 instdone[0] = I915_READ(INSTDONE_I965);
1130                 instdone[1] = I915_READ(INSTDONE1);
1131                 break;
1132         default:
1133                 WARN_ONCE(1, "Unsupported platform\n");
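                /* fall through and report the gen7 register layout */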
1134         case 7:
1135                 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1136                 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1137                 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1138                 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1139                 break;
1140         }
1141 }
1142
1143 #ifdef CONFIG_DEBUG_FS
1144 static struct drm_i915_error_object *
1145 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1146                                struct drm_i915_gem_object *src,
1147                                const int num_pages)
1148 {
1149         struct drm_i915_error_object *dst;
1150         int i;
1151         u32 reloc_offset;
1152
1153         if (src == NULL || src->pages == NULL)
1154                 return NULL;
1155
1156         dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1157         if (dst == NULL)
1158                 return NULL;
1159
1160         reloc_offset = src->gtt_offset;
1161         for (i = 0; i < num_pages; i++) {
1162                 unsigned long flags;
1163                 void *d;
1164
1165                 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1166                 if (d == NULL)
1167                         goto unwind;
1168
1169                 local_irq_save(flags);
1170                 if (reloc_offset < dev_priv->gtt.mappable_end &&
1171                     src->has_global_gtt_mapping) {
1172                         void __iomem *s;
1173
1174                         /* Simply ignore tiling or any overlapping fence.
1175                          * It's part of the error state, and this hopefully
1176                          * captures what the GPU read.
1177                          */
1178
1179                         s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1180                                                      reloc_offset);
1181                         memcpy_fromio(d, s, PAGE_SIZE);
1182                         io_mapping_unmap_atomic(s);
1183                 } else if (src->stolen) {
1184                         unsigned long offset;
1185
1186                         offset = dev_priv->mm.stolen_base;
1187                         offset += src->stolen->start;
1188                         offset += i << PAGE_SHIFT;
1189
1190                         memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1191                 } else {
1192                         struct page *page;
1193                         void *s;
1194
1195                         page = i915_gem_object_get_page(src, i);
1196
1197                         drm_clflush_pages(&page, 1);
1198
1199                         s = kmap_atomic(page);
1200                         memcpy(d, s, PAGE_SIZE);
1201                         kunmap_atomic(s);
1202
1203                         drm_clflush_pages(&page, 1);
1204                 }
1205                 local_irq_restore(flags);
1206
1207                 dst->pages[i] = d;
1208
1209                 reloc_offset += PAGE_SIZE;
1210         }
1211         dst->page_count = num_pages;
1212         dst->gtt_offset = src->gtt_offset;
1213
1214         return dst;
1215
1216 unwind:
1217         while (i--)
1218                 kfree(dst->pages[i]);
1219         kfree(dst);
1220         return NULL;
1221 }
1222 #define i915_error_object_create(dev_priv, src) \
1223         i915_error_object_create_sized((dev_priv), (src), \
1224                                        (src)->base.size>>PAGE_SHIFT)
1225
1226 static void
1227 i915_error_object_free(struct drm_i915_error_object *obj)
1228 {
1229         int page;
1230
1231         if (obj == NULL)
1232                 return;
1233
1234         for (page = 0; page < obj->page_count; page++)
1235                 kfree(obj->pages[page]);
1236
1237         kfree(obj);
1238 }
1239
1240 void
1241 i915_error_state_free(struct kref *error_ref)
1242 {
1243         struct drm_i915_error_state *error = container_of(error_ref,
1244                                                           typeof(*error), ref);
1245         int i;
1246
1247         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1248                 i915_error_object_free(error->ring[i].batchbuffer);
1249                 i915_error_object_free(error->ring[i].ringbuffer);
1250                 kfree(error->ring[i].requests);
1251         }
1252
1253         kfree(error->active_bo);
1254         kfree(error->overlay);
1255         kfree(error);
1256 }
1257 static void capture_bo(struct drm_i915_error_buffer *err,
1258                        struct drm_i915_gem_object *obj)
1259 {
1260         err->size = obj->base.size;
1261         err->name = obj->base.name;
1262         err->rseqno = obj->last_read_seqno;
1263         err->wseqno = obj->last_write_seqno;
1264         err->gtt_offset = obj->gtt_offset;
1265         err->read_domains = obj->base.read_domains;
1266         err->write_domain = obj->base.write_domain;
1267         err->fence_reg = obj->fence_reg;
1268         err->pinned = 0;
1269         if (obj->pin_count > 0)
1270                 err->pinned = 1;
1271         if (obj->user_pin_count > 0)
1272                 err->pinned = -1;
1273         err->tiling = obj->tiling_mode;
1274         err->dirty = obj->dirty;
1275         err->purgeable = obj->madv != I915_MADV_WILLNEED;
1276         err->ring = obj->ring ? obj->ring->id : -1;
1277         err->cache_level = obj->cache_level;
1278 }
1279
1280 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1281                              int count, struct list_head *head)
1282 {
1283         struct drm_i915_gem_object *obj;
1284         int i = 0;
1285
1286         list_for_each_entry(obj, head, mm_list) {
1287                 capture_bo(err++, obj);
1288                 if (++i == count)
1289                         break;
1290         }
1291
1292         return i;
1293 }
1294
1295 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1296                              int count, struct list_head *head)
1297 {
1298         struct drm_i915_gem_object *obj;
1299         int i = 0;
1300
1301         list_for_each_entry(obj, head, gtt_list) {
1302                 if (obj->pin_count == 0)
1303                         continue;
1304
1305                 capture_bo(err++, obj);
1306                 if (++i == count)
1307                         break;
1308         }
1309
1310         return i;
1311 }
1312
1313 static void i915_gem_record_fences(struct drm_device *dev,
1314                                    struct drm_i915_error_state *error)
1315 {
1316         struct drm_i915_private *dev_priv = dev->dev_private;
1317         int i;
1318
1319         /* Fences */
1320         switch (INTEL_INFO(dev)->gen) {
1321         case 7:
1322         case 6:
1323                 for (i = 0; i < dev_priv->num_fence_regs; i++)
1324                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1325                 break;
1326         case 5:
1327         case 4:
1328                 for (i = 0; i < 16; i++)
1329                         error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1330                 break;
1331         case 3:
1332                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1333                         for (i = 0; i < 8; i++)
1334                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
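                /* fall through: fences 0-7 use the 830 register layout */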
1335         case 2:
1336                 for (i = 0; i < 8; i++)
1337                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1338                 break;
1339
1340         default:
1341                 BUG();
1342         }
1343 }
1344
1345 static struct drm_i915_error_object *
1346 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1347                              struct intel_ring_buffer *ring)
1348 {
1349         struct drm_i915_gem_object *obj;
1350         u32 seqno;
1351
1352         if (!ring->get_seqno)
1353                 return NULL;
1354
1355         if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1356                 u32 acthd = I915_READ(ACTHD);
1357
1358                 if (WARN_ON(ring->id != RCS))
1359                         return NULL;
1360
1361                 obj = ring->private;
1362                 if (acthd >= obj->gtt_offset &&
1363                     acthd < obj->gtt_offset + obj->base.size)
1364                         return i915_error_object_create(dev_priv, obj);
1365         }
1366
1367         seqno = ring->get_seqno(ring, false);
1368         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1369                 if (obj->ring != ring)
1370                         continue;
1371
1372                 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1373                         continue;
1374
1375                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1376                         continue;
1377
1378                 /* We need to copy these to an anonymous buffer as the simplest
1379                  * method to avoid being overwritten by userspace.
1380                  */
1381                 return i915_error_object_create(dev_priv, obj);
1382         }
1383
1384         return NULL;
1385 }
1386
1387 static void i915_record_ring_state(struct drm_device *dev,
1388                                    struct drm_i915_error_state *error,
1389                                    struct intel_ring_buffer *ring)
1390 {
1391         struct drm_i915_private *dev_priv = dev->dev_private;
1392
1393         if (INTEL_INFO(dev)->gen >= 6) {
1394                 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1395                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1396                 error->semaphore_mboxes[ring->id][0]
1397                         = I915_READ(RING_SYNC_0(ring->mmio_base));
1398                 error->semaphore_mboxes[ring->id][1]
1399                         = I915_READ(RING_SYNC_1(ring->mmio_base));
1400                 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1401                 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1402         }
1403
1404         if (INTEL_INFO(dev)->gen >= 4) {
1405                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1406                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1407                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1408                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1409                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1410                 if (ring->id == RCS)
1411                         error->bbaddr = I915_READ64(BB_ADDR);
1412         } else {
1413                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1414                 error->ipeir[ring->id] = I915_READ(IPEIR);
1415                 error->ipehr[ring->id] = I915_READ(IPEHR);
1416                 error->instdone[ring->id] = I915_READ(INSTDONE);
1417         }
1418
1419         error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1420         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1421         error->seqno[ring->id] = ring->get_seqno(ring, false);
1422         error->acthd[ring->id] = intel_ring_get_active_head(ring);
1423         error->head[ring->id] = I915_READ_HEAD(ring);
1424         error->tail[ring->id] = I915_READ_TAIL(ring);
1425         error->ctl[ring->id] = I915_READ_CTL(ring);
1426
1427         error->cpu_ring_head[ring->id] = ring->head;
1428         error->cpu_ring_tail[ring->id] = ring->tail;
1429 }
1430
1431
1432 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1433                                            struct drm_i915_error_state *error,
1434                                            struct drm_i915_error_ring *ering)
1435 {
1436         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1437         struct drm_i915_gem_object *obj;
1438
1439         /* Currently render ring is the only HW context user */
1440         if (ring->id != RCS || !error->ccid)
1441                 return;
1442
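        /*
         * CCID holds the graphics address of the current context; find the
         * bound object at that offset and snapshot its first page.
         */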
1443         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1444                 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1445                         ering->ctx = i915_error_object_create_sized(dev_priv,
1446                                                                     obj, 1);
1447                 }
1448         }
1449 }
1450
1451 static void i915_gem_record_rings(struct drm_device *dev,
1452                                   struct drm_i915_error_state *error)
1453 {
1454         struct drm_i915_private *dev_priv = dev->dev_private;
1455         struct intel_ring_buffer *ring;
1456         struct drm_i915_gem_request *request;
1457         int i, count;
1458
1459         for_each_ring(ring, dev_priv, i) {
1460                 i915_record_ring_state(dev, error, ring);
1461
1462                 error->ring[i].batchbuffer =
1463                         i915_error_first_batchbuffer(dev_priv, ring);
1464
1465                 error->ring[i].ringbuffer =
1466                         i915_error_object_create(dev_priv, ring->obj);
1467
1468
1469                 i915_gem_record_active_context(ring, error, &error->ring[i]);
1470
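                /*
                 * Walk the request list twice: once to size the allocation,
                 * then again to copy out seqno/jiffies/tail for each request.
                 */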
1471                 count = 0;
1472                 list_for_each_entry(request, &ring->request_list, list)
1473                         count++;
1474
1475                 error->ring[i].num_requests = count;
1476                 error->ring[i].requests =
1477                         kmalloc(count*sizeof(struct drm_i915_error_request),
1478                                 GFP_ATOMIC);
1479                 if (error->ring[i].requests == NULL) {
1480                         error->ring[i].num_requests = 0;
1481                         continue;
1482                 }
1483
1484                 count = 0;
1485                 list_for_each_entry(request, &ring->request_list, list) {
1486                         struct drm_i915_error_request *erq;
1487
1488                         erq = &error->ring[i].requests[count++];
1489                         erq->seqno = request->seqno;
1490                         erq->jiffies = request->emitted_jiffies;
1491                         erq->tail = request->tail;
1492                 }
1493         }
1494 }
1495
1496 /**
1497  * i915_capture_error_state - capture an error record for later analysis
1498  * @dev: drm device
1499  *
1500  * Should be called when an error is detected (either a hang or an error
1501  * interrupt) to capture error state from the time of the error.  Fills
1502  * out a structure which becomes available in debugfs for user level tools
1503  * to pick up.
1504  */
1505 static void i915_capture_error_state(struct drm_device *dev)
1506 {
1507         struct drm_i915_private *dev_priv = dev->dev_private;
1508         struct drm_i915_gem_object *obj;
1509         struct drm_i915_error_state *error;
1510         unsigned long flags;
1511         int i, pipe;
1512
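        /* Only one error state is kept around; bail if one is already pending. */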
1513         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1514         error = dev_priv->gpu_error.first_error;
1515         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1516         if (error)
1517                 return;
1518
1519         /* Account for pipe specific data like PIPE*STAT */
1520         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1521         if (!error) {
1522                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1523                 return;
1524         }
1525
1526         DRM_INFO("capturing error event; look for more information in "
1527                  "/sys/kernel/debug/dri/%d/i915_error_state\n",
1528                  dev->primary->index);
1529
1530         kref_init(&error->ref);
1531         error->eir = I915_READ(EIR);
1532         error->pgtbl_er = I915_READ(PGTBL_ER);
1533         if (HAS_HW_CONTEXTS(dev))
1534                 error->ccid = I915_READ(CCID);
1535
1536         if (HAS_PCH_SPLIT(dev))
1537                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1538         else if (IS_VALLEYVIEW(dev))
1539                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1540         else if (IS_GEN2(dev))
1541                 error->ier = I915_READ16(IER);
1542         else
1543                 error->ier = I915_READ(IER);
1544
1545         if (INTEL_INFO(dev)->gen >= 6)
1546                 error->derrmr = I915_READ(DERRMR);
1547
1548         if (IS_VALLEYVIEW(dev))
1549                 error->forcewake = I915_READ(FORCEWAKE_VLV);
1550         else if (INTEL_INFO(dev)->gen >= 7)
1551                 error->forcewake = I915_READ(FORCEWAKE_MT);
1552         else if (INTEL_INFO(dev)->gen == 6)
1553                 error->forcewake = I915_READ(FORCEWAKE);
1554
1555         if (!HAS_PCH_SPLIT(dev))
1556                 for_each_pipe(pipe)
1557                         error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1558
1559         if (INTEL_INFO(dev)->gen >= 6) {
1560                 error->error = I915_READ(ERROR_GEN6);
1561                 error->done_reg = I915_READ(DONE_REG);
1562         }
1563
1564         if (INTEL_INFO(dev)->gen == 7)
1565                 error->err_int = I915_READ(GEN7_ERR_INT);
1566
1567         i915_get_extra_instdone(dev, error->extra_instdone);
1568
1569         i915_gem_record_fences(dev, error);
1570         i915_gem_record_rings(dev, error);
1571
1572         /* Record buffers on the active and pinned lists. */
1573         error->active_bo = NULL;
1574         error->pinned_bo = NULL;
1575
1576         i = 0;
1577         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1578                 i++;
1579         error->active_bo_count = i;
1580         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1581                 if (obj->pin_count)
1582                         i++;
1583         error->pinned_bo_count = i - error->active_bo_count;
1584
1585         error->active_bo = NULL;
1586         error->pinned_bo = NULL;
1587         if (i) {
1588                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1589                                            GFP_ATOMIC);
1590                 if (error->active_bo)
1591                         error->pinned_bo =
1592                                 error->active_bo + error->active_bo_count;
1593         }
1594
1595         if (error->active_bo)
1596                 error->active_bo_count =
1597                         capture_active_bo(error->active_bo,
1598                                           error->active_bo_count,
1599                                           &dev_priv->mm.active_list);
1600
1601         if (error->pinned_bo)
1602                 error->pinned_bo_count =
1603                         capture_pinned_bo(error->pinned_bo,
1604                                           error->pinned_bo_count,
1605                                           &dev_priv->mm.bound_list);
1606
1607         do_gettimeofday(&error->time);
1608
1609         error->overlay = intel_overlay_capture_error_state(dev);
1610         error->display = intel_display_capture_error_state(dev);
1611
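        /* Publish the capture unless another error got there first. */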
1612         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1613         if (dev_priv->gpu_error.first_error == NULL) {
1614                 dev_priv->gpu_error.first_error = error;
1615                 error = NULL;
1616         }
1617         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1618
1619         if (error)
1620                 i915_error_state_free(&error->ref);
1621 }
1622
1623 void i915_destroy_error_state(struct drm_device *dev)
1624 {
1625         struct drm_i915_private *dev_priv = dev->dev_private;
1626         struct drm_i915_error_state *error;
1627         unsigned long flags;
1628
1629         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1630         error = dev_priv->gpu_error.first_error;
1631         dev_priv->gpu_error.first_error = NULL;
1632         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1633
1634         if (error)
1635                 kref_put(&error->ref, i915_error_state_free);
1636 }
1637 #else
1638 #define i915_capture_error_state(x)
1639 #endif
1640
1641 static void i915_report_and_clear_eir(struct drm_device *dev)
1642 {
1643         struct drm_i915_private *dev_priv = dev->dev_private;
1644         uint32_t instdone[I915_NUM_INSTDONE_REG];
1645         u32 eir = I915_READ(EIR);
1646         int pipe, i;
1647
1648         if (!eir)
1649                 return;
1650
1651         pr_err("render error detected, EIR: 0x%08x\n", eir);
1652
1653         i915_get_extra_instdone(dev, instdone);
1654
1655         if (IS_G4X(dev)) {
1656                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1657                         u32 ipeir = I915_READ(IPEIR_I965);
1658
1659                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1660                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1661                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1662                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1663                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1664                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1665                         I915_WRITE(IPEIR_I965, ipeir);
1666                         POSTING_READ(IPEIR_I965);
1667                 }
1668                 if (eir & GM45_ERROR_PAGE_TABLE) {
1669                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1670                         pr_err("page table error\n");
1671                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1672                         I915_WRITE(PGTBL_ER, pgtbl_err);
1673                         POSTING_READ(PGTBL_ER);
1674                 }
1675         }
1676
1677         if (!IS_GEN2(dev)) {
1678                 if (eir & I915_ERROR_PAGE_TABLE) {
1679                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1680                         pr_err("page table error\n");
1681                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1682                         I915_WRITE(PGTBL_ER, pgtbl_err);
1683                         POSTING_READ(PGTBL_ER);
1684                 }
1685         }
1686
1687         if (eir & I915_ERROR_MEMORY_REFRESH) {
1688                 pr_err("memory refresh error:\n");
1689                 for_each_pipe(pipe)
1690                         pr_err("pipe %c stat: 0x%08x\n",
1691                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1692                 /* pipestat has already been acked */
1693         }
1694         if (eir & I915_ERROR_INSTRUCTION) {
1695                 pr_err("instruction error\n");
1696                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1697                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1698                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1699                 if (INTEL_INFO(dev)->gen < 4) {
1700                         u32 ipeir = I915_READ(IPEIR);
1701
1702                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1703                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1704                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1705                         I915_WRITE(IPEIR, ipeir);
1706                         POSTING_READ(IPEIR);
1707                 } else {
1708                         u32 ipeir = I915_READ(IPEIR_I965);
1709
1710                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1711                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1712                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1713                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1714                         I915_WRITE(IPEIR_I965, ipeir);
1715                         POSTING_READ(IPEIR_I965);
1716                 }
1717         }
1718
1719         I915_WRITE(EIR, eir);
1720         POSTING_READ(EIR);
1721         eir = I915_READ(EIR);
1722         if (eir) {
1723                 /*
1724                  * some errors might have become stuck,
1725                  * mask them.
1726                  */
1727                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1728                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1729                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1730         }
1731 }
1732
1733 /**
1734  * i915_handle_error - handle an error interrupt
1735  * @dev: drm device
1736  *
1737  * Do some basic checking of register state at error interrupt time and
1738  * dump it to the syslog.  Also call i915_capture_error_state() to make
1739  * sure we get a record and make it available in debugfs.  Fire a uevent
1740  * so userspace knows something bad happened (should trigger collection
1741  * of a ring dump etc.).
1742  */
1743 void i915_handle_error(struct drm_device *dev, bool wedged)
1744 {
1745         struct drm_i915_private *dev_priv = dev->dev_private;
1746
1747         i915_capture_error_state(dev);
1748         i915_report_and_clear_eir(dev);
1749
1750         if (wedged) {
1751                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1752                                 &dev_priv->gpu_error.reset_counter);
1753
1754                 /*
1755                  * Wakeup waiting processes so that the reset work function
1756                  * i915_error_work_func doesn't deadlock trying to grab various
1757                  * locks. By bumping the reset counter first, the woken
1758                  * processes will see a reset in progress and back off,
1759                  * releasing their locks and then wait for the reset completion.
1760                  * We must do this for _all_ gpu waiters that might hold locks
1761                  * that the reset work needs to acquire.
1762                  *
1763                  * Note: The wake_up serves as the required memory barrier to
1764                  * ensure that the waiters see the updated value of the reset
1765                  * counter atomic_t.
1766                  */
1767                 i915_error_wake_up(dev_priv, false);
1768         }
1769
1770         /*
1771          * Our reset work can grab modeset locks (since it needs to reset the
1772          * state of outstanding pageflips). Hence it must not be run on our own
1773          * dev-priv->wq work queue for otherwise the flush_work in the pageflip
1774          * code will deadlock.
1775          */
1776         schedule_work(&dev_priv->gpu_error.work);
1777 }
1778
1779 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1780 {
1781         drm_i915_private_t *dev_priv = dev->dev_private;
1782         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1783         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1784         struct drm_i915_gem_object *obj;
1785         struct intel_unpin_work *work;
1786         unsigned long flags;
1787         bool stall_detected;
1788
1789         /* Ignore early vblank irqs */
1790         if (intel_crtc == NULL)
1791                 return;
1792
1793         spin_lock_irqsave(&dev->event_lock, flags);
1794         work = intel_crtc->unpin_work;
1795
1796         if (work == NULL ||
1797             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1798             !work->enable_stall_check) {
1799                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1800                 spin_unlock_irqrestore(&dev->event_lock, flags);
1801                 return;
1802         }
1803
1804         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1805         obj = work->pending_flip_obj;
1806         if (INTEL_INFO(dev)->gen >= 4) {
1807                 int dspsurf = DSPSURF(intel_crtc->plane);
1808                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1809                                         obj->gtt_offset;
1810         } else {
1811                 int dspaddr = DSPADDR(intel_crtc->plane);
1812                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1813                                                         crtc->y * crtc->fb->pitches[0] +
1814                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1815         }
1816
1817         spin_unlock_irqrestore(&dev->event_lock, flags);
1818
1819         if (stall_detected) {
1820                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1821                 intel_prepare_page_flip(dev, intel_crtc->plane);
1822         }
1823 }
1824
1825 /* Called from drm generic code, passed 'crtc' which
1826  * we use as a pipe index
1827  */
1828 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1829 {
1830         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1831         unsigned long irqflags;
1832
1833         if (!i915_pipe_enabled(dev, pipe))
1834                 return -EINVAL;
1835
1836         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1837         if (INTEL_INFO(dev)->gen >= 4)
1838                 i915_enable_pipestat(dev_priv, pipe,
1839                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1840         else
1841                 i915_enable_pipestat(dev_priv, pipe,
1842                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1843
1844         /* maintain vblank delivery even in deep C-states */
1845         if (dev_priv->info->gen == 3)
1846                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1847         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1848
1849         return 0;
1850 }
1851
1852 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1853 {
1854         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1855         unsigned long irqflags;
1856
1857         if (!i915_pipe_enabled(dev, pipe))
1858                 return -EINVAL;
1859
1860         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1861         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1862                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1863         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1864
1865         return 0;
1866 }
1867
1868 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1869 {
1870         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1871         unsigned long irqflags;
1872
1873         if (!i915_pipe_enabled(dev, pipe))
1874                 return -EINVAL;
1875
1876         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
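        /* The per-pipe vblank enable bits are spaced 5 bits apart on IVB */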
1877         ironlake_enable_display_irq(dev_priv,
1878                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
1879         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1880
1881         return 0;
1882 }
1883
1884 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1885 {
1886         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1887         unsigned long irqflags;
1888         u32 imr;
1889
1890         if (!i915_pipe_enabled(dev, pipe))
1891                 return -EINVAL;
1892
1893         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1894         imr = I915_READ(VLV_IMR);
1895         if (pipe == 0)
1896                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1897         else
1898                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1899         I915_WRITE(VLV_IMR, imr);
1900         i915_enable_pipestat(dev_priv, pipe,
1901                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1902         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1903
1904         return 0;
1905 }
1906
1907 /* Called from drm generic code, passed 'crtc' which
1908  * we use as a pipe index
1909  */
1910 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1911 {
1912         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1913         unsigned long irqflags;
1914
1915         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1916         if (dev_priv->info->gen == 3)
1917                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1918
1919         i915_disable_pipestat(dev_priv, pipe,
1920                               PIPE_VBLANK_INTERRUPT_ENABLE |
1921                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1922         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1923 }
1924
1925 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1926 {
1927         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1928         unsigned long irqflags;
1929
1930         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1931         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1932                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1933         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1934 }
1935
1936 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1937 {
1938         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1939         unsigned long irqflags;
1940
1941         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1942         ironlake_disable_display_irq(dev_priv,
1943                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
1944         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1945 }
1946
1947 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1948 {
1949         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1950         unsigned long irqflags;
1951         u32 imr;
1952
1953         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1954         i915_disable_pipestat(dev_priv, pipe,
1955                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1956         imr = I915_READ(VLV_IMR);
1957         if (pipe == 0)
1958                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1959         else
1960                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1961         I915_WRITE(VLV_IMR, imr);
1962         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1963 }
1964
1965 static u32
1966 ring_last_seqno(struct intel_ring_buffer *ring)
1967 {
1968         return list_entry(ring->request_list.prev,
1969                           struct drm_i915_gem_request, list)->seqno;
1970 }
1971
1972 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1973 {
1974         if (list_empty(&ring->request_list) ||
1975             i915_seqno_passed(ring->get_seqno(ring, false),
1976                               ring_last_seqno(ring))) {
1977                 /* Issue a wake-up to catch stuck h/w. */
1978                 if (waitqueue_active(&ring->irq_queue)) {
1979                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1980                                   ring->name);
1981                         wake_up_all(&ring->irq_queue);
1982                         *err = true;
1983                 }
1984                 return true;
1985         }
1986         return false;
1987 }
1988
1989 static bool semaphore_passed(struct intel_ring_buffer *ring)
1990 {
1991         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1992         u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1993         struct intel_ring_buffer *signaller;
1994         u32 cmd, ipehr, acthd_min;
1995
1996         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1997         if ((ipehr & ~(0x3 << 16)) !=
1998             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1999                 return false;
2000
2001         /* ACTHD is likely pointing to the dword after the actual command,
2002          * so scan backwards until we find the MBOX.
2003          */
2004         acthd_min = max((int)acthd - 3 * 4, 0);
2005         do {
2006                 cmd = ioread32(ring->virtual_start + acthd);
2007                 if (cmd == ipehr)
2008                         break;
2009
2010                 acthd -= 4;
2011                 if (acthd < acthd_min)
2012                         return false;
2013         } while (1);
2014
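        /*
         * Map the semaphore register select back to the signalling ring and
         * check whether it has already passed the seqno we are waiting on
         * (stored in the dword following the MBOX command).
         */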
2015         signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2016         return i915_seqno_passed(signaller->get_seqno(signaller, false),
2017                                  ioread32(ring->virtual_start+acthd+4)+1);
2018 }
2019
2020 static bool kick_ring(struct intel_ring_buffer *ring)
2021 {
2022         struct drm_device *dev = ring->dev;
2023         struct drm_i915_private *dev_priv = dev->dev_private;
2024         u32 tmp = I915_READ_CTL(ring);
2025         if (tmp & RING_WAIT) {
2026                 DRM_ERROR("Kicking stuck wait on %s\n",
2027                           ring->name);
2028                 I915_WRITE_CTL(ring, tmp);
2029                 return true;
2030         }
2031
2032         if (INTEL_INFO(dev)->gen >= 6 &&
2033             tmp & RING_WAIT_SEMAPHORE &&
2034             semaphore_passed(ring)) {
2035                 DRM_ERROR("Kicking stuck semaphore on %s\n",
2036                           ring->name);
2037                 I915_WRITE_CTL(ring, tmp);
2038                 return true;
2039         }
2040         return false;
2041 }
2042
2043 static bool i915_hangcheck_hung(struct drm_device *dev)
2044 {
2045         drm_i915_private_t *dev_priv = dev->dev_private;
2046
2047         if (dev_priv->gpu_error.hangcheck_count++ > 1) {
2048                 bool hung = true;
2049
2050                 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
2051                 i915_handle_error(dev, true);
2052
2053                 if (!IS_GEN2(dev)) {
2054                         struct intel_ring_buffer *ring;
2055                         int i;
2056
2057                         /* Is the chip hanging on a WAIT_FOR_EVENT?
2058                          * If so we can simply poke the RB_WAIT bit
2059                          * and break the hang. This should work on
2060                          * all but the second generation chipsets.
2061                          */
2062                         for_each_ring(ring, dev_priv, i)
2063                                 hung &= !kick_ring(ring);
2064                 }
2065
2066                 return hung;
2067         }
2068
2069         return false;
2070 }
2071
2072 /**
2073  * This is called when the chip hasn't reported back with completed
2074  * batchbuffers in a long time. The first time this is called we simply record
2075  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
2076  * again, we assume the chip is wedged and try to fix it.
2077  */
2078 void i915_hangcheck_elapsed(unsigned long data)
2079 {
2080         struct drm_device *dev = (struct drm_device *)data;
2081         drm_i915_private_t *dev_priv = dev->dev_private;
2082         uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
2083         struct intel_ring_buffer *ring;
2084         bool err = false, idle;
2085         int i;
2086
2087         if (!i915_enable_hangcheck)
2088                 return;
2089
2090         memset(acthd, 0, sizeof(acthd));
2091         idle = true;
2092         for_each_ring(ring, dev_priv, i) {
2093             idle &= i915_hangcheck_ring_idle(ring, &err);
2094             acthd[i] = intel_ring_get_active_head(ring);
2095         }
2096
2097         /* If all work is done then ACTHD clearly hasn't advanced. */
2098         if (idle) {
2099                 if (err) {
2100                         if (i915_hangcheck_hung(dev))
2101                                 return;
2102
2103                         goto repeat;
2104                 }
2105
2106                 dev_priv->gpu_error.hangcheck_count = 0;
2107                 return;
2108         }
2109
2110         i915_get_extra_instdone(dev, instdone);
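        /*
         * If neither ACTHD nor the extra INSTDONE registers have moved since
         * the last check, treat the GPU as hung; otherwise record the new
         * values and restart the countdown.
         */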
2111         if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
2112                    sizeof(acthd)) == 0 &&
2113             memcmp(dev_priv->gpu_error.prev_instdone, instdone,
2114                    sizeof(instdone)) == 0) {
2115                 if (i915_hangcheck_hung(dev))
2116                         return;
2117         } else {
2118                 dev_priv->gpu_error.hangcheck_count = 0;
2119
2120                 memcpy(dev_priv->gpu_error.last_acthd, acthd,
2121                        sizeof(acthd));
2122                 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2123                        sizeof(instdone));
2124         }
2125
2126 repeat:
2127         /* Reset timer in case the chip hangs without another request being added */
2128         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2129                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2130 }
2131
2132 /* drm_dma.h hooks
2133  */
2134 static void ironlake_irq_preinstall(struct drm_device *dev)
2135 {
2136         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2137
2138         atomic_set(&dev_priv->irq_received, 0);
2139
2140         I915_WRITE(HWSTAM, 0xeffe);
2141
2142         /* XXX hotplug from PCH */
2143
2144         I915_WRITE(DEIMR, 0xffffffff);
2145         I915_WRITE(DEIER, 0x0);
2146         POSTING_READ(DEIER);
2147
2148         /* and GT */
2149         I915_WRITE(GTIMR, 0xffffffff);
2150         I915_WRITE(GTIER, 0x0);
2151         POSTING_READ(GTIER);
2152
2153         if (HAS_PCH_NOP(dev))
2154                 return;
2155
2156         /* south display irq */
2157         I915_WRITE(SDEIMR, 0xffffffff);
2158         /*
2159          * SDEIER is also touched by the interrupt handler to work around missed
2160          * PCH interrupts. Hence we can't update it after the interrupt handler
2161          * is enabled - instead we unconditionally enable all PCH interrupt
2162          * sources here, but then only unmask them as needed with SDEIMR.
2163          */
2164         I915_WRITE(SDEIER, 0xffffffff);
2165         POSTING_READ(SDEIER);
2166 }
2167
2168 static void valleyview_irq_preinstall(struct drm_device *dev)
2169 {
2170         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2171         int pipe;
2172
2173         atomic_set(&dev_priv->irq_received, 0);
2174
2175         /* VLV magic */
2176         I915_WRITE(VLV_IMR, 0);
2177         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2178         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2179         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2180
2181         /* and GT */
2182         I915_WRITE(GTIIR, I915_READ(GTIIR));
2183         I915_WRITE(GTIIR, I915_READ(GTIIR));
2184         I915_WRITE(GTIMR, 0xffffffff);
2185         I915_WRITE(GTIER, 0x0);
2186         POSTING_READ(GTIER);
2187
2188         I915_WRITE(DPINVGTT, 0xff);
2189
2190         I915_WRITE(PORT_HOTPLUG_EN, 0);
2191         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2192         for_each_pipe(pipe)
2193                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2194         I915_WRITE(VLV_IIR, 0xffffffff);
2195         I915_WRITE(VLV_IMR, 0xffffffff);
2196         I915_WRITE(VLV_IER, 0x0);
2197         POSTING_READ(VLV_IER);
2198 }
2199
2200 static void ibx_hpd_irq_setup(struct drm_device *dev)
2201 {
2202         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2203         struct drm_mode_config *mode_config = &dev->mode_config;
2204         struct intel_encoder *intel_encoder;
2205         u32 mask = ~I915_READ(SDEIMR);
2206         u32 hotplug;
2207
2208         if (HAS_PCH_IBX(dev)) {
2209                 mask &= ~SDE_HOTPLUG_MASK;
2210                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2211                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2212                                 mask |= hpd_ibx[intel_encoder->hpd_pin];
2213         } else {
2214                 mask &= ~SDE_HOTPLUG_MASK_CPT;
2215                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2216                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2217                                 mask |= hpd_cpt[intel_encoder->hpd_pin];
2218         }
2219
2220         I915_WRITE(SDEIMR, ~mask);
2221
2222         /*
2223          * Enable digital hotplug on the PCH, and configure the DP short pulse
2224          * duration to 2ms (which is the minimum in the Display Port spec)
2225          *
2226          * This register is the same on all known PCH chips.
2227          */
2228         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2229         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2230         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2231         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2232         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2233         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2234 }
2235
2236 static void ibx_irq_postinstall(struct drm_device *dev)
2237 {
2238         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2239         u32 mask;
2240
2241         if (HAS_PCH_IBX(dev))
2242                 mask = SDE_GMBUS | SDE_AUX_MASK;
2243         else
2244                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2245
2246         if (HAS_PCH_NOP(dev))
2247                 return;
2248
2249         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2250         I915_WRITE(SDEIMR, ~mask);
2251 }
2252
2253 static int ironlake_irq_postinstall(struct drm_device *dev)
2254 {
2255         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2256         /* enable the kinds of interrupts that are always enabled */
2257         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2258                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2259                            DE_AUX_CHANNEL_A;
2260         u32 render_irqs;
2261
2262         dev_priv->irq_mask = ~display_mask;
2263
2264         /* should always be able to generate irqs */
2265         I915_WRITE(DEIIR, I915_READ(DEIIR));
2266         I915_WRITE(DEIMR, dev_priv->irq_mask);
2267         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2268         POSTING_READ(DEIER);
2269
2270         dev_priv->gt_irq_mask = ~0;
2271
2272         I915_WRITE(GTIIR, I915_READ(GTIIR));
2273         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2274
2275         if (IS_GEN6(dev))
2276                 render_irqs =
2277                         GT_USER_INTERRUPT |
2278                         GEN6_BSD_USER_INTERRUPT |
2279                         GEN6_BLITTER_USER_INTERRUPT;
2280         else
2281                 render_irqs =
2282                         GT_USER_INTERRUPT |
2283                         GT_PIPE_NOTIFY |
2284                         GT_BSD_USER_INTERRUPT;
2285         I915_WRITE(GTIER, render_irqs);
2286         POSTING_READ(GTIER);
2287
2288         ibx_irq_postinstall(dev);
2289
2290         if (IS_IRONLAKE_M(dev)) {
2291                 /* Clear & enable PCU event interrupts */
2292                 I915_WRITE(DEIIR, DE_PCU_EVENT);
2293                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2294                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2295         }
2296
2297         return 0;
2298 }
2299
2300 static int ivybridge_irq_postinstall(struct drm_device *dev)
2301 {
2302         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2303         /* enable the kinds of interrupts that are always enabled */
2304         u32 display_mask =
2305                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2306                 DE_PLANEC_FLIP_DONE_IVB |
2307                 DE_PLANEB_FLIP_DONE_IVB |
2308                 DE_PLANEA_FLIP_DONE_IVB |
2309                 DE_AUX_CHANNEL_A_IVB;
2310         u32 render_irqs;
2311
2312         dev_priv->irq_mask = ~display_mask;
2313
2314         /* should always be able to generate irqs */
2315         I915_WRITE(DEIIR, I915_READ(DEIIR));
2316         I915_WRITE(DEIMR, dev_priv->irq_mask);
2317         I915_WRITE(DEIER,
2318                    display_mask |
2319                    DE_PIPEC_VBLANK_IVB |
2320                    DE_PIPEB_VBLANK_IVB |
2321                    DE_PIPEA_VBLANK_IVB);
2322         POSTING_READ(DEIER);
2323
2324         dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2325
2326         I915_WRITE(GTIIR, I915_READ(GTIIR));
2327         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2328
2329         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2330                 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2331         I915_WRITE(GTIER, render_irqs);
2332         POSTING_READ(GTIER);
2333
2334         ibx_irq_postinstall(dev);
2335
2336         return 0;
2337 }
2338
2339 static int valleyview_irq_postinstall(struct drm_device *dev)
2340 {
2341         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2342         u32 enable_mask;
2343         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2344         u32 render_irqs;
2345         u16 msid;
2346
2347         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2348         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2349                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2350                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2351                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2352
2353         /*
2354          * Leave vblank interrupts masked initially.  enable/disable will
2355          * toggle them based on usage.
2356          */
2357         dev_priv->irq_mask = (~enable_mask) |
2358                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2359                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2360
2361         /* Hack for broken MSIs on VLV */
2362         pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2363         pci_read_config_word(dev->pdev, 0x98, &msid);
2364         msid &= 0xff; /* mask out delivery bits */
2365         msid |= (1<<14);
2366         pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2367
2368         I915_WRITE(PORT_HOTPLUG_EN, 0);
2369         POSTING_READ(PORT_HOTPLUG_EN);
2370
2371         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2372         I915_WRITE(VLV_IER, enable_mask);
2373         I915_WRITE(VLV_IIR, 0xffffffff);
2374         I915_WRITE(PIPESTAT(0), 0xffff);
2375         I915_WRITE(PIPESTAT(1), 0xffff);
2376         POSTING_READ(VLV_IER);
2377
2378         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2379         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2380         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2381
2382         I915_WRITE(VLV_IIR, 0xffffffff);
2383         I915_WRITE(VLV_IIR, 0xffffffff);
2384
2385         I915_WRITE(GTIIR, I915_READ(GTIIR));
2386         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2387
2388         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2389                 GEN6_BLITTER_USER_INTERRUPT;
2390         I915_WRITE(GTIER, render_irqs);
2391         POSTING_READ(GTIER);
2392
2393         /* ack & enable invalid PTE error interrupts */
2394 #if 0 /* FIXME: add support to irq handler for checking these bits */
2395         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2396         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2397 #endif
2398
2399         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2400
2401         return 0;
2402 }
2403
2404 static void valleyview_irq_uninstall(struct drm_device *dev)
2405 {
2406         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2407         int pipe;
2408
2409         if (!dev_priv)
2410                 return;
2411
2412         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2413
2414         for_each_pipe(pipe)
2415                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2416
2417         I915_WRITE(HWSTAM, 0xffffffff);
2418         I915_WRITE(PORT_HOTPLUG_EN, 0);
2419         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2420         for_each_pipe(pipe)
2421                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2422         I915_WRITE(VLV_IIR, 0xffffffff);
2423         I915_WRITE(VLV_IMR, 0xffffffff);
2424         I915_WRITE(VLV_IER, 0x0);
2425         POSTING_READ(VLV_IER);
2426 }
2427
2428 static void ironlake_irq_uninstall(struct drm_device *dev)
2429 {
2430         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2431
2432         if (!dev_priv)
2433                 return;
2434
2435         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2436
2437         I915_WRITE(HWSTAM, 0xffffffff);
2438
2439         I915_WRITE(DEIMR, 0xffffffff);
2440         I915_WRITE(DEIER, 0x0);
2441         I915_WRITE(DEIIR, I915_READ(DEIIR));
2442
2443         I915_WRITE(GTIMR, 0xffffffff);
2444         I915_WRITE(GTIER, 0x0);
2445         I915_WRITE(GTIIR, I915_READ(GTIIR));
2446
2447         if (HAS_PCH_NOP(dev))
2448                 return;
2449
2450         I915_WRITE(SDEIMR, 0xffffffff);
2451         I915_WRITE(SDEIER, 0x0);
2452         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2453 }
2454
2455 static void i8xx_irq_preinstall(struct drm_device * dev)
2456 {
2457         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2458         int pipe;
2459
2460         atomic_set(&dev_priv->irq_received, 0);
2461
2462         for_each_pipe(pipe)
2463                 I915_WRITE(PIPESTAT(pipe), 0);
2464         I915_WRITE16(IMR, 0xffff);
2465         I915_WRITE16(IER, 0x0);
2466         POSTING_READ16(IER);
2467 }
2468
2469 static int i8xx_irq_postinstall(struct drm_device *dev)
2470 {
2471         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2472
2473         I915_WRITE16(EMR,
2474                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2475
2476         /* Unmask the interrupts that we always want on. */
2477         dev_priv->irq_mask =
2478                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2479                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2480                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2481                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2482                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2483         I915_WRITE16(IMR, dev_priv->irq_mask);
2484
2485         I915_WRITE16(IER,
2486                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2487                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2488                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2489                      I915_USER_INTERRUPT);
2490         POSTING_READ16(IER);
2491
2492         return 0;
2493 }
2494
2495 /*
2496  * Returns true when a page flip has completed.
2497  */
2498 static bool i8xx_handle_vblank(struct drm_device *dev,
2499                                int pipe, u16 iir)
2500 {
2501         drm_i915_private_t *dev_priv = dev->dev_private;
2502         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2503
2504         if (!drm_handle_vblank(dev, pipe))
2505                 return false;
2506
2507         if ((iir & flip_pending) == 0)
2508                 return false;
2509
2510         intel_prepare_page_flip(dev, pipe);
2511
2512         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2513          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2514          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2515          * the flip is completed (no longer pending). Since this doesn't raise
2516          * an interrupt per se, we watch for the change at vblank.
2517          */
2518         if (I915_READ16(ISR) & flip_pending)
2519                 return false;
2520
2521         intel_finish_page_flip(dev, pipe);
2522
2523         return true;
2524 }
2525
2526 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2527 {
2528         struct drm_device *dev = (struct drm_device *) arg;
2529         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2530         u16 iir, new_iir;
2531         u32 pipe_stats[2];
2532         unsigned long irqflags;
2533         int irq_received;
2534         int pipe;
2535         u16 flip_mask =
2536                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2537                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2538
2539         atomic_inc(&dev_priv->irq_received);
2540
2541         iir = I915_READ16(IIR);
2542         if (iir == 0)
2543                 return IRQ_NONE;
2544
2545         while (iir & ~flip_mask) {
2546                 /* Can't rely on pipestat interrupt bit in iir as it might
2547                  * have been cleared after the pipestat interrupt was received.
2548                  * It doesn't set the bit in iir again, but it still produces
2549                  * interrupts (for non-MSI).
2550                  */
2551                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2552                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2553                         i915_handle_error(dev, false);
2554
2555                 for_each_pipe(pipe) {
2556                         int reg = PIPESTAT(pipe);
2557                         pipe_stats[pipe] = I915_READ(reg);
2558
2559                         /*
2560                          * Clear the PIPE*STAT regs before the IIR
2561                          */
2562                         if (pipe_stats[pipe] & 0x8000ffff) {
2563                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2564                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2565                                                          pipe_name(pipe));
2566                                 I915_WRITE(reg, pipe_stats[pipe]);
2567                                 irq_received = 1;
2568                         }
2569                 }
2570                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2571
2572                 I915_WRITE16(IIR, iir & ~flip_mask);
2573                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2574
2575                 i915_update_dri1_breadcrumb(dev);
2576
2577                 if (iir & I915_USER_INTERRUPT)
2578                         notify_ring(dev, &dev_priv->ring[RCS]);
2579
2580                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2581                     i8xx_handle_vblank(dev, 0, iir))
2582                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2583
2584                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2585                     i8xx_handle_vblank(dev, 1, iir))
2586                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2587
2588                 iir = new_iir;
2589         }
2590
2591         return IRQ_HANDLED;
2592 }
2593
2594 static void i8xx_irq_uninstall(struct drm_device * dev)
2595 {
2596         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2597         int pipe;
2598
2599         for_each_pipe(pipe) {
2600                 /* Clear enable bits; then clear status bits */
2601                 I915_WRITE(PIPESTAT(pipe), 0);
2602                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2603         }
2604         I915_WRITE16(IMR, 0xffff);
2605         I915_WRITE16(IER, 0x0);
2606         I915_WRITE16(IIR, I915_READ16(IIR));
2607 }
2608
2609 static void i915_irq_preinstall(struct drm_device * dev)
2610 {
2611         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2612         int pipe;
2613
2614         atomic_set(&dev_priv->irq_received, 0);
2615
2616         if (I915_HAS_HOTPLUG(dev)) {
2617                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2618                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2619         }
2620
2621         I915_WRITE16(HWSTAM, 0xeffe);
2622         for_each_pipe(pipe)
2623                 I915_WRITE(PIPESTAT(pipe), 0);
2624         I915_WRITE(IMR, 0xffffffff);
2625         I915_WRITE(IER, 0x0);
2626         POSTING_READ(IER);
2627 }
2628
2629 static int i915_irq_postinstall(struct drm_device *dev)
2630 {
2631         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2632         u32 enable_mask;
2633
2634         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2635
2636         /* Unmask the interrupts that we always want on. */
2637         dev_priv->irq_mask =
2638                 ~(I915_ASLE_INTERRUPT |
2639                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2640                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2641                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2642                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2643                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2644
2645         enable_mask =
2646                 I915_ASLE_INTERRUPT |
2647                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2648                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2649                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2650                 I915_USER_INTERRUPT;
2651
2652         if (I915_HAS_HOTPLUG(dev)) {
2653                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2654                 POSTING_READ(PORT_HOTPLUG_EN);
2655
2656                 /* Enable in IER... */
2657                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2658                 /* and unmask in IMR */
2659                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2660         }
2661
2662         I915_WRITE(IMR, dev_priv->irq_mask);
2663         I915_WRITE(IER, enable_mask);
2664         POSTING_READ(IER);
2665
2666         intel_opregion_enable_asle(dev);
2667
2668         return 0;
2669 }
2670
2671 /*
2672  * Returns true when a page flip has completed.
2673  */
2674 static bool i915_handle_vblank(struct drm_device *dev,
2675                                int plane, int pipe, u32 iir)
2676 {
2677         drm_i915_private_t *dev_priv = dev->dev_private;
2678         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2679
2680         if (!drm_handle_vblank(dev, pipe))
2681                 return false;
2682
2683         if ((iir & flip_pending) == 0)
2684                 return false;
2685
2686         intel_prepare_page_flip(dev, plane);
2687
2688         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2689          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2690          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2691          * the flip is completed (no longer pending). Since this doesn't raise
2692          * an interrupt per se, we watch for the change at vblank.
2693          */
2694         if (I915_READ(ISR) & flip_pending)
2695                 return false;
2696
2697         intel_finish_page_flip(dev, pipe);
2698
2699         return true;
2700 }
2701
2702 static irqreturn_t i915_irq_handler(int irq, void *arg)
2703 {
2704         struct drm_device *dev = (struct drm_device *) arg;
2705         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2706         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2707         unsigned long irqflags;
2708         u32 flip_mask =
2709                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2710                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2711         int pipe, ret = IRQ_NONE;
2712
2713         atomic_inc(&dev_priv->irq_received);
2714
2715         iir = I915_READ(IIR);
2716         do {
2717                 bool irq_received = (iir & ~flip_mask) != 0;
2718                 bool blc_event = false;
2719
2720                 /* Can't rely on pipestat interrupt bit in iir as it might
2721                  * have been cleared after the pipestat interrupt was received.
2722                  * It doesn't set the bit in iir again, but it still produces
2723                  * interrupts (for non-MSI).
2724                  */
2725                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2727                         i915_handle_error(dev, false);
2728
2729                 for_each_pipe(pipe) {
2730                         int reg = PIPESTAT(pipe);
2731                         pipe_stats[pipe] = I915_READ(reg);
2732
2733                         /* Clear the PIPE*STAT regs before the IIR */
2734                         if (pipe_stats[pipe] & 0x8000ffff) {
2735                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2736                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2737                                                          pipe_name(pipe));
2738                                 I915_WRITE(reg, pipe_stats[pipe]);
2739                                 irq_received = true;
2740                         }
2741                 }
2742                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2743
2744                 if (!irq_received)
2745                         break;
2746
2747                 /* Consume port.  Then clear IIR or we'll miss events */
2748                 if ((I915_HAS_HOTPLUG(dev)) &&
2749                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2750                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2751                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2752
2753                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2754                                   hotplug_status);
2755                         if (hotplug_trigger) {
2756                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
2757                                         i915_hpd_irq_setup(dev);
2758                                 queue_work(dev_priv->wq,
2759                                            &dev_priv->hotplug_work);
2760                         }
2761                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2762                         POSTING_READ(PORT_HOTPLUG_STAT);
2763                 }
2764
2765                 I915_WRITE(IIR, iir & ~flip_mask);
2766                 new_iir = I915_READ(IIR); /* Flush posted writes */
2767
2768                 if (iir & I915_USER_INTERRUPT)
2769                         notify_ring(dev, &dev_priv->ring[RCS]);
2770
2771                 for_each_pipe(pipe) {
2772                         int plane = pipe;
2773                         if (IS_MOBILE(dev))
2774                                 plane = !plane;
2775
2776                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2777                             i915_handle_vblank(dev, plane, pipe, iir))
2778                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2779
2780                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2781                                 blc_event = true;
2782                 }
2783
2784                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2785                         intel_opregion_asle_intr(dev);
2786
2787                 /* With MSI, interrupts are only generated when iir
2788                  * transitions from zero to nonzero.  If another bit got
2789                  * set while we were handling the existing iir bits, then
2790                  * we would never get another interrupt.
2791                  *
2792                  * This is fine on non-MSI as well, as if we hit this path
2793                  * we avoid exiting the interrupt handler only to generate
2794                  * another one.
2795                  *
2796                  * Note that for MSI this could cause a stray interrupt report
2797                  * if an interrupt landed in the time between writing IIR and
2798                  * the posting read.  This should be rare enough to never
2799                  * trigger the 99% of 100,000 interrupts test for disabling
2800                  * stray interrupts.
2801                  */
2802                 ret = IRQ_HANDLED;
2803                 iir = new_iir;
2804         } while (iir & ~flip_mask);
2805
2806         i915_update_dri1_breadcrumb(dev);
2807
2808         return ret;
2809 }
2810
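     /* Gen3 teardown: stop the hotplug re-enable timer, turn off hotplug
      * detection, mask HWSTAM/IMR, disable IER and clear any pipe status
      * and IIR bits that are still latched. */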
2811 static void i915_irq_uninstall(struct drm_device * dev)
2812 {
2813         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2814         int pipe;
2815
2816         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2817
2818         if (I915_HAS_HOTPLUG(dev)) {
2819                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2820                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2821         }
2822
2823         I915_WRITE16(HWSTAM, 0xffff);
2824         for_each_pipe(pipe) {
2825                 /* Clear enable bits; then clear status bits */
2826                 I915_WRITE(PIPESTAT(pipe), 0);
2827                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2828         }
2829         I915_WRITE(IMR, 0xffffffff);
2830         I915_WRITE(IER, 0x0);
2831
2832         I915_WRITE(IIR, I915_READ(IIR));
2833 }
2834
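     /* Gen4 (i965/G4X) preinstall: quiesce the device before the handler is
      * registered - hotplug, HWSTAM, per-pipe status and IER all disabled. */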
2835 static void i965_irq_preinstall(struct drm_device * dev)
2836 {
2837         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2838         int pipe;
2839
2840         atomic_set(&dev_priv->irq_received, 0);
2841
2842         I915_WRITE(PORT_HOTPLUG_EN, 0);
2843         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2844
2845         I915_WRITE(HWSTAM, 0xeffe);
2846         for_each_pipe(pipe)
2847                 I915_WRITE(PIPESTAT(pipe), 0);
2848         I915_WRITE(IMR, 0xffffffff);
2849         I915_WRITE(IER, 0x0);
2850         POSTING_READ(IER);
2851 }
2852
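     /* Gen4 postinstall: unmask the display, hotplug and render sources we
      * always want, leave the flip-pending bits out of IER for now, enable
      * GMBUS pipe events and report page-table/memory-refresh errors via EMR. */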
2853 static int i965_irq_postinstall(struct drm_device *dev)
2854 {
2855         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2856         u32 enable_mask;
2857         u32 error_mask;
2858
2859         /* Unmask the interrupts that we always want on. */
2860         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2861                                I915_DISPLAY_PORT_INTERRUPT |
2862                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2863                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2864                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2865                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2866                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2867
2868         enable_mask = ~dev_priv->irq_mask;
2869         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2870                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2871         enable_mask |= I915_USER_INTERRUPT;
2872
2873         if (IS_G4X(dev))
2874                 enable_mask |= I915_BSD_USER_INTERRUPT;
2875
2876         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2877
2878         /*
2879          * Enable some error detection, note the instruction error mask
2880          * bit is reserved, so we leave it masked.
2881          */
2882         if (IS_G4X(dev)) {
2883                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2884                                GM45_ERROR_MEM_PRIV |
2885                                GM45_ERROR_CP_PRIV |
2886                                I915_ERROR_MEMORY_REFRESH);
2887         } else {
2888                 error_mask = ~(I915_ERROR_PAGE_TABLE |
2889                                I915_ERROR_MEMORY_REFRESH);
2890         }
2891         I915_WRITE(EMR, error_mask);
2892
2893         I915_WRITE(IMR, dev_priv->irq_mask);
2894         I915_WRITE(IER, enable_mask);
2895         POSTING_READ(IER);
2896
2897         I915_WRITE(PORT_HOTPLUG_EN, 0);
2898         POSTING_READ(PORT_HOTPLUG_EN);
2899
2900         intel_opregion_enable_asle(dev);
2901
2902         return 0;
2903 }
2904
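     /* Program PORT_HOTPLUG_EN: enable detection only for the pins currently
      * marked HPD_ENABLED and set the CRT detection parameters exactly once,
      * since reprogramming them tends to cause a spurious hotplug event. */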
2905 static void i915_hpd_irq_setup(struct drm_device *dev)
2906 {
2907         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2908         struct drm_mode_config *mode_config = &dev->mode_config;
2909         struct intel_encoder *intel_encoder;
2910         u32 hotplug_en;
2911
2912         if (I915_HAS_HOTPLUG(dev)) {
2913                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2914                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2915                 /* Note HDMI and DP share hotplug bits */
2916                 /* enable bits are the same for all generations */
2917                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2918                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2919                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2920                 /* Programming the CRT detection parameters tends
2921                  * to generate a spurious hotplug event about three
2922                  * seconds later.  So just do it once.
2923                  */
2924                 if (IS_G4X(dev))
2925                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2926                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2927                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2928
2929                 /* Ignore TV since it's buggy */
2930                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2931         }
2932 }
2933
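     /* Gen4 interrupt handler: loop on IIR, latching and clearing PIPESTAT
      * under irq_lock, then service hotplug, ring, vblank/page-flip, ASLE and
      * GMBUS events until no bits outside flip_mask remain set. */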
2934 static irqreturn_t i965_irq_handler(int irq, void *arg)
2935 {
2936         struct drm_device *dev = (struct drm_device *) arg;
2937         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2938         u32 iir, new_iir;
2939         u32 pipe_stats[I915_MAX_PIPES];
2940         unsigned long irqflags;
2941         bool irq_received;
2942         int ret = IRQ_NONE, pipe;
2943         u32 flip_mask =
2944                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2945                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2946
2947         atomic_inc(&dev_priv->irq_received);
2948
2949         iir = I915_READ(IIR);
2950
2951         for (;;) {
2952                 bool blc_event = false;
2953
2954                 irq_received = (iir & ~flip_mask) != 0;
2955
2956                 /* Can't rely on pipestat interrupt bit in iir as it might
2957                  * have been cleared after the pipestat interrupt was received.
2958                  * It doesn't set the bit in iir again, but it still produces
2959                  * interrupts (for non-MSI).
2960                  */
2961                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2962                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2963                         i915_handle_error(dev, false);
2964
2965                 for_each_pipe(pipe) {
2966                         int reg = PIPESTAT(pipe);
2967                         pipe_stats[pipe] = I915_READ(reg);
2968
2969                         /*
2970                          * Clear the PIPE*STAT regs before the IIR
2971                          */
2972                         if (pipe_stats[pipe] & 0x8000ffff) {
2973                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2974                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2975                                                          pipe_name(pipe));
2976                                 I915_WRITE(reg, pipe_stats[pipe]);
2977                                 irq_received = true;
2978                         }
2979                 }
2980                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2981
2982                 if (!irq_received)
2983                         break;
2984
2985                 ret = IRQ_HANDLED;
2986
2987                 /* Consume port.  Then clear IIR or we'll miss events */
2988                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2989                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2990                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2991                                                                   HOTPLUG_INT_STATUS_G4X :
2992                                                                   HOTPLUG_INT_STATUS_I915);
2993
2994                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2995                                   hotplug_status);
2996                         if (hotplug_trigger) {
2997                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
2998                                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
2999                                         i915_hpd_irq_setup(dev);
3000                                 queue_work(dev_priv->wq,
3001                                            &dev_priv->hotplug_work);
3002                         }
3003                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3004                         I915_READ(PORT_HOTPLUG_STAT); /* Flush posted write */
3005                 }
3006
3007                 I915_WRITE(IIR, iir & ~flip_mask);
3008                 new_iir = I915_READ(IIR); /* Flush posted writes */
3009
3010                 if (iir & I915_USER_INTERRUPT)
3011                         notify_ring(dev, &dev_priv->ring[RCS]);
3012                 if (iir & I915_BSD_USER_INTERRUPT)
3013                         notify_ring(dev, &dev_priv->ring[VCS]);
3014
3015                 for_each_pipe(pipe) {
3016                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3017                             i915_handle_vblank(dev, pipe, pipe, iir))
3018                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3019
3020                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3021                                 blc_event = true;
3022                 }
3023
3024
3025                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3026                         intel_opregion_asle_intr(dev);
3027
3028                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3029                         gmbus_irq_handler(dev);
3030
3031                 /* With MSI, interrupts are only generated when iir
3032                  * transitions from zero to nonzero.  If another bit got
3033                  * set while we were handling the existing iir bits, then
3034                  * we would never get another interrupt.
3035                  *
3036                  * This is fine on non-MSI as well, as if we hit this path
3037                  * we avoid exiting the interrupt handler only to generate
3038                  * another one.
3039                  *
3040                  * Note that for MSI this could cause a stray interrupt report
3041                  * if an interrupt landed in the time between writing IIR and
3042                  * the posting read.  This should be rare enough to never
3043                  * trigger the 99% of 100,000 interrupts test for disabling
3044                  * stray interrupts.
3045                  */
3046                 iir = new_iir;
3047         }
3048
3049         i915_update_dri1_breadcrumb(dev);
3050
3051         return ret;
3052 }
3053
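     /* Gen4 teardown: stop the hotplug re-enable timer, then mask everything
      * and clear any pipe status and IIR bits left behind. */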
3054 static void i965_irq_uninstall(struct drm_device * dev)
3055 {
3056         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3057         int pipe;
3058
3059         if (!dev_priv)
3060                 return;
3061
3062         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3063
3064         I915_WRITE(PORT_HOTPLUG_EN, 0);
3065         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3066
3067         I915_WRITE(HWSTAM, 0xffffffff);
3068         for_each_pipe(pipe)
3069                 I915_WRITE(PIPESTAT(pipe), 0);
3070         I915_WRITE(IMR, 0xffffffff);
3071         I915_WRITE(IER, 0x0);
3072
3073         for_each_pipe(pipe)
3074                 I915_WRITE(PIPESTAT(pipe),
3075                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3076         I915_WRITE(IIR, I915_READ(IIR));
3077 }
3078
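     /* Timer callback: re-arm HPD pins that the interrupt storm detector shut
      * off, restoring each affected connector's polling mode before the
      * platform hpd_irq_setup hook reprograms the hardware. */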
3079 static void i915_reenable_hotplug_timer_func(unsigned long data)
3080 {
3081         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3082         struct drm_device *dev = dev_priv->dev;
3083         struct drm_mode_config *mode_config = &dev->mode_config;
3084         unsigned long irqflags;
3085         int i;
3086
3087         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3088         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3089                 struct drm_connector *connector;
3090
3091                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3092                         continue;
3093
3094                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3095
3096                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3097                         struct intel_connector *intel_connector = to_intel_connector(connector);
3098
3099                         if (intel_connector->encoder->hpd_pin == i) {
3100                                 if (connector->polled != intel_connector->polled)
3101                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3102                                                          drm_get_connector_name(connector));
3103                                 connector->polled = intel_connector->polled;
3104                                 if (!connector->polled)
3105                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3106                         }
3107                 }
3108         }
3109         if (dev_priv->display.hpd_irq_setup)
3110                 dev_priv->display.hpd_irq_setup(dev);
3111         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3112 }
3113
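     /* Pick the irq/vblank/hotplug callbacks for this platform (Valleyview,
      * IVB/HSW, other PCH-split parts, legacy gen2/3/4) and set up the work
      * items, timers and PM QoS request the handlers rely on. */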
3114 void intel_irq_init(struct drm_device *dev)
3115 {
3116         struct drm_i915_private *dev_priv = dev->dev_private;
3117
3118         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3119         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3120         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3121         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3122
3123         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3124                     i915_hangcheck_elapsed,
3125                     (unsigned long) dev);
3126         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3127                     (unsigned long) dev_priv);
3128
3129         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3130
3131         dev->driver->get_vblank_counter = i915_get_vblank_counter;
3132         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3133         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3134                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3135                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3136         }
3137
3138         if (drm_core_check_feature(dev, DRIVER_MODESET))
3139                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3140         else
3141                 dev->driver->get_vblank_timestamp = NULL;
3142         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3143
3144         if (IS_VALLEYVIEW(dev)) {
3145                 dev->driver->irq_handler = valleyview_irq_handler;
3146                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3147                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3148                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3149                 dev->driver->enable_vblank = valleyview_enable_vblank;
3150                 dev->driver->disable_vblank = valleyview_disable_vblank;
3151                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3152         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3153                 /* Share pre & uninstall handlers with ILK/SNB */
3154                 dev->driver->irq_handler = ivybridge_irq_handler;
3155                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3156                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3157                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3158                 dev->driver->enable_vblank = ivybridge_enable_vblank;
3159                 dev->driver->disable_vblank = ivybridge_disable_vblank;
3160                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3161         } else if (HAS_PCH_SPLIT(dev)) {
3162                 dev->driver->irq_handler = ironlake_irq_handler;
3163                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3164                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3165                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3166                 dev->driver->enable_vblank = ironlake_enable_vblank;
3167                 dev->driver->disable_vblank = ironlake_disable_vblank;
3168                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3169         } else {
3170                 if (INTEL_INFO(dev)->gen == 2) {
3171                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3172                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3173                         dev->driver->irq_handler = i8xx_irq_handler;
3174                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3175                 } else if (INTEL_INFO(dev)->gen == 3) {
3176                         dev->driver->irq_preinstall = i915_irq_preinstall;
3177                         dev->driver->irq_postinstall = i915_irq_postinstall;
3178                         dev->driver->irq_uninstall = i915_irq_uninstall;
3179                         dev->driver->irq_handler = i915_irq_handler;
3180                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3181                 } else {
3182                         dev->driver->irq_preinstall = i965_irq_preinstall;
3183                         dev->driver->irq_postinstall = i965_irq_postinstall;
3184                         dev->driver->irq_uninstall = i965_irq_uninstall;
3185                         dev->driver->irq_handler = i965_irq_handler;
3186                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3187                 }
3188                 dev->driver->enable_vblank = i915_enable_vblank;
3189                 dev->driver->disable_vblank = i915_disable_vblank;
3190         }
3191 }
3192
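     /* Reset the hotplug storm statistics, mark every pin enabled and let
      * HPD-capable connectors rely on interrupts (DRM_CONNECTOR_POLL_HPD)
      * instead of output polling, then reprogram the hotplug hardware. */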
3193 void intel_hpd_init(struct drm_device *dev)
3194 {
3195         struct drm_i915_private *dev_priv = dev->dev_private;
3196         struct drm_mode_config *mode_config = &dev->mode_config;
3197         struct drm_connector *connector;
3198         int i;
3199
3200         for (i = 1; i < HPD_NUM_PINS; i++) {
3201                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3202                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3203         }
3204         list_for_each_entry(connector, &mode_config->connector_list, head) {
3205                 struct intel_connector *intel_connector = to_intel_connector(connector);
3206                 connector->polled = intel_connector->polled;
3207                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3208                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3209         }
3210         if (dev_priv->display.hpd_irq_setup)
3211                 dev_priv->display.hpd_irq_setup(dev);
3212 }