Merge tag 'v3.15-rc1' into perf/urgent
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <drm/drm_crtc_helper.h>
40
41 static struct drm_driver driver;
42
/*
 * Default per-display-block MMIO offset tables (pipes, transcoders,
 * DPLLs, palettes) shared by every intel_device_info table below.
 */
#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
        .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
52
/* Gen2 platforms: i830, 845G, 85x, 865G. Render ring only. */
static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen3 platforms: i915, i945 (G33/Pineview further below). */
static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen4 platforms: i965 (Broadwater/Crestline) and G4x. */
static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* G4x adds the BSD (video) ring alongside render. */
static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Pineview is a G33 derivative; note: no .ring_mask set here. */
static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen5: Ironlake. */
static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen6: Sandybridge — first tables here with BLT ring and LLC. */
static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};
194
/*
 * Baseline feature set shared by the gen7 (IVB/VLV/HSW) tables below.
 * Individual tables may override fields afterwards; for designated
 * initializers the last assignment wins ("legal, last one wins").
 */
#define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Haswell adds DDI outputs, the FPGA debug bridge and the VEBOX ring. */
static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen8: Broadwell (does not use GEN7_FEATURES). */
static const struct intel_device_info intel_broadwell_d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};
281
282 /*
283  * Make sure any device matches here are from most specific to most
284  * general.  For example, since the Quanta match is based on the subsystem
285  * and subvendor IDs, we need it to come before the more general IVB
286  * PCI ID matches, otherwise we'll use the wrong info struct above.
287  */
#define INTEL_PCI_IDS \
        INTEL_I830_IDS(&intel_i830_info),       \
        INTEL_I845G_IDS(&intel_845g_info),      \
        INTEL_I85X_IDS(&intel_i85x_info),       \
        INTEL_I865G_IDS(&intel_i865g_info),     \
        INTEL_I915G_IDS(&intel_i915g_info),     \
        INTEL_I915GM_IDS(&intel_i915gm_info),   \
        INTEL_I945G_IDS(&intel_i945g_info),     \
        INTEL_I945GM_IDS(&intel_i945gm_info),   \
        INTEL_I965G_IDS(&intel_i965g_info),     \
        INTEL_G33_IDS(&intel_g33_info),         \
        INTEL_I965GM_IDS(&intel_i965gm_info),   \
        INTEL_GM45_IDS(&intel_gm45_info),       \
        INTEL_G45_IDS(&intel_g45_info),         \
        INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
        INTEL_HSW_D_IDS(&intel_haswell_d_info), \
        INTEL_HSW_M_IDS(&intel_haswell_m_info), \
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
        INTEL_BDW_M_IDS(&intel_broadwell_m_info),       \
        INTEL_BDW_D_IDS(&intel_broadwell_d_info)

/* PCI ID match table; terminated by a zeroed sentinel entry. */
static const struct pci_device_id pciidlist[] = {               /* aka */
        INTEL_PCI_IDS,
        {0, 0, 0}
};
321
322 #if defined(CONFIG_DRM_I915_KMS)
323 MODULE_DEVICE_TABLE(pci, pciidlist);
324 #endif
325
/*
 * intel_detect_pch - identify the PCH (south bridge) variant by scanning
 * the ISA bridge devices on the PCI bus, recording its type and device
 * id in dev_priv->pch_type / dev_priv->pch_id.  Devices with no display
 * pipes are marked PCH_NOP and skipped.
 */
void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch = NULL;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMM, that only
         * need to expose ISA bridge to let driver know the real hardware
         * underneath. This is a requirement from virtualization team.
         *
         * In some virtualized environments (e.g. XEN), there is irrelevant
         * ISA bridge in the system. To work reliably, we should scan through
         * all the ISA bridge devices and check for the first match, instead
         * of only checking the first one.
         */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(IS_ULT(dev));
                        } else if (IS_BROADWELL(dev)) {
                                /* Broadwell is matched on the GPU platform
                                 * rather than the bridge id; pch_id is
                                 * overridden with the assumed LPT-LP type. */
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->pch_id =
                                        INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
                                DRM_DEBUG_KMS("This is Broadwell, assuming "
                                              "LynxPoint LP PCH\n");
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

        /* Release the bridge reference still held after a matching break;
         * pci_dev_put(NULL) is a no-op when the scan ran to completion. */
        pci_dev_put(pch);
}
395
396 bool i915_semaphore_is_enabled(struct drm_device *dev)
397 {
398         if (INTEL_INFO(dev)->gen < 6)
399                 return false;
400
401         if (i915.semaphores >= 0)
402                 return i915.semaphores;
403
404         /* Until we get further testing... */
405         if (IS_GEN8(dev))
406                 return false;
407
408 #ifdef CONFIG_INTEL_IOMMU
409         /* Enable semaphores on SNB when IO remapping is off */
410         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
411                 return false;
412 #endif
413
414         return true;
415 }
416
/*
 * Common suspend path shared by suspend, freeze and poweroff: quiesce
 * GEM, disable the CRTCs (preserving software state for thaw), and save
 * hardware state.  Returns 0 on success or a negative error code if GEM
 * could not be idled.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        /* Held until the matching intel_runtime_pm_put() in __i915_drm_thaw().
         * NOTE(review): the early error return below leaves this reference
         * held — confirm that is intended. */
        intel_runtime_pm_get(dev_priv);

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error;

                error = i915_gem_suspend(dev);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;
                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw.
                 */
                mutex_lock(&dev->mode_config.mutex);
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                        dev_priv->display.crtc_disable(crtc);
                mutex_unlock(&dev->mode_config.mutex);

                intel_modeset_suspend_hw(dev);
        }

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        intel_opregion_fini(dev);
        intel_uncore_fini(dev);

        /* Put the fbdev console to sleep under the console lock. */
        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();

        dev_priv->suspend_count++;

        return 0;
}
479
/*
 * Legacy suspend entry point: validate the device, run the common
 * freeze path, and on a real suspend event put the PCI device into
 * D3hot.  Returns 0 on success or a negative error code.
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        /* Nothing to do for the pre-thaw notification. */
        if (state.event == PM_EVENT_PRETHAW)
                return 0;

        /* Device already switched off — nothing left to suspend. */
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_freeze(dev);
        if (error)
                return error;

        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }

        return 0;
}
509
/*
 * Deferred fbdev un-suspend: scheduled from __i915_drm_thaw() when the
 * console lock could not be taken without blocking.  Runs from the
 * console_resume_work work item.
 */
void intel_console_resume(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             console_resume_work);
        struct drm_device *dev = dev_priv->dev;

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
        console_unlock();
}
521
/*
 * Re-run hotplug detection after resume: the display configuration may
 * have changed while suspended, so invoke every encoder's hot_plug hook
 * and then send a HPD uevent so userspace can react.
 */
static void intel_resume_hotplug(struct drm_device *dev)
{
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}
539
/*
 * Early thaw step, run before the main __i915_drm_thaw() path:
 * sanitize the uncore state and (re)initialize the hardware power
 * domains.  Always returns 0.
 */
static int i915_drm_thaw_early(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_uncore_early_sanitize(dev);
        intel_uncore_sanitize(dev);
        intel_power_domains_init_hw(dev_priv);

        return 0;
}
550
/*
 * Common resume/thaw path: optionally restore the GTT mappings, restore
 * saved register state, re-initialize GEM and the modeset hardware
 * state, and re-enable hotplug processing.  Releases the runtime PM
 * reference taken in i915_drm_freeze().  Returns 0 or the error from
 * i915_gem_init_hw().
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET) &&
            restore_gtt_mappings) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
                drm_mode_config_reset(dev);

                mutex_lock(&dev->struct_mutex);

                /* Note: a failure here is only returned at the very end,
                 * after the display and opregion setup below has still run. */
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev);

                intel_modeset_init_hw(dev);

                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
                /* Config may have changed between suspend and resume */
                intel_resume_hotplug(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity.  Try to keep it out of the hot
         * path of resume if possible.
         */
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
                console_unlock();
        } else {
                /* Defer the fbdev wakeup to intel_console_resume(). */
                schedule_work(&dev_priv->console_resume_work);
        }

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_runtime_pm_put(dev_priv);
        return error;
}
618
/*
 * Thaw callback: clear any stale GPU faults (KMS only), then run the
 * common thaw path with a full GTT-mapping restore.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_check_and_clear_faults(dev);

        return __i915_drm_thaw(dev, true);
}
626
/*
 * Early resume step: re-enable the PCI device and run the early thaw
 * (uncore/power-domain) setup.  Returns 0, or -EIO if the PCI device
 * could not be re-enabled; a no-op when the device is switched off.
 */
static int i915_resume_early(struct drm_device *dev)
{
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        /*
         * We have a resume ordering issue with the snd-hda driver also
         * requiring our device to be power up. Due to the lack of a
         * parent/child relationship we currently solve this with an early
         * resume hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);

        return i915_drm_thaw_early(dev);
}
648
/*
 * Full resume: run the common thaw path (restoring GTT mappings only
 * when there is no opregion) and re-enable output polling.  Returns 0
 * or the error from __i915_drm_thaw().
 */
int i915_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
         * earlier) need to restore the GTT mappings since the BIOS might clear
         * all our scratch PTEs.
         */
        ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
        if (ret)
                return ret;

        drm_kms_helper_poll_enable(dev);
        return 0;
}
666
/*
 * Legacy (non-split) resume entry point: run the early resume step
 * followed by the full resume.  The original version discarded both
 * return values and always reported success; propagate the first
 * failure to the caller instead.
 */
static int i915_resume_legacy(struct drm_device *dev)
{
        int ret;

        ret = i915_resume_early(dev);
        if (ret)
                return ret;

        return i915_resume(dev);
}
674
675 /**
676  * i915_reset - reset chip after a hang
677  * @dev: drm device to reset
678  *
679  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
680  * reset or otherwise an error code.
681  *
682  * Procedure is fairly simple:
683  *   - reset the chip using the reset reg
684  *   - re-init context state
685  *   - re-init hardware status page
686  *   - re-init ring buffer
687  *   - re-init interrupt state
688  *   - re-init display
689  */
int i915_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool simulated;
        int ret;

        /* GPU reset can be disabled entirely via the module parameter. */
        if (!i915.reset)
                return 0;

        mutex_lock(&dev->struct_mutex);

        /* Reset GEM's tracking state before touching the hardware. */
        i915_gem_reset(dev);

        /* Non-zero stop_rings means this hang was injected for testing. */
        simulated = dev_priv->gpu_error.stop_rings != 0;

        ret = intel_gpu_reset(dev);

        /* Also reset the gpu hangman. */
        if (simulated) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        /* Platforms without a reset implementation still
                         * pass for simulated hangs. */
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
                dev_priv->ums.mm_suspended = 0;

                ret = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
                        return ret;
                }

                /* Re-install the interrupt handler from scratch. */
                drm_irq_uninstall(dev);
                drm_irq_install(dev);

                /* rps/rc6 re-init is necessary to restore state lost after the
                 * reset and the re-install of drm irq. Skip for ironlake per
                 * previous concerns that it doesn't respond well to some forms
                 * of re-init after reset. */
                if (INTEL_INFO(dev)->gen > 5) {
                        mutex_lock(&dev->struct_mutex);
                        intel_enable_gt_powersave(dev);
                        mutex_unlock(&dev->struct_mutex);
                }

                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }

        return 0;
}
769
/*
 * PCI probe callback: reject preliminary hardware unless explicitly
 * enabled, bind only to PCI function 0, mask off the DRM core's AGP
 * support, then hand the device to the DRM core.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;

        if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
        }

        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This causes
         * us confusion instead, especially on the systems where both
         * functions have the same PCI-ID!
         */
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;

        driver.driver_features &= ~(DRIVER_USE_AGP);

        return drm_get_pci_dev(pdev, ent, &driver);
}
793
/*
 * PCI remove callback: tear down the DRM device bound to this PCI
 * device.
 */
static void
i915_pci_remove(struct pci_dev *pdev)
{
        drm_put_dev(pci_get_drvdata(pdev));
}
801
/*
 * PM .suspend callback: validate the DRM device, skip if it has been
 * switched off, otherwise run the common freeze path.
 */
static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_freeze(drm_dev);
}
817
/*
 * PM .suspend_late callback: actually power the PCI device down, after
 * i915_pm_suspend() has already quiesced the driver.
 */
static int i915_pm_suspend_late(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        /*
         * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be power up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}
840
/*
 * PM .resume_early callback: map the generic device back to the DRM
 * device and run the early resume step.
 */
static int i915_pm_resume_early(struct device *dev)
{
        struct drm_device *drm_dev = pci_get_drvdata(to_pci_dev(dev));

        return i915_resume_early(drm_dev);
}
848
/* System resume (S3 exit): hand off to the main resume path. */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
856
857 static int i915_pm_freeze(struct device *dev)
858 {
859         struct pci_dev *pdev = to_pci_dev(dev);
860         struct drm_device *drm_dev = pci_get_drvdata(pdev);
861
862         if (!drm_dev || !drm_dev->dev_private) {
863                 dev_err(dev, "DRM not initialized, aborting suspend.\n");
864                 return -ENODEV;
865         }
866
867         return i915_drm_freeze(drm_dev);
868 }
869
/* Early thaw after creating the hibernation image. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
877
/* Thaw after creating the hibernation image: undo the freeze. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
885
886 static int i915_pm_poweroff(struct device *dev)
887 {
888         struct pci_dev *pdev = to_pci_dev(dev);
889         struct drm_device *drm_dev = pci_get_drvdata(pdev);
890
891         return i915_drm_freeze(drm_dev);
892 }
893
/*
 * Runtime-PM suspend callback: power the GPU down while the system stays
 * up. Statement order below is significant — the hangcheck timer must be
 * stopped before pm.suspended is set, and the opregion notification goes
 * out last. Always returns 0.
 */
static int i915_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We should never get here on platforms without runtime PM. */
	WARN_ON(!HAS_RUNTIME_PM(dev));
	/* All forcewake references must be dropped before powering down. */
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/* Haswell: enter package C8 for the deepest power savings. */
	if (HAS_PC8(dev))
		hsw_enable_pc8(dev_priv);

	/* Invalidate userspace GTT mmaps so the next fault wakes us up. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
925
/*
 * Runtime-PM resume callback: mirror image of i915_runtime_suspend().
 * Note the reversed ordering — the opregion D0 notification and clearing
 * of pm.suspended happen before the hardware is brought back up.
 * Always returns 0.
 */
static int i915_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	/* Tell firmware we are back in D0 (see note in runtime_suspend). */
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Haswell: leave package C8 now that we are awake again. */
	if (HAS_PC8(dev))
		hsw_disable_pc8(dev_priv);

	DRM_DEBUG_KMS("Device resumed\n");
	return 0;
}
945
/*
 * Power-management dispatch table (used when DRIVER_MODESET is set; the
 * legacy .suspend/.resume hooks in the drm_driver below cover UMS).
 */
static const struct dev_pm_ops i915_pm_ops = {
	/* System suspend/resume (S3). */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	/* Hibernation (S4): freeze/thaw around image creation, then
	 * poweroff; restore_early/restore reuse the resume paths. */
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	/* Runtime PM: opportunistic power-down while the system runs. */
	.runtime_suspend = i915_runtime_suspend,
	.runtime_resume = i915_runtime_resume,
};
960
/* VMA callbacks for GEM object mmaps: i915 handles the faults itself,
 * open/close just maintain the GEM reference counts. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
966
/* File operations for /dev/dri/cardN: everything is delegated to the
 * DRM core except the 32-bit compat ioctl translation. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	/* Translate ioctls from 32-bit userspace on 64-bit kernels. */
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
980
/*
 * The i915 drm_driver definition. Note this is deliberately non-const:
 * i915_init() adjusts driver_features (AGP, MODESET) and num_ioctls at
 * module load, and i915_pci_probe() may clear DRIVER_USE_AGP.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	/* PRIME buffer sharing (dma-buf export/import). */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb buffers for simple (unaccelerated) scanout allocation. */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1027
/* PCI glue: binds the devices in pciidlist and wires in the PM table. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
1035
/*
 * Module entry point: decide between KMS and UMS operation based on the
 * kernel config, the i915.modeset parameter and the vga console, then
 * register the PCI driver. Returns 0 on success or a negative errno.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	/* modeset=1 forces KMS even without CONFIG_DRM_I915_KMS. */
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* vga=... text mode on the command line vetoes the KMS default,
	 * but only if the user did not ask for modeset explicitly (-1). */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* UMS cannot provide accurate vblank timestamps. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
1071
1072 static void __exit i915_exit(void)
1073 {
1074 #ifndef CONFIG_DRM_I915_UMS
1075         if (!(driver.driver_features & DRIVER_MODESET))
1076                 return; /* Never loaded a driver. */
1077 #endif
1078
1079         drm_pci_exit(&driver, &i915_pci_driver);
1080 }
1081
/* Standard module registration and metadata. */
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");