Merge branch 'linux-4.3' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into...
author    Dave Airlie <airlied@redhat.com>
          Fri, 11 Sep 2015 04:38:36 +0000 (14:38 +1000)
committer Dave Airlie <airlied@redhat.com>
          Fri, 11 Sep 2015 04:38:36 +0000 (14:38 +1000)
Three nouveau regression fixes.
* 'linux-4.3' of git://anongit.freedesktop.org/git/nouveau/linux-2.6:
  drm/nouveau/device: enable c800 quirk for tecra w50
  drm/nouveau/clk/gt215: Unbreak engine pausing for GT21x/MCP7x
  drm/nouveau/gr/nv04: fix big endian setting on gr context

94 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/fiji_smc.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
drivers/gpu/drm/amd/amdgpu/tonga_smc.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_plane.h
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/dispnv04/dfp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/uapi/drm/i915_drm.h
include/video/samsung_fimd.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index aa2dcf578dd6d0d4e51b6339e601195b5a83c67d..668939a14206b4113991b69636915d562ea3d55c 100644
@@ -98,6 +98,9 @@ extern int amdgpu_sched_hw_submission;
 #define AMDGPU_MAX_COMPUTE_RINGS               8
 #define AMDGPU_MAX_VCE_RINGS                   2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES              2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS                       4
 
@@ -262,7 +265,7 @@ struct amdgpu_buffer_funcs {
        unsigned        fill_num_dw;
 
        /* used for buffer clearing */
-       void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+       void (*emit_fill_buffer)(struct amdgpu_ib *ib,
                                 /* value to write to memory */
                                 uint32_t src_data,
                                 /* dst addr in bytes */
@@ -340,6 +343,8 @@ struct amdgpu_ring_funcs {
        int (*test_ring)(struct amdgpu_ring *ring);
        int (*test_ib)(struct amdgpu_ring *ring);
        bool (*is_lockup)(struct amdgpu_ring *ring);
+       /* insert NOP packets */
+       void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
@@ -440,12 +445,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
-                                      struct fence **array,
-                                      uint32_t count,
-                                      bool wait_all,
-                                      bool intr,
-                                      signed long t);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+                                 struct fence **array,
+                                 uint32_t count,
+                                 bool intr,
+                                 signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -717,6 +721,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
                      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *fence);
@@ -1214,6 +1219,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1665,7 +1671,6 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
@@ -1709,6 +1714,7 @@ struct amdgpu_sdma {
        uint32_t                feature_version;
 
        struct amdgpu_ring      ring;
+       bool                    burst_nop;
 };
 
 /*
@@ -2057,7 +2063,7 @@ struct amdgpu_device {
        struct amdgpu_gfx               gfx;
 
        /* sdma */
-       struct amdgpu_sdma              sdma[2];
+       struct amdgpu_sdma              sdma[AMDGPU_MAX_SDMA_INSTANCES];
        struct amdgpu_irq_src           sdma_trap_irq;
        struct amdgpu_irq_src           sdma_illegal_inst_irq;
 
@@ -2196,6 +2202,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
        ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int i;
+
+       for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+               if (&adev->sdma[i].ring == ring)
+                       break;
+
+       if (i < AMDGPU_MAX_SDMA_INSTANCES)
+               return &adev->sdma[i];
+       else
+               return NULL;
+}
+
 /*
  * ASICs macro.
  */
@@ -2248,7 +2269,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
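
The new amdgpu_get_sdma_instance() helper lets ring-level code recover the
per-instance SDMA state (notably the new burst_nop flag) from a bare ring
pointer. A minimal sketch of the intended call pattern:

    struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
    /* NULL means the ring is not an SDMA ring at all */
    bool use_burst = sdma && sdma->burst_nop;
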
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6a206f15635fdf93bef32511faffc23627f20f35..3b355aeb62fd353320fd47260bb05263e0e998ab 100644
@@ -354,7 +354,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+                       if ((lobj->allowed_domains & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
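
The reworked test no longer treats CPU as the only "cheap" source domain: the
move is skipped whenever the buffer already sits in a domain the submission
allows and the byte-move budget is exhausted. A worked example, assuming the
domain bits from include/uapi/drm/amdgpu_drm.h (CPU=0x1, GTT=0x2, VRAM=0x4):

    uint32_t allowed_domains = 0x4 | 0x2;  /* VRAM | GTT */
    uint32_t current_domain  = 0x2;        /* BO currently lives in GTT */
    uint32_t preferred       = 0x4;        /* the CS would prefer VRAM */

    /* old check: GTT != CPU, so the move was still attempted;
     * new check: GTT is allowed, so past the threshold the BO stays put */
    bool keep_in_place = (allowed_domains & current_domain) != 0 &&
                         (preferred & current_domain) == 0;
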
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 42d1a22c119942decff10c8e92930792d6856afb..6ff6ae945794a24167d6403a48bb8aa57d2fa4a2 100644
@@ -244,7 +244,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 
        if (adev->vram_scratch.robj == NULL) {
                r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     NULL, &adev->vram_scratch.robj);
                if (r) {
                        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 81b821247dde5ca4fa3992083813fe848ac97e99..8a122b1b77861028c123301726b8bb440537ad55 100644
@@ -126,8 +126,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = amdgpu_gem_object_create(adev, aligned_size, 0,
                                       AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, true,
-                                      &gobj);
+                                      AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                      true, &gobj);
        if (ret) {
                printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
                       aligned_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index f446bf2fedc99266576e2a9b0bc81cd141048435..1be2bd6d07eac6593274038967b484d9525f04e3 100644
@@ -851,22 +851,6 @@ static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
        return false;
 }
 
-static bool amdgpu_test_signaled_all(struct fence **fences, uint32_t count)
-{
-       int idx;
-       struct fence *fence;
-
-       for (idx = 0; idx < count; ++idx) {
-               fence = fences[idx];
-               if (fence) {
-                       if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-                               return false;
-               }
-       }
-
-       return true;
-}
-
 struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
@@ -885,7 +869,7 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;
 
-       return amdgpu_fence_wait_multiple(adev, &f, 1, false, intr, t);
+       return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
 }
 
 /**
@@ -894,23 +878,18 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
  * @adev:     amdgpu device
  * @array:    the fence array with amdgpu fence pointer
  * @count:    the number of the fence array
- * @wait_all: the flag of wait all(true) or wait any(false)
  * @intr:     when sleep, set the current task interruptable or not
  * @t:        timeout to wait
  *
- * If wait_all is true, it will return when all fences are signaled or timeout.
- * If wait_all is false, it will return when any fence is signaled or timeout.
+ * It will return when any fence is signaled or timeout.
  */
-signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
-                                      struct fence **array,
-                                      uint32_t count,
-                                      bool wait_all,
-                                      bool intr,
-                                      signed long t)
-{
-       long idx = 0;
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+                                 struct fence **array, uint32_t count,
+                                 bool intr, signed long t)
+{
        struct amdgpu_wait_cb *cb;
        struct fence *fence;
+       unsigned idx;
 
        BUG_ON(!array);
 
@@ -927,10 +906,7 @@ signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
                        if (fence_add_callback(fence,
                                        &cb[idx].base, amdgpu_fence_wait_cb)) {
                                /* The fence is already signaled */
-                               if (wait_all)
-                                       continue;
-                               else
-                                       goto fence_rm_cb;
+                               goto fence_rm_cb;
                        }
                }
        }
@@ -945,9 +921,7 @@ signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
                 * amdgpu_test_signaled_any must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
-               if (!wait_all && amdgpu_test_signaled_any(array, count))
-                       break;
-               if (wait_all && amdgpu_test_signaled_all(array, count))
+               if (amdgpu_test_signaled_any(array, count))
                        break;
 
                if (adev->needs_reset) {
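
With wait_all gone, the wait reduces to the classic "register a callback per
fence, then sleep until any of them fires" pattern. A condensed sketch of the
sleep loop (device-reset handling and callback teardown omitted); the signaled
test must come after set_current_state() to avoid racing the wake-up from the
fence callback:

    while (t > 0) {
            if (intr)
                    set_current_state(TASK_INTERRUPTIBLE);
            else
                    set_current_state(TASK_UNINTERRUPTIBLE);

            if (amdgpu_test_signaled_any(array, count))
                    break;

            if (intr && signal_pending(current)) {
                    t = -ERESTARTSYS;
                    break;
            }
            t = schedule_timeout(t);
    }
    __set_current_state(TASK_RUNNING);
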
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e02db0b2e8393e47bda5b6e084c2ae1f0d3631cd..cbd3a486c5c2c0bc8ce29f965dec462eb88f6569 100644
@@ -125,7 +125,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 
        if (adev->gart.robj == NULL) {
                r = amdgpu_bo_create(adev, adev->gart.table_size,
-                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+                                    PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     NULL, &adev->gart.robj);
                if (r) {
                        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4afc507820c01db355600631e67f98a3e5d4a644..5839fab374bf62dac0a5781a4617da0e5335aafd 100644
@@ -615,6 +615,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->initial_domain;
                info.domain_flags = robj->flags;
+               amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
@@ -622,17 +623,19 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
                        r = -EPERM;
+                       amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                      AMDGPU_GEM_DOMAIN_GTT |
                                                      AMDGPU_GEM_DOMAIN_CPU);
+               amdgpu_bo_unreserve(robj);
                break;
        default:
+               amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }
 
-       amdgpu_bo_unreserve(robj);
 out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
@@ -653,7 +656,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
-                                    0, ttm_bo_type_device,
+                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                    ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 57adcad2f7ba19e7261efd97faa0e5c7d57b9ab3..08b09d55b96fedbe77bfa7a9925e61f8bd3028a5 100644
@@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                        placements[c].fpfn =
                                adev->mc.visible_vram_size >> PAGE_SHIFT;
                        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                               TTM_PL_FLAG_VRAM;
+                               TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
                }
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
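
Adding TTM_PL_FLAG_TOPDOWN to the CPU-invisible VRAM placement pairs with the
allocation-flag changes throughout this merge: buffers the CPU must touch now
pass AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, while GPU-only buffers (the VM page
directory and page tables) pass AMDGPU_GEM_CREATE_NO_CPU_ACCESS and can be
packed top-down, above the visible aperture. A hypothetical helper, not in the
patch, summarizing the convention:

    static u64 pick_vram_flags(bool cpu_touched)
    {
            return cpu_touched ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
                               : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
    }
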
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 7d442c51063e3a07f61fcf2e65de644e947a513f..9bec91484c24ee18001a9c32864f486c51eaaf02 100644
@@ -131,6 +131,21 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
        return 0;
 }
 
+/** amdgpu_ring_insert_nop - insert NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @count: the number of NOP packets to insert
+ *
+ * This is the generic insert_nop function for rings except SDMA
+ */
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
@@ -143,10 +158,13 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
  */
 void amdgpu_ring_commit(struct amdgpu_ring *ring)
 {
+       uint32_t count;
+
        /* We pad to match fetch size */
-       while (ring->wptr & ring->align_mask) {
-               amdgpu_ring_write(ring, ring->nop);
-       }
+       count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
+       count %= ring->align_mask + 1;
+       ring->funcs->insert_nop(ring, count);
+
        mb();
        amdgpu_ring_set_wptr(ring);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index b92525329d6cef1d6a403e3442dfa9886b70c803..74dad270362caea3a0d96c4d05b9dec7eef6e6ae 100644
@@ -367,8 +367,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
                spin_unlock(&sa_manager->wq.lock);
-               t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
-                                               MAX_SCHEDULE_TIMEOUT);
+               t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+                                         false, MAX_SCHEDULE_TIMEOUT);
                r = (t > 0) ? 0 : t;
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index f93fb35414884dff65126e98d3b01dd281824507..de98fbd2971eded37ecb896921255d38787ce7b5 100644
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+{
+       struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+       return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+}
+
 static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
        struct amdgpu_job *sched_job;
@@ -75,6 +81,7 @@ static void amdgpu_sched_process_job(struct amd_sched_job *job)
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
+       .dependency = amdgpu_sched_dependency,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
 };
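
The new .dependency hook hands the scheduler one unsignaled fence at a time;
amdgpu_sync_get_fence() (added below) drops entries that have already signaled.
A hypothetical synchronous consumer, just to illustrate the contract -- the
returned reference belongs to the caller:

    struct fence *f;

    /* drain the job's dependencies; NULL means nothing left to wait for */
    while ((f = sched_ops->dependency(job))) {
            fence_wait(f, false);
            fence_put(f);
    }
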
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4fffb253933184a23e49a46a8ce5dbc74a519fb0..068aeaff7183b8227f0a27873af71213ac968e30 100644
@@ -142,6 +142,18 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
        return 0;
 }
 
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (s_fence)
+               return s_fence->owner;
+       else if (a_fence)
+               return a_fence->owner;
+       return AMDGPU_FENCE_OWNER_UNDEFINED;
+}
+
 /**
  * amdgpu_sync_resv - use the semaphores to sync to a reservation object
  *
@@ -158,7 +170,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 {
        struct reservation_object_list *flist;
        struct fence *f;
-       struct amdgpu_fence *fence;
+       void *fence_owner;
        unsigned i;
        int r = 0;
 
@@ -176,22 +188,22 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
-               fence = f ? to_amdgpu_fence(f) : NULL;
-               if (fence && fence->ring->adev == adev) {
+               if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
+                       fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
-                           (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                           (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
-                            (fence->owner == AMDGPU_FENCE_OWNER_VM)))
+                            (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;
 
                        /* Ignore fence from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                           fence->owner == owner)
+                           fence_owner == owner)
                                continue;
                }
 
@@ -202,6 +214,28 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        return r;
 }
 
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       struct fence *f;
+       int i;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+               f = e->fence;
+
+               hash_del(&e->node);
+               kfree(e);
+
+               if (!fence_is_signaled(f))
+                       return f;
+
+               fence_put(f);
+       }
+       return NULL;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync)
 {
        struct amdgpu_sync_entry *e;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 399143541d8a2c377967d8df3183a62a594abced..b5abd5cde413ffaa42934aa0bf4a731ec8bb8fe2 100644
@@ -859,7 +859,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
        r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0,
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                             NULL, &adev->stollen_vga_memory);
        if (r) {
                return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b87355ccfb1d071282d183ceab7d6bc22c86fffe..2cf6c6b06e3b157b756fc978126361257becbacc 100644
@@ -154,7 +154,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                            NULL, &adev->uvd.vcpu_bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
@@ -221,31 +223,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-       unsigned size;
-       void *ptr;
-       const struct common_firmware_header *hdr;
-       int i;
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       int i, r;
 
        if (adev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-               if (atomic_read(&adev->uvd.handles[i]))
-                       break;
-
-       if (i == AMDGPU_MAX_UVD_HANDLES)
-               return 0;
+       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+               if (handle != 0) {
+                       struct fence *fence;
 
-       hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+                       amdgpu_uvd_note_usage(adev);
 
-       size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-       size -= le32_to_cpu(hdr->ucode_size_bytes);
+                       r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
 
-       ptr = adev->uvd.cpu_addr;
-       ptr += le32_to_cpu(hdr->ucode_size_bytes);
+                       fence_wait(fence, false);
+                       fence_put(fence);
 
-       adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(adev->uvd.saved_bo, ptr, size);
+                       adev->uvd.filp[i] = NULL;
+                       atomic_set(&adev->uvd.handles[i], 0);
+               }
+       }
 
        return 0;
 }
@@ -270,12 +273,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
        ptr = adev->uvd.cpu_addr;
        ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-       if (adev->uvd.saved_bo != NULL) {
-               memcpy(ptr, adev->uvd.saved_bo, size);
-               kfree(adev->uvd.saved_bo);
-               adev->uvd.saved_bo = NULL;
-       } else
-               memset(ptr, 0, size);
+       memset(ptr, 0, size);
 
        return 0;
 }
@@ -905,7 +903,9 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        int r, i;
 
        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                            NULL, &bo);
        if (r)
                return r;
 
@@ -952,7 +952,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        int r, i;
 
        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                            NULL, &bo);
        if (r)
                return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 1a984c934b1f274b7d099f9acf4c8352d3d6b59f..3cab96c42aa8843487190248b98d999fb6b3598c 100644
@@ -141,7 +141,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
        /* allocate firmware, stack and heap BO */
 
        r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                            NULL, &adev->vce.vcpu_bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
                return r;
@@ -836,6 +838,10 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
        struct fence *fence = NULL;
        int r;
 
+       /* skip vce ring1 ib test for now, since it's not reliable */
+       if (ring == &ring->adev->vce.ring[1])
+               return 0;
+
        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 83b7ce6f5f72a19e1f1d3964cd37da2a1fac582b..f68b7cdc370a8694bb489e97c82d27e490e3c3b0 100644
@@ -627,9 +627,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 {
        uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
+       void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned count = 0;
        uint64_t addr;
 
+       /* sync to everything on unmapping */
+       if (!(flags & AMDGPU_PTE_VALID))
+               owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -638,8 +643,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                uint64_t pte;
                int r;
 
-               amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
-                                AMDGPU_FENCE_OWNER_VM);
+               amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
                r = reservation_object_reserve_shared(pt->tbo.resv);
                if (r)
                        return r;
@@ -790,17 +794,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        ib->length_dw = 0;
 
-       if (!(flags & AMDGPU_PTE_VALID)) {
-               unsigned i;
-
-               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       struct amdgpu_fence *f = vm->ids[i].last_id_use;
-                       r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
-                       if (r)
-                               return r;
-               }
-       }
-
        r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
                                  mapping->it.last + 1, addr + mapping->offset,
                                  flags, gtt_flags);
@@ -1106,7 +1099,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
-                                    AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+                                    AMDGPU_GEM_DOMAIN_VRAM,
+                                    AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+                                    NULL, &pt);
                if (r)
                        goto error_free;
 
@@ -1306,7 +1301,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        vm->page_directory_fence = NULL;
 
        r = amdgpu_bo_create(adev, pd_size, align, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0,
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, &vm->page_directory);
        if (r)
                return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9ba0a7d5bc8e8aa3eed21a30e834331bd97364a8..92b6acadfc5270188b958049e4a468ead9f21ddc 100644
@@ -139,7 +139,8 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
 
        tx_buf[0] = msg->address & 0xff;
        tx_buf[1] = msg->address >> 8;
-       tx_buf[2] = msg->request << 4;
+       tx_buf[2] = (msg->request << 4) |
+               ((msg->address >> 16) & 0xf);
        tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
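
DPCD addresses are 20 bits wide; before this fix bits 19:16 were silently
dropped, so any AUX access above 0xffff went to the wrong register. A worked
example for address 0x45230 with a native write (request 0x8):

    uint32_t addr = 0x45230;                 /* 20-bit DPCD address */
    uint8_t  req  = 0x8;                     /* DP_AUX_NATIVE_WRITE */

    uint8_t tx0 = addr & 0xff;                       /* 0x30 */
    uint8_t tx1 = (addr >> 8) & 0xff;                /* 0x52 */
    uint8_t tx2 = (req << 4) | ((addr >> 16) & 0xf); /* 0x84; 0x80 before */
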
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 3920c1e346f8e6dbf74981eba763ceca2cfe41d1..9ea9de457da373f702b401633ca4f7113748cbf6 100644
@@ -188,6 +188,19 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
+static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       int i;
+
+       for (i = 0; i < count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       amdgpu_ring_write(ring, ring->nop |
+                                         SDMA_NOP_COUNT(count - 1));
+               else
+                       amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -213,8 +226,8 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
-       while ((ring->wptr & 7) != 4)
-               amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+       cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
@@ -501,6 +514,8 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma[i].feature_version >= 20)
+                       adev->sdma[i].burst_nop = true;
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
@@ -815,8 +830,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       while (ib->length_dw & 0x7)
-               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       u32 pad_count;
+       int i;
+
+       pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+       for (i = 0; i < pad_count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       ib->ptr[ib->length_dw++] =
+                                       SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
+                                       SDMA_NOP_COUNT(pad_count - 1);
+               else
+                       ib->ptr[ib->length_dw++] =
+                                       SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
 
 /**
@@ -1303,6 +1329,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
        .test_ring = cik_sdma_ring_test_ring,
        .test_ib = cik_sdma_ring_test_ib,
        .is_lockup = cik_sdma_ring_is_lockup,
+       .insert_nop = cik_sdma_ring_insert_nop,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1363,16 +1390,16 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
  *
  * Fill GPU buffers using the DMA engine (CIK).
  */
-static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
                                      uint32_t src_data,
                                      uint64_t dst_offset,
                                      uint32_t byte_count)
 {
-       amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
-       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-       amdgpu_ring_write(ring, src_data);
-       amdgpu_ring_write(ring, byte_count);
+       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = src_data;
+       ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
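
Burst NOPs do not shrink the padding: the same number of dwords is written,
but the first header advertises how many follow-on dwords the engine may
consume in a single fetch. A sketch of a five-dword pad, assuming firmware
with burst support (feature_version >= 20):

    uint32_t pad[5];
    int i;

    pad[0] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) | SDMA_NOP_COUNT(5 - 1);
    for (i = 1; i < 5; i++)
            pad[i] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
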
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index a3e3dfaa01a4330a276f604ae912b65af3d9c512..7f6d457f250a477cbfc9cb82b0efecd0207a0ea4 100644
                                         (((op) & 0xFF) << 0))
 /* sDMA opcodes */
 #define        SDMA_OPCODE_NOP                                   0
+#      define SDMA_NOP_COUNT(x)                          (((x) & 0x3FFF) << 16)
 #define        SDMA_OPCODE_COPY                                  1
 #       define SDMA_COPY_SUB_OPCODE_LINEAR                0
 #       define SDMA_COPY_SUB_OPCODE_TILED                 1
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index ace870afc7d45154a6bb6013cb45b773a8e63512..44fa96ad47099b765ac81e5c439766a8f9849392 100644
@@ -1596,9 +1596,9 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 
        if (pi->sys_info.nb_dpm_enable) {
                if (ps->force_high)
-                       cz_dpm_nbdpm_lm_pstate_enable(adev, true);
-               else
                        cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+               else
+                       cz_dpm_nbdpm_lm_pstate_enable(adev, true);
        }
 
        return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4b255ac3043c9f5ad8d68ac150d20019c342d15c..e4d101b1252a47eaf7a2c7e35c2d8d83f737d762 100644
@@ -1353,7 +1353,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* restore original selection */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 70eee807421fd5cad9e2c5011a298a3886e4b721..6411e824467164831eef8af634051f95b8faba69 100644
@@ -1329,7 +1329,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* restore original selection */
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 493c8c9c7faa77bf6fcb8963ab08fdf5670a38a9..322edea65857872ebd084ff322ee6fff6bf31e13 100644
@@ -762,7 +762,9 @@ int fiji_smu_init(struct amdgpu_device *adev)
 
        /* Allocate FW image data structure and header buffer */
        ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                              true, AMDGPU_GEM_DOMAIN_VRAM,
+                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                              NULL, toc_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for TOC buffer\n");
                return -ENOMEM;
@@ -770,7 +772,9 @@ int fiji_smu_init(struct amdgpu_device *adev)
 
        /* Allocate buffer for SMU internal buffer */
        ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+                              true, AMDGPU_GEM_DOMAIN_VRAM,
+                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                              NULL, smu_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
                return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fab7b236f37fa7753eb6983b4b177aa3dfd70587..4bd1e5cf65ca81a04de64ec775164c5e5c410947 100644
@@ -3786,7 +3786,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                /* save restore block */
                if (adev->gfx.rlc.save_restore_obj == NULL) {
                        r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-                                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, &adev->gfx.rlc.save_restore_obj);
                        if (r) {
                                dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
                                return r;
@@ -3827,7 +3829,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 
                if (adev->gfx.rlc.clear_state_obj == NULL) {
                        r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-                                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, &adev->gfx.rlc.clear_state_obj);
                        if (r) {
                                dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
                                gfx_v7_0_rlc_fini(adev);
@@ -3864,7 +3868,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
        if (adev->gfx.rlc.cp_table_size) {
                if (adev->gfx.rlc.cp_table_obj == NULL) {
                        r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-                                            AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, &adev->gfx.rlc.cp_table_obj);
                        if (r) {
                                dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
                                gfx_v7_0_rlc_fini(adev);
@@ -5598,6 +5604,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .test_ring = gfx_v7_0_ring_test_ring,
        .test_ib = gfx_v7_0_ring_test_ib,
        .is_lockup = gfx_v7_0_ring_is_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5614,6 +5621,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .test_ring = gfx_v7_0_ring_test_ring,
        .test_ib = gfx_v7_0_ring_test_ib,
        .is_lockup = gfx_v7_0_ring_is_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 818edb37fa9cd8428d584d419597f22c3fe3045b..53f07439a51285dd29a729724d2a0bc8583c9c88 100644
@@ -2005,7 +2005,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - gart enable
  *
  * @rdev: amdgpu_device pointer
  *
@@ -2015,7 +2015,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 #define DEFAULT_SH_MEM_BASES   (0x6000)
 #define FIRST_COMPUTE_VMID     (8)
 #define LAST_COMPUTE_VMID      (16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
        int i;
        uint32_t sh_mem_config;
@@ -2282,7 +2282,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
        vi_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
 
-       gmc_v8_0_init_compute_vmid(adev);
+       gfx_v8_0_init_compute_vmid(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        /*
@@ -3240,7 +3240,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
 
                /* enable the doorbell if requested */
                if (use_doorbell) {
-                       if (adev->asic_type == CHIP_CARRIZO) {
+                       if ((adev->asic_type == CHIP_CARRIZO) ||
+                           (adev->asic_type == CHIP_FIJI)) {
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4378,6 +4379,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .test_ring = gfx_v8_0_ring_test_ring,
        .test_ib = gfx_v8_0_ring_test_ib,
        .is_lockup = gfx_v8_0_ring_is_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -4394,6 +4396,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .test_ring = gfx_v8_0_ring_test_ring,
        .test_ib = gfx_v8_0_ring_test_ib,
        .is_lockup = gfx_v8_0_ring_is_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 10218828face140d13261ebc8e7ce3a76cf43113..774528ab8704387f00525618b1c48578387b70df 100644
@@ -523,17 +523,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
@@ -852,6 +846,13 @@ static int gmc_v7_0_early_init(void *handle)
        return 0;
 }
 
+static int gmc_v7_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
        int r;
@@ -976,6 +977,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
        gmc_v7_0_gart_disable(adev);
 
        return 0;
@@ -1301,7 +1303,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
        .early_init = gmc_v7_0_early_init,
-       .late_init = NULL,
+       .late_init = gmc_v7_0_late_init,
        .sw_init = gmc_v7_0_sw_init,
        .sw_fini = gmc_v7_0_sw_fini,
        .hw_init = gmc_v7_0_hw_init,
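
The VM_CONTEXT1_CNTL hunk above stops hard-wiring the fault-interrupt enables;
instead, late_init takes a reference on the fault source and hw_fini drops it,
so enable and disable stay balanced across suspend/resume. A sketch of the
pairing, assuming amdgpu_irq_get()/amdgpu_irq_put() reference-count the source
(as their use here implies):

    /* late_init: the first reference enables the interrupt source */
    r = amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
    if (r)
            return r;

    /* hw_fini: dropping the last reference disables it again */
    amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
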
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 78109b750d29728d4066e1cd34932b63667b6c80..9a07742620d0361ad054930ff3b07c75c9bbcf2c 100644
@@ -653,19 +653,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
@@ -852,6 +845,13 @@ static int gmc_v8_0_early_init(void *handle)
        return 0;
 }
 
+static int gmc_v8_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v8_0_sw_init(void *handle)
 {
        int r;
@@ -978,6 +978,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
        gmc_v8_0_gart_disable(adev);
 
        return 0;
@@ -1288,7 +1289,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
        .early_init = gmc_v8_0_early_init,
-       .late_init = NULL,
+       .late_init = gmc_v8_0_late_init,
        .sw_init = gmc_v8_0_sw_init,
        .sw_fini = gmc_v8_0_sw_fini,
        .hw_init = gmc_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
index c723602c7b0cc865570200646b79fe6e890f9056..ee6a041cb288d7c84044883191c0e44a14d17df1 100644
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
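
The new count field occupies bits 29:16 of the VI NOP header. For example, a
burst header covering four trailing NOP dwords:

    uint32_t hdr = SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
                   SDMA_PKT_NOP_HEADER_COUNT(5 - 1); /* (4 & 0x3fff) << 16 */
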
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c6f1e2f12b5f7fc1a5689349a0195e99b3637044..c900aa942adef4f31674ddb98070c7702c6600b3 100644
@@ -623,7 +623,9 @@ int iceland_smu_init(struct amdgpu_device *adev)
 
        /* Allocate FW image data structure and header buffer */
        ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                              true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                              true, AMDGPU_GEM_DOMAIN_VRAM,
+                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                              NULL, toc_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for TOC buffer\n");
                return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 715e02d3bfbacb4e5ccf472bc2b56a6c442516df..14e87234171aeacb9cbbbedafa358e22825b5526 100644
@@ -146,6 +146,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma[i].feature_version >= 20)
+                       adev->sdma[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -218,6 +220,19 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
+static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       int i;
+
+       for (i = 0; i < count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       amdgpu_ring_write(ring, ring->nop |
+                               SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+               else
+                       amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -245,8 +260,8 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
-       while ((ring->wptr & 7) != 2)
-               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+       sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
        /* base must be 32 byte aligned */
@@ -879,8 +894,19 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       while (ib->length_dw & 0x7)
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       u32 pad_count;
+       int i;
+
+       pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+       for (i = 0; i < pad_count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       ib->ptr[ib->length_dw++] =
+                               SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+                               SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+               else
+                       ib->ptr[ib->length_dw++] =
+                               SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1314,6 +1340,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .test_ring = sdma_v2_4_ring_test_ring,
        .test_ib = sdma_v2_4_ring_test_ib,
        .is_lockup = sdma_v2_4_ring_is_lockup,
+       .insert_nop = sdma_v2_4_ring_insert_nop,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1375,16 +1402,16 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
                                       uint32_t src_data,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-       amdgpu_ring_write(ring, src_data);
-       amdgpu_ring_write(ring, byte_count);
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = src_data;
+       ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
index 67128c8e78b847f66198baf86e57c9538db510a6..9bfe92df15f712b86f45e674a8c995f10812ed0f 100644 (file)
@@ -218,6 +218,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma[i].feature_version >= 20)
+                       adev->sdma[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -304,6 +306,19 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
        }
 }
 
+static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       int i;
+
+       for (i = 0; i < count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       amdgpu_ring_write(ring, ring->nop |
+                               SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+               else
+                       amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -330,8 +345,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
-       while ((ring->wptr & 7) != 2)
-               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+       sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
@@ -999,8 +1013,19 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       while (ib->length_dw & 0x7)
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       u32 pad_count;
+       int i;
+
+       pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+       for (i = 0; i < pad_count; i++)
+               if (sdma && sdma->burst_nop && (i == 0))
+                       ib->ptr[ib->length_dw++] =
+                               SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+                               SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+               else
+                       ib->ptr[ib->length_dw++] =
+                               SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1438,6 +1463,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
        .test_ring = sdma_v3_0_ring_test_ring,
        .test_ib = sdma_v3_0_ring_test_ib,
        .is_lockup = sdma_v3_0_ring_is_lockup,
+       .insert_nop = sdma_v3_0_ring_insert_nop,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1499,16 +1525,16 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
                                       uint32_t src_data,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-       amdgpu_ring_write(ring, src_data);
-       amdgpu_ring_write(ring, byte_count);
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = src_data;
+       ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
index 099b7b56113c6e5cfab9ed977dcdd6e4b7d3d3e3..e5ebd084288dbc3ab7437de8a06fbde44b7658eb 100644 (file)
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __TONGA_SDMA_PKT_OPEN_H_ */
index 5fc53a40c7ac190c2a06f52542546f52b5f0cda8..1f5ac941a610819f434958cb04b8bb5bb6946439 100644 (file)
@@ -761,7 +761,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
        /* Allocate FW image data structure and header buffer */
        ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+                              true, AMDGPU_GEM_DOMAIN_VRAM,
+                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                              NULL, toc_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for TOC buffer\n");
                return -ENOMEM;
@@ -769,7 +771,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
        /* Allocate buffer for SMU internal buffer */
        ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+                              true, AMDGPU_GEM_DOMAIN_VRAM,
+                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                              NULL, smu_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
                return -ENOMEM;
index 9ac383bc6c1fad659ba69d9b9f212fe7e888ac17..5fac5da694f0d12a1bb415d2a5f557cb25461c43 100644 (file)
@@ -886,6 +886,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .test_ring = uvd_v4_2_ring_test_ring,
        .test_ib = uvd_v4_2_ring_test_ib,
        .is_lockup = amdgpu_ring_test_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
index de4b3f57902d24320cc90c446bf9f4c4d5d27dde..2d5c59c318afb5b0265ccf98461208de81d983b7 100644 (file)
@@ -825,6 +825,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = uvd_v5_0_ring_test_ib,
        .is_lockup = amdgpu_ring_test_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
index 66c975870e974870011de9de0603c4c5926070f6..d9f553fce5310936c560647db64accda8bc9184e 100644 (file)
@@ -805,6 +805,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = uvd_v6_0_ring_test_ib,
        .is_lockup = amdgpu_ring_test_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
index 303d961d57bd49f8a40a4c263f00bf5ebbc1e85d..cd16df543f64e881eaee35fd8dea409385e055d8 100644 (file)
@@ -643,6 +643,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .is_lockup = amdgpu_ring_test_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
index 4349658081ff152c81897ade6741791b877274e3..f0656dfb53f3ad2adfda85c61666246fa6a0ae21 100644 (file)
@@ -32,8 +32,8 @@
 #include "vid.h"
 #include "vce/vce_3_0_d.h"
 #include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
@@ -426,17 +426,41 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 static bool vce_v3_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 mask = 0;
+       int idx;
 
-       return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+       for (idx = 0; idx < 2; ++idx) {
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
+               if (idx == 0)
+                       mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+               else
+                       mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+       }
+
+       return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
 static int vce_v3_0_wait_for_idle(void *handle)
 {
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 mask = 0;
+       int idx;
+
+       for (idx = 0; idx < 2; ++idx) {
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
+               if (idx == 0)
+                       mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+               else
+                       mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+       }
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+               if (!(RREG32(mmSRBM_STATUS2) & mask))
                        return 0;
        }
        return -ETIMEDOUT;
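The same two-instance harvest loop now appears in is_idle(), wait_for_idle() and (below) soft_reset(), differing only in which per-instance masks it ORs together. A hypothetical helper that factors the pattern out could look like the following; vce_v3_0_busy_mask() is not part of this series, and the masks are the SRBM_STATUS2 ones used above:

```
/*
 * Hypothetical refactor sketch, not in this series: build the SRBM
 * busy mask covering every VCE instance that survived harvesting.
 */
static u32 vce_v3_0_busy_mask(struct amdgpu_device *adev)
{
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;	/* fused off, never reports busy */

		mask |= idx == 0 ? SRBM_STATUS2__VCE0_BUSY_MASK :
				   SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	return mask;
}
```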
@@ -445,9 +469,21 @@ static int vce_v3_0_wait_for_idle(void *handle)
 static int vce_v3_0_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 mask = 0;
+       int idx;
+
+       for (idx = 0; idx < 2; ++idx) {
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
 
-       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-                       ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+               if (idx == 0)
+                       mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+               else
+                       mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+       }
+       WREG32_P(mmSRBM_SOFT_RESET, mask,
+                ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+                  SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
        mdelay(5);
 
        return vce_v3_0_start(adev);
@@ -608,6 +644,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .is_lockup = amdgpu_ring_test_lockup,
+       .insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
index c991973019d0b9eebcae91533b2264bf41c2749a..c6a1b4cc64581733a9a8218299571576e3e6957a 100644 (file)
@@ -31,7 +31,7 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
index 35b98757463305c9d1740542704f64cfc9db702a..2b655103ba79655b9600c5b3b8020b0b1c952f5f 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/time.h>
 #include "kfd_priv.h"
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 
 /*
index d99fe90991dc47344974f43e6d078f3314469308..9259f1b6664c60cb94894d8b0c309ca8e9e8b26e 100644 (file)
@@ -27,6 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 /* Initialize a given run queue struct */
@@ -56,34 +58,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next entity from a specified run queue with round robin policy.
- * It could return the same entity as current one if current is the only
- * available one in the queue. Return NULL if nothing available.
+ * Select next job from a specified run queue with round robin policy.
+ * Return NULL if nothing is available.
  */
-static struct amd_sched_entity *
-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
 {
        struct amd_sched_entity *entity;
+       struct amd_sched_job *job;
 
        spin_lock(&rq->lock);
 
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
-                       if (!kfifo_is_empty(&entity->job_queue)) {
+                       job = amd_sched_entity_pop_job(entity);
+                       if (job) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
-                               return rq->current_entity;
+                               return job;
                        }
                }
        }
 
        list_for_each_entry(entity, &rq->entities, list) {
 
-               if (!kfifo_is_empty(&entity->job_queue)) {
+               job = amd_sched_entity_pop_job(entity);
+               if (job) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
-                       return rq->current_entity;
+                       return job;
                }
 
                if (entity == rq->current_entity)
@@ -188,6 +192,39 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
        kfifo_free(&entity->job_queue);
 }
 
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+       struct amd_sched_entity *entity =
+               container_of(cb, struct amd_sched_entity, cb);
+       entity->dependency = NULL;
+       fence_put(f);
+       amd_sched_wakeup(entity->scheduler);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+       struct amd_gpu_scheduler *sched = entity->scheduler;
+       struct amd_sched_job *job;
+
+       if (ACCESS_ONCE(entity->dependency))
+               return NULL;
+
+       if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+               return NULL;
+
+       while ((entity->dependency = sched->ops->dependency(job))) {
+
+               if (fence_add_callback(entity->dependency, &entity->cb,
+                                      amd_sched_entity_wakeup))
+                       fence_put(entity->dependency);
+               else
+                       return NULL;
+       }
+
+       return job;
+}
+
 /**
  * Helper to submit a job to the job queue
  *
@@ -227,7 +264,6 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
        struct amd_sched_entity *entity = sched_job->s_entity;
        struct amd_sched_fence *fence = amd_sched_fence_create(
                entity, sched_job->owner);
-       int r;
 
        if (!fence)
                return -ENOMEM;
@@ -235,10 +271,10 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
        fence_get(&fence->base);
        sched_job->s_fence = fence;
 
-       r = wait_event_interruptible(entity->scheduler->job_scheduled,
-                                    amd_sched_entity_in(sched_job));
+       wait_event(entity->scheduler->job_scheduled,
+                  amd_sched_entity_in(sched_job));
 
-       return r;
+       return 0;
 }
 
 /**
@@ -260,22 +296,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next entity containing real IB submissions
+ * Select the next job to run
 */
-static struct amd_sched_entity *
-amd_sched_select_context(struct amd_gpu_scheduler *sched)
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
 {
-       struct amd_sched_entity *tmp;
+       struct amd_sched_job *job;
 
        if (!amd_sched_ready(sched))
                return NULL;
 
        /* Kernel run queue has higher priority than normal run queue*/
-       tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
-       if (tmp == NULL)
-               tmp = amd_sched_rq_select_entity(&sched->sched_rq);
+       job = amd_sched_rq_select_job(&sched->kernel_rq);
+       if (job == NULL)
+               job = amd_sched_rq_select_job(&sched->sched_rq);
 
-       return tmp;
+       return job;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -296,27 +332,24 @@ static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
-       int r;
+       int r, count;
 
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
-               struct amd_sched_entity *c_entity = NULL;
+               struct amd_sched_entity *entity;
                struct amd_sched_job *job;
                struct fence *fence;
 
                wait_event_interruptible(sched->wake_up_worker,
                        kthread_should_stop() ||
-                       (c_entity = amd_sched_select_context(sched)));
+                       (job = amd_sched_select_job(sched)));
 
-               if (!c_entity)
+               if (!job)
                        continue;
 
-               r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
-               if (r != sizeof(void *))
-                       continue;
+               entity = job->s_entity;
                atomic_inc(&sched->hw_rq_count);
-
                fence = sched->ops->run_job(job);
                if (fence) {
                        r = fence_add_callback(fence, &job->cb,
@@ -328,6 +361,8 @@ static int amd_sched_main(void *param)
                        fence_put(fence);
                }
 
+               count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+               WARN_ON(count != sizeof(job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
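Note the ordering change: select_job() only peeks at the entity's queue (via kfifo_out_peek() in amd_sched_entity_pop_job()), and the kfifo_out() that actually dequeues the job runs here, after run_job() has been issued. A job whose dependency is still unsignaled therefore stays at the head of its entity's queue and gets re-examined on the next wakeup. A userspace toy model of that peek-then-commit discipline, with fifo_* standing in for the kfifo calls:

```
/* Illustration only: dequeue is committed only after the job was issued. */
#include <stdio.h>

struct fifo { int buf[4]; unsigned int head, tail; };

static void fifo_push(struct fifo *f, int v) { f->buf[f->tail++ & 3] = v; }
static void fifo_pop(struct fifo *f) { f->head++; }

static int fifo_peek(const struct fifo *f, int *v)
{
	if (f->head == f->tail)
		return 0;		/* empty */
	*v = f->buf[f->head & 3];
	return 1;			/* job stays queued */
}

int main(void)
{
	struct fifo f = { .head = 0, .tail = 0 };
	int job;

	fifo_push(&f, 42);
	if (fifo_peek(&f, &job)) {	/* "select": head is untouched */
		printf("run job %d\n", job);
		fifo_pop(&f);		/* commit the dequeue only now */
	}
	return 0;
}
```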
index e797796dcad755ed20a067b1cce139883db0462d..2af0e4d4d817a044fa0d1ff881d06ba75adc7032 100644 (file)
@@ -45,6 +45,8 @@ struct amd_sched_entity {
        spinlock_t                      queue_lock;
        struct amd_gpu_scheduler        *scheduler;
        uint64_t                        fence_context;
+       struct fence                    *dependency;
+       struct fence_cb                 cb;
 };
 
 /**
@@ -89,6 +91,7 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
+       struct fence *(*dependency)(struct amd_sched_job *job);
        struct fence *(*run_job)(struct amd_sched_job *job);
        void (*process_job)(struct amd_sched_job *job);
 };
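The new dependency() hook is what amd_sched_entity_pop_job() polls: the driver hands back the next fence the job still has to wait on, or NULL once the job is runnable, and the scheduler parks the entity on that fence with fence_add_callback() instead of busy-waiting. A driver-side sketch under the assumption that a job carries a flat array of dependency fences; my_job and its fields are hypothetical, not amdgpu's actual structures, and reference handling is elided:

```
/* Hypothetical backend: hand the scheduler one unsignaled fence at a
 * time until none are left. */
struct my_job {
	struct amd_sched_job base;
	struct fence *deps[8];
	unsigned int num_deps, next_dep;
};

static struct fence *my_job_dependency(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	while (job->next_dep < job->num_deps) {
		struct fence *f = job->deps[job->next_dep++];

		if (!fence_is_signaled(f))
			return f;	/* scheduler waits on this fence */
	}

	return NULL;			/* ready to run */
}

static const struct amd_sched_backend_ops my_sched_ops = {
	.dependency	= my_job_dependency,
	/* .run_job and .process_job as before */
};
```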
index 434915448ea0be99b7786c6daf2650cfb93b728a..f7d5166f89b24ef740e854175927ad934652fed4 100644 (file)
@@ -1515,7 +1515,8 @@ retry:
                        copied_props++;
                }
 
-               if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+               if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
+                   !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
                        plane = obj_to_plane(obj);
                        plane_mask |= (1 << drm_plane_index(plane));
                        plane->old_fb = plane->fb;
@@ -1537,10 +1538,11 @@ retry:
        }
 
        if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+               /*
+                * Unlike commit, check_only does not clean up state.
+                * Below we call drm_atomic_state_free for it.
+                */
                ret = drm_atomic_check_only(state);
-               /* _check_only() does not free state, unlike _commit() */
-               if (!ret)
-                       drm_atomic_state_free(state);
        } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
                ret = drm_atomic_async_commit(state);
        } else {
@@ -1567,25 +1569,30 @@ out:
                plane->old_fb = NULL;
        }
 
+       if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               /*
+                * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive;
+                * if they weren't, this code would need to run on success
+                * for TEST_ONLY too.
+                */
+
+               for_each_crtc_in_state(state, crtc, crtc_state, i) {
+                       if (!crtc_state->event)
+                               continue;
+
+                       destroy_vblank_event(dev, file_priv,
+                                            crtc_state->event);
+               }
+       }
+
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }
 
-       if (ret) {
-               if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-                       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-                               if (!crtc_state->event)
-                                       continue;
-
-                               destroy_vblank_event(dev, file_priv,
-                                                    crtc_state->event);
-                       }
-               }
-
+       if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
                drm_atomic_state_free(state);
-       }
 
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
index 80a02a412607f9c1b38e20b5494033b969c9e800..291734e87fca7457da9eb3ec8e0ba80f262bd232 100644 (file)
@@ -159,6 +159,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
 }
 EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
 
+#define AUX_RETRY_INTERVAL 500 /* us */
+
 /**
  * DOC: dp helpers
  *
@@ -213,7 +215,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
                        return -EIO;
 
                case DP_AUX_NATIVE_REPLY_DEFER:
-                       usleep_range(400, 500);
+                       usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
                        break;
                }
        }
@@ -422,6 +424,90 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
               I2C_FUNC_10BIT_ADDR;
 }
 
+#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
+#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
+#define AUX_STOP_LEN 4
+#define AUX_CMD_LEN 4
+#define AUX_ADDRESS_LEN 20
+#define AUX_REPLY_PAD_LEN 4
+#define AUX_LENGTH_LEN 8
+
+/*
+ * Calculate the duration of the AUX request/reply in usec. Gives the
+ * "best" case estimate, ie. successful while as short as possible.
+ */
+static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg)
+{
+       int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+               AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;
+
+       if ((msg->request & DP_AUX_I2C_READ) == 0)
+               len += msg->size * 8;
+
+       return len;
+}
+
+static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
+{
+       int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+               AUX_CMD_LEN + AUX_REPLY_PAD_LEN;
+
+       /*
+        * For a read we expect as many bytes as were asked for. For writes there will
+        * be 0 or 1 data bytes. Assume 0 for the "best" case.
+        */
+       if (msg->request & DP_AUX_I2C_READ)
+               len += msg->size * 8;
+
+       return len;
+}
+
+#define I2C_START_LEN 1
+#define I2C_STOP_LEN 1
+#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */
+#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */
+
+/*
+ * Calculate the duration of the i2c transfer in usec, assuming
+ * the i2c bus speed is as specified. Gives the "worst" case
+ * estimate, i.e. successful, but as long as possible. Doesn't
+ * account for the "MOT" bit, and instead assumes each message
+ * includes a START, ADDRESS and STOP. Nor does it account for
+ * additional variable factors such as clock stretching.
+ */
+static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg,
+                                  int i2c_speed_khz)
+{
+       /* AUX bitrate is 1MHz, i2c bitrate as specified */
+       return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
+                            msg->size * I2C_DATA_LEN +
+                            I2C_STOP_LEN) * 1000, i2c_speed_khz);
+}
+
+/*
+ * Determine how many retries should be attempted to successfully transfer
+ * the specified message, based on the estimated durations of the
+ * i2c and AUX transfers.
+ */
+static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg,
+                             int i2c_speed_khz)
+{
+       int aux_time_us = drm_dp_aux_req_duration(msg) +
+               drm_dp_aux_reply_duration(msg);
+       int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz);
+
+       return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL);
+}
+
+/*
+ * FIXME currently assumes 10 kHz as some real world devices seem
+ * to require it. We should query/set the speed via DPCD if supported.
+ */
+static int dp_aux_i2c_speed_khz __read_mostly = 10;
+module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
+                "Assumed speed of the i2c bus in kHz, (1-400, default 10)");
+
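Since AUX runs at 1 Mbit/s, the bit counts above double as microseconds. With the default 10 kHz assumption the scaling only matters for larger transfers: a 16-byte I2C-over-AUX read costs 66 us for the request (10 + 20 + 4 + 4 + 20 + 8) and 170 us for the reply (42 + 16 * 8), but (1 + 9 + 16 * 9 + 1) * 1000 / 10 = 15500 us on the i2c side, so drm_dp_i2c_retry_count() returns DIV_ROUND_UP(15500, 236 + 500) = 22 retries. A 1-byte read works out to only 4, where the DP 1.2 floor of 7 retries takes over.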
 /*
  * Transfer a single I2C-over-AUX message and handle various error conditions,
  * retrying the transaction as appropriate.  It is assumed that the
@@ -434,13 +520,16 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
        unsigned int retry, defer_i2c;
        int ret;
-
        /*
         * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
         * is required to retry at least seven times upon receiving AUX_DEFER
         * before giving up the AUX transaction.
+        *
+        * We also try to account for the i2c bus speed.
         */
-       for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) {
+       int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
+
+       for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
                mutex_lock(&aux->hw_mutex);
                ret = aux->transfer(aux, msg);
                mutex_unlock(&aux->hw_mutex);
@@ -476,7 +565,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                         * For now just defer for long enough to hopefully be
                         * safe for all use-cases.
                         */
-                       usleep_range(500, 600);
+                       usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
                        continue;
 
                default:
@@ -506,7 +595,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                        aux->i2c_defer_count++;
                        if (defer_i2c < 7)
                                defer_i2c++;
-                       usleep_range(400, 500);
+                       usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
                        continue;
 
                default:
index 484e312e0a22d58908ca1cf3a2183d7f7ce35467..b3c730770b0f07e4cf3885ceb2939eae062ff6bd 100644 (file)
@@ -54,6 +54,13 @@ static const char * const decon_clks_name[] = {
        "sclk_decon_eclk",
 };
 
+static const uint32_t decon_formats[] = {
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+};
+
 static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
@@ -219,6 +226,17 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
        writel(val, ctx->addr + DECON_SHADOWCON);
 }
 
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+                                       struct exynos_drm_plane *plane)
+{
+       struct decon_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
 static void decon_update_plane(struct exynos_drm_crtc *crtc,
                               struct exynos_drm_plane *plane)
 {
@@ -232,8 +250,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
        if (ctx->suspended)
                return;
 
-       decon_shadow_protect_win(ctx, win, true);
-
        val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
        writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
@@ -265,15 +281,10 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
        val |= WINCONx_ENWIN_F;
        writel(val, ctx->addr + DECON_WINCONx(win));
 
-       decon_shadow_protect_win(ctx, win, false);
-
        /* standalone update */
        val = readl(ctx->addr + DECON_UPDATE);
        val |= STANDALONE_UPDATE_F;
        writel(val, ctx->addr + DECON_UPDATE);
-
-       if (ctx->i80_if)
-               atomic_set(&ctx->win_updated, 1);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -301,6 +312,20 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
        writel(val, ctx->addr + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+                               struct exynos_drm_plane *plane)
+{
+       struct decon_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       decon_shadow_protect_win(ctx, plane->zpos, false);
+
+       if (ctx->i80_if)
+               atomic_set(&ctx->win_updated, 1);
+}
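With the protect/unprotect pair moved out of decon_update_plane(), every register write a commit makes to a window now lands while the shadow registers are frozen, and the i80 win_updated flag is only raised once the window has been released. Per window, the atomic helpers now drive roughly this sequence:

```
decon_atomic_begin(crtc, plane);   /* freeze the window's shadow registers */
decon_update_plane(crtc, plane);   /* program position, size, buffer address */
decon_atomic_flush(crtc, plane);   /* unfreeze; hardware latches the update */
```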
+
 static void decon_swreset(struct decon_context *ctx)
 {
        unsigned int tries;
@@ -455,8 +480,10 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
        .enable_vblank          = decon_enable_vblank,
        .disable_vblank         = decon_disable_vblank,
        .commit                 = decon_commit,
+       .atomic_begin           = decon_atomic_begin,
        .update_plane           = decon_update_plane,
        .disable_plane          = decon_disable_plane,
+       .atomic_flush           = decon_atomic_flush,
        .te_handler             = decon_te_irq_handler,
 };
 
@@ -477,7 +504,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
                type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
                                                        DRM_PLANE_TYPE_OVERLAY;
                ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-                               1 << ctx->pipe, type, zpos);
+                               1 << ctx->pipe, type, decon_formats,
+                               ARRAY_SIZE(decon_formats), zpos);
                if (ret)
                        return ret;
        }
@@ -542,13 +570,21 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
 {
        struct decon_context *ctx = dev_id;
        u32 val;
+       int win;
 
        if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
                goto out;
 
        val = readl(ctx->addr + DECON_VIDINTCON1);
        if (val & VIDINTCON1_INTFRMDONEPEND) {
-               exynos_drm_crtc_finish_pageflip(ctx->crtc);
+               for (win = 0 ; win < WINDOWS_NR ; win++) {
+                       struct exynos_drm_plane *plane = &ctx->planes[win];
+
+                       if (!plane->pending_fb)
+                               continue;
+
+                       exynos_drm_crtc_finish_update(ctx->crtc, plane);
+               }
 
                /* clear */
                writel(VIDINTCON1_INTFRMDONEPEND,
index 07926547c94ff733090db523a2cb0be2ffa7e150..cbdb78ef3baca57cd1ddf701425b953f140377f2 100644 (file)
@@ -70,6 +70,18 @@ static const struct of_device_id decon_driver_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
 
+static const uint32_t decon_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGBX8888,
+       DRM_FORMAT_BGRX8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_RGBA8888,
+       DRM_FORMAT_BGRA8888,
+};
+
 static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
@@ -383,6 +395,17 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
        writel(val, ctx->regs + SHADOWCON);
 }
 
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+                                       struct exynos_drm_plane *plane)
+{
+       struct decon_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
 static void decon_update_plane(struct exynos_drm_crtc *crtc,
                               struct exynos_drm_plane *plane)
 {
@@ -410,9 +433,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
         * is set.
         */
 
-       /* protect windows */
-       decon_shadow_protect_win(ctx, win, true);
-
        /* buffer start address */
        val = (unsigned long)plane->dma_addr[0];
        writel(val, ctx->regs + VIDW_BUF_START(win));
@@ -510,14 +530,22 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
        val &= ~WINCONx_ENWIN;
        writel(val, ctx->regs + WINCON(win));
 
-       /* unprotect windows */
-       decon_shadow_protect_win(ctx, win, false);
-
        val = readl(ctx->regs + DECON_UPDATE);
        val |= DECON_UPDATE_STANDALONE_F;
        writel(val, ctx->regs + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+                                       struct exynos_drm_plane *plane)
+{
+       struct decon_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       decon_shadow_protect_win(ctx, plane->zpos, false);
+}
+
 static void decon_init(struct decon_context *ctx)
 {
        u32 val;
@@ -614,8 +642,10 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .enable_vblank = decon_enable_vblank,
        .disable_vblank = decon_disable_vblank,
        .wait_for_vblank = decon_wait_for_vblank,
+       .atomic_begin = decon_atomic_begin,
        .update_plane = decon_update_plane,
        .disable_plane = decon_disable_plane,
+       .atomic_flush = decon_atomic_flush,
 };
 
 
@@ -623,6 +653,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 {
        struct decon_context *ctx = (struct decon_context *)dev_id;
        u32 val, clear_bit;
+       int win;
 
        val = readl(ctx->regs + VIDINTCON1);
 
@@ -636,7 +667,14 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 
        if (!ctx->i80_if) {
                drm_crtc_handle_vblank(&ctx->crtc->base);
-               exynos_drm_crtc_finish_pageflip(ctx->crtc);
+               for (win = 0 ; win < WINDOWS_NR ; win++) {
+                       struct exynos_drm_plane *plane = &ctx->planes[win];
+
+                       if (!plane->pending_fb)
+                               continue;
+
+                       exynos_drm_crtc_finish_update(ctx->crtc, plane);
+               }
 
                /* set wait vsync event to zero and wake up queue. */
                if (atomic_read(&ctx->wait_vsync_event)) {
@@ -667,7 +705,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
                type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
                                                DRM_PLANE_TYPE_OVERLAY;
                ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-                                       1 << ctx->pipe, type, zpos);
+                                       1 << ctx->pipe, type, decon_formats,
+                                       ARRAY_SIZE(decon_formats), zpos);
                if (ret)
                        return ret;
        }
index c47899738eb48fe44ff60681fff06b9122afaed8..0872aa2f450f273a992bc414081e8501e11bf787 100644 (file)
@@ -25,14 +25,9 @@ static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-       if (exynos_crtc->enabled)
-               return;
-
        if (exynos_crtc->ops->enable)
                exynos_crtc->ops->enable(exynos_crtc);
 
-       exynos_crtc->enabled = true;
-
        drm_crtc_vblank_on(crtc);
 }
 
@@ -40,20 +35,10 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-       if (!exynos_crtc->enabled)
-               return;
-
-       /* wait for the completion of page flip. */
-       if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-                               (exynos_crtc->event == NULL), HZ/20))
-               exynos_crtc->event = NULL;
-
        drm_crtc_vblank_off(crtc);
 
        if (exynos_crtc->ops->disable)
                exynos_crtc->ops->disable(exynos_crtc);
-
-       exynos_crtc->enabled = false;
 }
 
 static bool
@@ -83,16 +68,32 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_crtc_state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+       struct drm_plane *plane;
+
+       exynos_crtc->event = crtc->state->event;
 
-       if (crtc->state->event) {
-               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-               exynos_crtc->event = crtc->state->event;
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+               if (exynos_crtc->ops->atomic_begin)
+                       exynos_crtc->ops->atomic_begin(exynos_crtc,
+                                                       exynos_plane);
        }
 }
 
 static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_crtc_state)
 {
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+       struct drm_plane *plane;
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+               if (exynos_crtc->ops->atomic_flush)
+                       exynos_crtc->ops->atomic_flush(exynos_crtc,
+                                                       exynos_plane);
+       }
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -140,13 +141,13 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
        if (!exynos_crtc)
                return ERR_PTR(-ENOMEM);
 
-       init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
        exynos_crtc->pipe = pipe;
        exynos_crtc->type = type;
        exynos_crtc->ops = ops;
        exynos_crtc->ctx = ctx;
 
+       init_waitqueue_head(&exynos_crtc->wait_update);
+
        crtc = &exynos_crtc->base;
 
        private->crtc[pipe] = crtc;
@@ -172,9 +173,6 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
        struct exynos_drm_crtc *exynos_crtc =
                to_exynos_crtc(private->crtc[pipe]);
 
-       if (!exynos_crtc->enabled)
-               return -EPERM;
-
        if (exynos_crtc->ops->enable_vblank)
                return exynos_crtc->ops->enable_vblank(exynos_crtc);
 
@@ -187,26 +185,31 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
        struct exynos_drm_crtc *exynos_crtc =
                to_exynos_crtc(private->crtc[pipe]);
 
-       if (!exynos_crtc->enabled)
-               return;
-
        if (exynos_crtc->ops->disable_vblank)
                exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
+{
+       wait_event_timeout(exynos_crtc->wait_update,
+                          (atomic_read(&exynos_crtc->pending_update) == 0),
+                          msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+                               struct exynos_drm_plane *exynos_plane)
 {
        struct drm_crtc *crtc = &exynos_crtc->base;
        unsigned long flags;
 
-       spin_lock_irqsave(&crtc->dev->event_lock, flags);
-       if (exynos_crtc->event) {
+       exynos_plane->pending_fb = NULL;
 
-               drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
-               drm_crtc_vblank_put(crtc);
-               wake_up(&exynos_crtc->pending_flip_queue);
+       if (atomic_dec_and_test(&exynos_crtc->pending_update))
+               wake_up(&exynos_crtc->wait_update);
 
-       }
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       if (exynos_crtc->event)
+               drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
 
        exynos_crtc->event = NULL;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
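This replaces the single event/pending_flip_queue handshake with a per-CRTC counter: the commit path pre-loads pending_update with one count per plane it touches (see the exynos_drm_drv.c hunk below), each finish_update() call from the interrupt handler drops one count and clears the plane's pending_fb, and wait_pending_update() lets the committer sleep until the counter reaches zero, with a 50 ms escape hatch. A userspace toy model of the same handshake, using a condition variable where the kernel uses a wait queue:

```
/* Illustration only: committer waits for one "done" per plane. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_update = PTHREAD_COND_INITIALIZER;
static int pending_update;

static void *vblank_irq(void *arg)
{
	int win;

	(void)arg;
	for (win = 0; win < 2; win++) {	/* one decrement per finished plane */
		pthread_mutex_lock(&lock);
		if (--pending_update == 0)
			pthread_cond_signal(&wait_update);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pending_update = 2;		/* this commit touches two planes */
	pthread_create(&irq, NULL, vblank_irq, NULL);

	pthread_mutex_lock(&lock);
	while (pending_update)		/* kernel side adds a 50 ms timeout */
		pthread_cond_wait(&wait_update, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(irq, NULL);
	puts("all plane updates latched");
	return 0;
}
```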
index 9e7027d6c2f6de7a2b58bd5852165063c6625547..f87d4abda6f7b5ca0f69712a887acd065f252d69 100644 (file)
@@ -25,7 +25,9 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
                                        void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+                                  struct exynos_drm_plane *exynos_plane);
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
 
 /* This function gets pipe value to crtc device matched with out_type. */
index fa5194caf2590dbc34df0313dc858c05a454642e..831d2e4cacf9d0bb951f5bbd9d7bbfab4b681c91 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 
 #include <linux/component.h>
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
+struct exynos_atomic_commit {
+       struct work_struct      work;
+       struct drm_device       *dev;
+       struct drm_atomic_state *state;
+       u32                     crtcs;
+};
+
+static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int i, ret;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+               if (!crtc->state->enable)
+                       continue;
+
+               ret = drm_crtc_vblank_get(crtc);
+               if (ret)
+                       continue;
+
+               exynos_drm_crtc_wait_pending_update(exynos_crtc);
+               drm_crtc_vblank_put(crtc);
+       }
+}
+
+static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
+{
+       struct drm_device *dev = commit->dev;
+       struct exynos_drm_private *priv = dev->dev_private;
+       struct drm_atomic_state *state = commit->state;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       struct drm_plane_state *plane_state;
+       struct drm_crtc_state *crtc_state;
+       int i;
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+
+       /*
+        * Exynos can't update planes while CRTCs and encoders are
+        * disabled: its update routines, especially for FIMD, require
+        * the clocks to be enabled. So the modeset operations have to
+        * be handled *before* the commit_planes() step; this way the
+        * relevant clocks are always enabled when the update is
+        * performed.
+        */
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+               atomic_set(&exynos_crtc->pending_update, 0);
+       }
+
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               struct exynos_drm_crtc *exynos_crtc =
+                                               to_exynos_crtc(plane->crtc);
+
+               if (!plane->crtc)
+                       continue;
+
+               atomic_inc(&exynos_crtc->pending_update);
+       }
+
+       drm_atomic_helper_commit_planes(dev, state);
+
+       exynos_atomic_wait_for_commit(state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+
+       drm_atomic_state_free(state);
+
+       spin_lock(&priv->lock);
+       priv->pending &= ~commit->crtcs;
+       spin_unlock(&priv->lock);
+
+       wake_up_all(&priv->wait);
+
+       kfree(commit);
+}
+
+static void exynos_drm_atomic_work(struct work_struct *work)
+{
+       struct exynos_atomic_commit *commit = container_of(work,
+                               struct exynos_atomic_commit, work);
+
+       exynos_atomic_commit_complete(commit);
+}
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -47,6 +141,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        if (!private)
                return -ENOMEM;
 
+       init_waitqueue_head(&private->wait);
+       spin_lock_init(&private->lock);
+
        dev_set_drvdata(dev->dev, dev);
        dev->dev_private = (void *)private;
 
@@ -149,6 +246,64 @@ static int exynos_drm_unload(struct drm_device *dev)
        return 0;
 }
 
+static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
+{
+       bool pending;
+
+       spin_lock(&priv->lock);
+       pending = priv->pending & crtcs;
+       spin_unlock(&priv->lock);
+
+       return pending;
+}
+
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+                        bool async)
+{
+       struct exynos_drm_private *priv = dev->dev_private;
+       struct exynos_atomic_commit *commit;
+       int i, ret;
+
+       commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+       if (!commit)
+               return -ENOMEM;
+
+       ret = drm_atomic_helper_prepare_planes(dev, state);
+       if (ret) {
+               kfree(commit);
+               return ret;
+       }
+
+       /* This is the point of no return */
+
+       INIT_WORK(&commit->work, exynos_drm_atomic_work);
+       commit->dev = dev;
+       commit->state = state;
+
+       /*
+        * Wait until all affected CRTCs have completed previous commits
+        * and mark them as pending.
+        */
+       for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+               if (state->crtcs[i])
+                       commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+       }
+
+       wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
+
+       spin_lock(&priv->lock);
+       priv->pending |= commit->crtcs;
+       spin_unlock(&priv->lock);
+
+       drm_atomic_helper_swap_state(dev, state);
+
+       if (async)
+               schedule_work(&commit->work);
+       else
+               exynos_atomic_commit_complete(commit);
+
+       return 0;
+}
+
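Commits are now serialized per CRTC through the priv->pending bitmask rather than globally: a commit touching CRTCs 0 and 2 computes crtcs = 0b101, sleeps in wait_event() until no earlier commit still owns either bit, claims the bits under priv->lock, and releases them at the end of exynos_atomic_commit_complete(). Commits on disjoint CRTCs can therefore run concurrently while overlapping ones queue; with CRTC 1 busy (pending = 0b010), a new commit with crtcs = 0b101 passes commit_is_pending() immediately, whereas one with crtcs = 0b110 waits.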
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
        struct drm_connector *connector;
@@ -248,25 +403,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 
 static const struct drm_ioctl_desc exynos_ioctls[] = {
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
+                       DRM_UNLOCKED | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
                        DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
-                       exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
-                       vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
-                       exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
-                       exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
-                       exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
-                       exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
-                       exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
-                       exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
-                       exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
+                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -283,11 +438,10 @@ static const struct file_operations exynos_drm_driver_fops = {
 };
 
 static struct drm_driver exynos_drm_driver = {
-       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+                                 | DRIVER_ATOMIC | DRIVER_RENDER,
        .load                   = exynos_drm_load,
        .unload                 = exynos_drm_unload,
-       .suspend                = exynos_drm_suspend,
-       .resume                 = exynos_drm_resume,
        .open                   = exynos_drm_open,
        .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
index 6b8a30f23473d694e68e302aeb4f3593147f1c35..b7ba21dfb69641f36410550711f024fde541bb72 100644 (file)
@@ -74,6 +74,7 @@ struct exynos_drm_plane {
        unsigned int v_ratio;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
        unsigned int zpos;
+       struct drm_framebuffer *pending_fb;
 };
 
 /*
@@ -87,6 +88,8 @@ struct exynos_drm_plane {
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  * @wait_for_vblank: wait for vblank interrupt to make sure that
  *     hardware overlay is updated.
+ * @atomic_begin: prepare a window to receive an update
+ * @atomic_flush: mark the end of a window update
  * @update_plane: apply hardware specific overlay data to registers.
  * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
@@ -107,10 +110,14 @@ struct exynos_drm_crtc_ops {
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
        void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
+       void (*atomic_begin)(struct exynos_drm_crtc *crtc,
+                             struct exynos_drm_plane *plane);
        void (*update_plane)(struct exynos_drm_crtc *crtc,
                             struct exynos_drm_plane *plane);
        void (*disable_plane)(struct exynos_drm_crtc *crtc,
                              struct exynos_drm_plane *plane);
+       void (*atomic_flush)(struct exynos_drm_crtc *crtc,
+                             struct exynos_drm_plane *plane);
        void (*te_handler)(struct exynos_drm_crtc *crtc);
        void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
 };
@@ -129,6 +136,8 @@ struct exynos_drm_crtc_ops {
  *     this pipe value.
  * @enabled: if the crtc is enabled or not
  * @event: vblank event that is currently queued for flip
+ * @wait_update: wait for all pending plane updates to finish
+ * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
  */
@@ -136,9 +145,9 @@ struct exynos_drm_crtc {
        struct drm_crtc                 base;
        enum exynos_drm_output_type     type;
        unsigned int                    pipe;
-       bool                            enabled;
-       wait_queue_head_t               pending_flip_queue;
        struct drm_pending_vblank_event *event;
+       wait_queue_head_t               wait_update;
+       atomic_t                        pending_update;
        const struct exynos_drm_crtc_ops        *ops;
        void                            *ctx;
 };
@@ -164,6 +173,9 @@ struct drm_exynos_file_private {
  * @da_space_size: size of device address space.
  *     if 0 then default value is used for it.
  * @pipe: the pipe number for this crtc/manager.
+ * @pending: the crtcs that have pending updates to finish
+ * @lock: protect access to @pending
+ * @wait: wait for an atomic commit to finish
  */
 struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
@@ -179,6 +191,11 @@ struct exynos_drm_private {
        unsigned long da_space_size;
 
        unsigned int pipe;
+
+       /* for atomic commit */
+       u32                     pending;
+       spinlock_t              lock;
+       wait_queue_head_t       wait;
 };
 
 /*
@@ -237,6 +254,9 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 }
 #endif
 
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+                        bool async);
+
 
 extern struct platform_driver fimd_driver;
 extern struct platform_driver exynos5433_decon_driver;
index 9738f4e0c6eb07d7ac442525e9113a6f774fda06..084280859589669d17f5e3e04ac334f21710b4ac 100644 (file)
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer object.
- * @buf_cnt: a buffer count to drm framebuffer.
  * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
  */
 struct exynos_drm_fb {
        struct drm_framebuffer          fb;
-       unsigned int                    buf_cnt;
        struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };
 
@@ -98,10 +95,6 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
 
-       /* This fb should have only one gem object. */
-       if (WARN_ON(exynos_fb->buf_cnt != 1))
-               return -EINVAL;
-
        return drm_gem_handle_create(file_priv,
                        &exynos_fb->exynos_gem_obj[0]->base, handle);
 }
@@ -122,119 +115,77 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
        .dirty          = exynos_drm_fb_dirty,
 };
 
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-                                               unsigned int cnt)
-{
-       struct exynos_drm_fb *exynos_fb;
-
-       exynos_fb = to_exynos_fb(fb);
-
-       exynos_fb->buf_cnt = cnt;
-}
-
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
-{
-       struct exynos_drm_fb *exynos_fb;
-
-       exynos_fb = to_exynos_fb(fb);
-
-       return exynos_fb->buf_cnt;
-}
-
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
                            struct drm_mode_fb_cmd2 *mode_cmd,
-                           struct drm_gem_object *obj)
+                           struct exynos_drm_gem_obj **gem_obj,
+                           int count)
 {
        struct exynos_drm_fb *exynos_fb;
-       struct exynos_drm_gem_obj *exynos_gem_obj;
+       int i;
        int ret;
 
-       exynos_gem_obj = to_exynos_gem_obj(obj);
-
-       ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-       if (ret < 0)
-               return ERR_PTR(ret);
-
        exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
        if (!exynos_fb)
                return ERR_PTR(-ENOMEM);
 
+       for (i = 0; i < count; i++) {
+               ret = check_fb_gem_memory_type(dev, gem_obj[i]);
+               if (ret < 0)
+                       goto err;
+
+               exynos_fb->exynos_gem_obj[i] = gem_obj[i];
+       }
+
        drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-       exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
 
        ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-       if (ret) {
-               kfree(exynos_fb);
+       if (ret < 0) {
                DRM_ERROR("failed to initialize framebuffer\n");
-               return ERR_PTR(ret);
+               goto err;
        }
 
        return &exynos_fb->fb;
+
+err:
+       kfree(exynos_fb);
+       return ERR_PTR(ret);
 }
 
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER];
        struct drm_gem_object *obj;
-       struct exynos_drm_gem_obj *exynos_gem_obj;
-       struct exynos_drm_fb *exynos_fb;
-       int i, ret;
-
-       exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-       if (!exynos_fb)
-               return ERR_PTR(-ENOMEM);
-
-       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
-       if (!obj) {
-               DRM_ERROR("failed to lookup gem object\n");
-               ret = -ENOENT;
-               goto err_free;
-       }
-
-       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-       exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
-
-       DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
+       struct drm_framebuffer *fb;
+       int i;
+       int ret;
 
-       for (i = 1; i < exynos_fb->buf_cnt; i++) {
+       for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
                obj = drm_gem_object_lookup(dev, file_priv,
-                               mode_cmd->handles[i]);
+                                           mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
                        ret = -ENOENT;
-                       exynos_fb->buf_cnt = i;
-                       goto err_unreference;
+                       goto err;
                }
 
-               exynos_gem_obj = to_exynos_gem_obj(obj);
-               exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
-
-               ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-               if (ret < 0)
-                       goto err_unreference;
+               gem_objs[i] = to_exynos_gem_obj(obj);
        }
 
-       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-       if (ret) {
-               DRM_ERROR("failed to init framebuffer.\n");
-               goto err_unreference;
+       fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
+               goto err;
        }
 
-       return &exynos_fb->fb;
+       return fb;
 
-err_unreference:
-       for (i = 0; i < exynos_fb->buf_cnt; i++) {
-               struct drm_gem_object *obj;
+err:
+       while (i--)
+               drm_gem_object_unreference_unlocked(&gem_objs[i]->base);
 
-               obj = &exynos_fb->exynos_gem_obj[i]->base;
-               if (obj)
-                       drm_gem_object_unreference_unlocked(obj);
-       }
-err_free:
-       kfree(exynos_fb);
        return ERR_PTR(ret);
 }
 
@@ -267,41 +218,6 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
                exynos_drm_fbdev_init(dev);
 }
 
-static int exynos_atomic_commit(struct drm_device *dev,
-                               struct drm_atomic_state *state,
-                               bool async)
-{
-       int ret;
-
-       ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (ret)
-               return ret;
-
-       /* This is the point of no return */
-
-       drm_atomic_helper_swap_state(dev, state);
-
-       drm_atomic_helper_commit_modeset_disables(dev, state);
-
-       drm_atomic_helper_commit_modeset_enables(dev, state);
-
-       /*
-        * Exynos can't update planes with CRTCs and encoders disabled,
-        * its updates routines, specially for FIMD, requires the clocks
-        * to be enabled. So it is necessary to handle the modeset operations
-        * *before* the commit_planes() step, this way it will always
-        * have the relevant clocks enabled to perform the update.
-        */
-
-       drm_atomic_helper_commit_planes(dev, state);
-
-       drm_atomic_helper_cleanup_planes(dev, state);
-
-       drm_atomic_state_free(state);
-
-       return 0;
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
index 1c9e27c32cd1b230c569cb73fdbff56081bbf660..85e4445b920e30673bb7f3ae38591c5a29a5f902 100644 (file)
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H_
 
+#include "exynos_drm_gem.h"
+
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
                            struct drm_mode_fb_cmd2 *mode_cmd,
-                           struct drm_gem_object *obj);
+                           struct exynos_drm_gem_obj **gem_obj,
+                           int count);
 
 /* get gem object of a drm framebuffer */
 struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
@@ -25,11 +28,4 @@ struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
-/* set a buffer count to drm framebuffer. */
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-                                               unsigned int cnt);
-
-/* get a buffer count to drm framebuffer. */
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
-
 #endif
index 624595afbce0ecf077f02a7064a8e2d38cac8e7d..a221f753ad9c7a971450b2cf2836dc43f63b3d1b 100644 (file)
@@ -21,7 +21,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR          4
@@ -32,7 +31,7 @@
 
 struct exynos_drm_fbdev {
        struct drm_fb_helper            drm_fb_helper;
-       struct exynos_drm_gem_obj       *exynos_gem_obj;
+       struct exynos_drm_gem_obj       *obj;
 };
 
 static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -40,7 +39,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 {
        struct drm_fb_helper *helper = info->par;
        struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
-       struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj;
+       struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
        unsigned long vm_size;
        int ret;
 
@@ -75,37 +74,38 @@ static struct fb_ops exynos_drm_fb_ops = {
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-                                    struct drm_fb_helper_surface_size *sizes,
-                                    struct drm_framebuffer *fb)
+                                  struct drm_fb_helper_surface_size *sizes,
+                                  struct exynos_drm_gem_obj *obj)
 {
-       struct fb_info *fbi = helper->fbdev;
-       struct exynos_drm_gem_obj *obj;
+       struct fb_info *fbi;
+       struct drm_framebuffer *fb = helper->fb;
        unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
        unsigned int nr_pages;
        unsigned long offset;
 
+       fbi = drm_fb_helper_alloc_fbi(helper);
+       if (IS_ERR(fbi)) {
+               DRM_ERROR("failed to allocate fb info.\n");
+               return PTR_ERR(fbi);
+       }
+
+       fbi->par = helper;
+       fbi->flags = FBINFO_FLAG_DEFAULT;
+       fbi->fbops = &exynos_drm_fb_ops;
+
        drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
-       /* RGB formats use only one buffer */
-       obj = exynos_drm_fb_gem_obj(fb, 0);
-       if (!obj) {
-               DRM_DEBUG_KMS("gem object is null.\n");
-               return -EFAULT;
-       }
-
        nr_pages = obj->size >> PAGE_SHIFT;
 
        obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
                        pgprot_writecombine(PAGE_KERNEL));
        if (!obj->kvaddr) {
                DRM_ERROR("failed to map pages to kernel space.\n");
+               drm_fb_helper_release_fbi(helper);
                return -EIO;
        }
 
-       /* buffer count to framebuffer always is 1 at booting time. */
-       exynos_drm_fb_set_buf_cnt(fb, 1);
-
        offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
        offset += fbi->var.yoffset * fb->pitches[0];
 
@@ -120,9 +120,8 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                                    struct drm_fb_helper_surface_size *sizes)
 {
        struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct exynos_drm_gem_obj *obj;
        struct drm_device *dev = helper->dev;
-       struct fb_info *fbi;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct platform_device *pdev = dev->platformdev;
        unsigned long size;
@@ -140,47 +139,34 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        mutex_lock(&dev->struct_mutex);
 
-       fbi = drm_fb_helper_alloc_fbi(helper);
-       if (IS_ERR(fbi)) {
-               DRM_ERROR("failed to allocate fb info.\n");
-               ret = PTR_ERR(fbi);
-               goto out;
-       }
-
        size = mode_cmd.pitches[0] * mode_cmd.height;
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+       obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
        /*
         * If physically contiguous memory allocation fails and if IOMMU is
         * supported then try to get buffer from non physically contiguous
         * memory area.
         */
-       if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+       if (IS_ERR(obj) && is_drm_iommu_supported(dev)) {
                dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-               exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-                                                       size);
+               obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
        }
 
-       if (IS_ERR(exynos_gem_obj)) {
-               ret = PTR_ERR(exynos_gem_obj);
-               goto err_release_fbi;
+       if (IS_ERR(obj)) {
+               ret = PTR_ERR(obj);
+               goto out;
        }
 
-       exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+       exynos_fbdev->obj = obj;
 
-       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
-                       &exynos_gem_obj->base);
+       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1);
        if (IS_ERR(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
                goto err_destroy_gem;
        }
 
-       fbi->par = helper;
-       fbi->flags = FBINFO_FLAG_DEFAULT;
-       fbi->fbops = &exynos_drm_fb_ops;
-
-       ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
+       ret = exynos_drm_fbdev_update(helper, sizes, obj);
        if (ret < 0)
                goto err_destroy_framebuffer;
 
@@ -190,9 +176,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 err_destroy_framebuffer:
        drm_framebuffer_cleanup(helper->fb);
 err_destroy_gem:
-       exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_fbi:
-       drm_fb_helper_release_fbi(helper);
+       exynos_drm_gem_destroy(obj);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -285,11 +269,11 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
        struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+       struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
        struct drm_framebuffer *fb;
 
-       if (exynos_gem_obj->kvaddr)
-               vunmap(exynos_gem_obj->kvaddr);
+       if (obj->kvaddr)
+               vunmap(obj->kvaddr);
 
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
index 5def6bc073ebb222ea4656a865108efd557a0eb2..750a9e6b9e8d92c312e3bb685fb524f249ad0488 100644 (file)
@@ -59,6 +59,7 @@
 #define VIDWnALPHA1(win)       (VIDW_ALPHA + 0x04 + (win) * 8)
 
 #define VIDWx_BUF_START(win, buf)      (VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_START_S(win, buf)    (VIDW_BUF_START_S(buf) + (win) * 8)
 #define VIDWx_BUF_END(win, buf)                (VIDW_BUF_END(buf) + (win) * 8)
 #define VIDWx_BUF_SIZE(win, buf)       (VIDW_BUF_SIZE(buf) + (win) * 4)
 
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 
+static const uint32_t fimd_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+};
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
        struct platform_device *pdev)
 {
@@ -591,6 +600,16 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
 {
        u32 reg, bits, val;
 
+       /*
+        * The SHADOWCON/PRTCON register controls update timing.
+        *
+        * For example, if DMA starts while only the width value of a
+        * register has been set, the FIMD hardware could malfunction.
+        * While a window is protected, register fields with the '_F'
+        * prefix are not updated at vsync; they are applied only once
+        * the window is unprotected again.
+        */
+
        if (ctx->driver_data->has_shadowcon) {
                reg = SHADOWCON;
                bits = SHADOWCON_WINx_PROTECT(win);
@@ -607,6 +626,28 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
        writel(val, ctx->regs + reg);
 }
 
+static void fimd_atomic_begin(struct exynos_drm_crtc *crtc,
+                              struct exynos_drm_plane *plane)
+{
+       struct fimd_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       fimd_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
+                              struct exynos_drm_plane *plane)
+{
+       struct fimd_context *ctx = crtc->ctx;
+
+       if (ctx->suspended)
+               return;
+
+       fimd_shadow_protect_win(ctx, plane->zpos, false);
+}
+
 static void fimd_update_plane(struct exynos_drm_crtc *crtc,
                              struct exynos_drm_plane *plane)
 {
@@ -622,20 +663,6 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
        if (ctx->suspended)
                return;
 
-       /*
-        * SHADOWCON/PRTCON register is used for enabling timing.
-        *
-        * for example, once only width value of a register is set,
-        * if the dma is started then fimd hardware could malfunction so
-        * with protect window setting, the register fields with prefix '_F'
-        * wouldn't be updated at vsync also but updated once unprotect window
-        * is set.
-        */
-
-       /* protect windows */
-       fimd_shadow_protect_win(ctx, win, true);
-
-
        offset = plane->src_x * bpp;
        offset += plane->src_y * pitch;
 
@@ -707,9 +734,6 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
        if (ctx->driver_data->has_shadowcon)
                fimd_enable_shadow_channel_path(ctx, win, true);
 
-       /* Enable DMA channel and unprotect windows */
-       fimd_shadow_protect_win(ctx, win, false);
-
        if (ctx->i80_if)
                atomic_set(&ctx->win_updated, 1);
 }
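With the protect/unprotect pair hoisted out of fimd_update_plane() and fimd_disable_plane() into atomic_begin/atomic_flush, the commit path is expected to bracket each plane update so that every '_F' register write latches at a single vsync. Illustratively (the actual caller lives in the exynos crtc helpers, not in this hunk):

	exynos_crtc->ops->atomic_begin(exynos_crtc, plane);	/* protect window */
	exynos_crtc->ops->update_plane(exynos_crtc, plane);	/* program '_F' registers */
	exynos_crtc->ops->atomic_flush(exynos_crtc, plane);	/* unprotect; latch at vsync */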
@@ -723,16 +747,10 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
        if (ctx->suspended)
                return;
 
-       /* protect windows */
-       fimd_shadow_protect_win(ctx, win, true);
-
        fimd_enable_video_output(ctx, win, false);
 
        if (ctx->driver_data->has_shadowcon)
                fimd_enable_shadow_channel_path(ctx, win, false);
-
-       /* unprotect windows */
-       fimd_shadow_protect_win(ctx, win, false);
 }
 
 static void fimd_enable(struct exynos_drm_crtc *crtc)
@@ -875,8 +893,10 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
        .wait_for_vblank = fimd_wait_for_vblank,
+       .atomic_begin = fimd_atomic_begin,
        .update_plane = fimd_update_plane,
        .disable_plane = fimd_disable_plane,
+       .atomic_flush = fimd_atomic_flush,
        .te_handler = fimd_te_handler,
        .clock_enable = fimd_dp_clock_enable,
 };
@@ -884,7 +904,8 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
        struct fimd_context *ctx = (struct fimd_context *)dev_id;
-       u32 val, clear_bit;
+       u32 val, clear_bit, start, start_s;
+       int win;
 
        val = readl(ctx->regs + VIDINTCON1);
 
@@ -896,15 +917,25 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
        if (ctx->pipe < 0 || !ctx->drm_dev)
                goto out;
 
-       if (ctx->i80_if) {
-               exynos_drm_crtc_finish_pageflip(ctx->crtc);
+       if (!ctx->i80_if)
+               drm_crtc_handle_vblank(&ctx->crtc->base);
+
+       for (win = 0; win < WINDOWS_NR; win++) {
+               struct exynos_drm_plane *plane = &ctx->planes[win];
 
+               if (!plane->pending_fb)
+                       continue;
+
+               start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
+               start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
+               if (start == start_s)
+                       exynos_drm_crtc_finish_update(ctx->crtc, plane);
+       }
+
+       if (ctx->i80_if) {
                /* Exits triggering mode */
                atomic_set(&ctx->triggering, 0);
        } else {
-               drm_crtc_handle_vblank(&ctx->crtc->base);
-               exynos_drm_crtc_finish_pageflip(ctx->crtc);
-
                /* set wait vsync event to zero and wake up queue. */
                if (atomic_read(&ctx->wait_vsync_event)) {
                        atomic_set(&ctx->wait_vsync_event, 0);
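The start/start_s comparison above works because FIMD shadows its scanout registers: VIDWx_BUF_START holds the address software last programmed, while VIDWx_BUF_START_S is the hardware shadow copy latched at vsync. Equality means the pending framebuffer is now being scanned out. The per-window test in isolation, as a sketch:

	static bool fimd_win_update_done(struct fimd_context *ctx, int win)
	{
		u32 start   = readl(ctx->regs + VIDWx_BUF_START(win, 0));
		u32 start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));

		return start == start_s;	/* shadow has latched our address */
	}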
@@ -933,7 +964,8 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
                type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
                                                DRM_PLANE_TYPE_OVERLAY;
                ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-                                       1 << ctx->pipe, type, zpos);
+                                       1 << ctx->pipe, type, fimd_formats,
+                                       ARRAY_SIZE(fimd_formats), zpos);
                if (ret)
                        return ret;
        }
index ba008391a2fcceddfa78713856fde689df172b56..535b4ad6c4b14783a6ce099f81f4775759bd5ee6 100644 (file)
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR              0x0304
+#define G2D_SRC_STRIDE_REG             0x0308
 #define G2D_SRC_COLOR_MODE             0x030C
 #define G2D_SRC_LEFT_TOP               0x0310
 #define G2D_SRC_RIGHT_BOTTOM           0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR       0x0318
 #define G2D_DST_BASE_ADDR              0x0404
+#define G2D_DST_STRIDE_REG             0x0408
 #define G2D_DST_COLOR_MODE             0x040C
 #define G2D_DST_LEFT_TOP               0x0410
 #define G2D_DST_RIGHT_BOTTOM           0x0414
@@ -148,6 +150,7 @@ struct g2d_cmdlist {
  * A structure of buffer description
  *
  * @format: color format
+ * @stride: buffer stride/pitch in bytes
  * @left_x: the x coordinates of left top corner
  * @top_y: the y coordinates of left top corner
  * @right_x: the x coordinates of right bottom corner
@@ -156,6 +159,7 @@ struct g2d_cmdlist {
  */
 struct g2d_buf_desc {
        unsigned int    format;
+       unsigned int    stride;
        unsigned int    left_x;
        unsigned int    top_y;
        unsigned int    right_x;
@@ -589,6 +593,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 
        switch (reg_offset) {
        case G2D_SRC_BASE_ADDR:
+       case G2D_SRC_STRIDE_REG:
        case G2D_SRC_COLOR_MODE:
        case G2D_SRC_LEFT_TOP:
        case G2D_SRC_RIGHT_BOTTOM:
@@ -598,6 +603,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
                reg_type = REG_TYPE_SRC_PLANE2;
                break;
        case G2D_DST_BASE_ADDR:
+       case G2D_DST_STRIDE_REG:
        case G2D_DST_COLOR_MODE:
        case G2D_DST_LEFT_TOP:
        case G2D_DST_RIGHT_BOTTOM:
@@ -652,8 +658,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
                                                enum g2d_reg_type reg_type,
                                                unsigned long size)
 {
-       unsigned int width, height;
-       unsigned long area;
+       int width, height;
+       unsigned long bpp, last_pos;
 
        /*
         * check source and destination buffers only.
@@ -662,22 +668,37 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
        if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
                return true;
 
-       width = buf_desc->right_x - buf_desc->left_x;
+       /* This check also makes sure that right_x > left_x. */
+       width = (int)buf_desc->right_x - (int)buf_desc->left_x;
        if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
-               DRM_ERROR("width[%u] is out of range!\n", width);
+               DRM_ERROR("width[%d] is out of range!\n", width);
                return false;
        }
 
-       height = buf_desc->bottom_y - buf_desc->top_y;
+       /* This check also makes sure that bottom_y > top_y. */
+       height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
        if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
-               DRM_ERROR("height[%u] is out of range!\n", height);
+               DRM_ERROR("height[%d] is out of range!\n", height);
                return false;
        }
 
-       area = (unsigned long)width * (unsigned long)height *
-                                       g2d_get_buf_bpp(buf_desc->format);
-       if (area > size) {
-               DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+       bpp = g2d_get_buf_bpp(buf_desc->format);
+
+       /* Compute the position of the last byte that the engine accesses. */
+       last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
+               (unsigned long)buf_desc->stride +
+               (unsigned long)buf_desc->right_x * bpp - 1;
+
+       /*
+        * Since right_x > left_x and bottom_y > top_y, we already know
+        * that first_pos < last_pos (first_pos being the position of the
+        * first byte the engine accesses), so it only remains to check
+        * that last_pos is smaller than the buffer size.
+        */
+
+       if (last_pos >= size) {
+               DRM_ERROR("last engine access position [%lu] "
+                       "is out of range [%lu]!\n", last_pos, size);
                return false;
        }
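A worked example of the new bounds check (values hypothetical): for a 16bpp format (bpp = 2) with stride = 1024 bytes, right_x = 100 and bottom_y = 50,

	last_pos = (50 - 1) * 1024 + 100 * 2 - 1 = 50375

so the backing buffer must be at least 50376 bytes. The old width * height * bpp area test would have demanded only 100 * 50 * 2 = 10000 bytes (with left_x = top_y = 0), ignoring the stride entirely and accepting buffers the engine would overrun.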
 
@@ -973,8 +994,6 @@ static int g2d_check_reg_offset(struct device *dev,
                                goto err;
 
                        reg_type = g2d_get_reg_type(reg_offset);
-                       if (reg_type == REG_TYPE_NONE)
-                               goto err;
 
                        /* check userptr buffer type. */
                        if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -983,14 +1002,22 @@ static int g2d_check_reg_offset(struct device *dev,
                        } else
                                buf_info->types[reg_type] = BUF_TYPE_GEM;
                        break;
+               case G2D_SRC_STRIDE_REG:
+               case G2D_DST_STRIDE_REG:
+                       if (for_addr)
+                               goto err;
+
+                       reg_type = g2d_get_reg_type(reg_offset);
+
+                       buf_desc = &buf_info->descs[reg_type];
+                       buf_desc->stride = cmdlist->data[index + 1];
+                       break;
                case G2D_SRC_COLOR_MODE:
                case G2D_DST_COLOR_MODE:
                        if (for_addr)
                                goto err;
 
                        reg_type = g2d_get_reg_type(reg_offset);
-                       if (reg_type == REG_TYPE_NONE)
-                               goto err;
 
                        buf_desc = &buf_info->descs[reg_type];
                        value = cmdlist->data[index + 1];
@@ -1003,8 +1030,6 @@ static int g2d_check_reg_offset(struct device *dev,
                                goto err;
 
                        reg_type = g2d_get_reg_type(reg_offset);
-                       if (reg_type == REG_TYPE_NONE)
-                               goto err;
 
                        buf_desc = &buf_info->descs[reg_type];
                        value = cmdlist->data[index + 1];
@@ -1018,8 +1043,6 @@ static int g2d_check_reg_offset(struct device *dev,
                                goto err;
 
                        reg_type = g2d_get_reg_type(reg_offset);
-                       if (reg_type == REG_TYPE_NONE)
-                               goto err;
 
                        buf_desc = &buf_info->descs[reg_type];
                        value = cmdlist->data[index + 1];
index 67461b77f0401c989599897909bc3106273bb372..62b9ea1b07fb005c04da732490c06e91a2996e21 100644 (file)
@@ -668,7 +668,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
-               goto err;
+               return ERR_PTR(ret);
        }
 
        exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
index d9a68fd83120a5e35164fa5e5df5c03badfed7b5..7148224414672c37ef96a562b5f0d7f91931411a 100644 (file)
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 
-static const uint32_t formats[] = {
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_NV12,
-};
-
 /*
  * This function is to get X or Y size shown via screen. This needs length and
  * start position of CRTC.
@@ -132,7 +126,7 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
        if (!state->fb)
                return 0;
 
-       nr = exynos_drm_fb_get_buf_cnt(state->fb);
+       nr = drm_format_num_planes(state->fb->pixel_format);
        for (i = 0; i < nr; i++) {
                struct exynos_drm_gem_obj *obj =
                                        exynos_drm_fb_gem_obj(state->fb, i);
@@ -168,6 +162,8 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
                              state->src_x >> 16, state->src_y >> 16,
                              state->src_w >> 16, state->src_h >> 16);
 
+       exynos_plane->pending_fb = state->fb;
+
        if (exynos_crtc->ops->update_plane)
                exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
@@ -215,13 +211,14 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
 int exynos_plane_init(struct drm_device *dev,
                      struct exynos_drm_plane *exynos_plane,
                      unsigned long possible_crtcs, enum drm_plane_type type,
+                     const uint32_t *formats, unsigned int fcount,
                      unsigned int zpos)
 {
        int err;
 
        err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
-                                      &exynos_plane_funcs, formats,
-                                      ARRAY_SIZE(formats), type);
+                                      &exynos_plane_funcs, formats, fcount,
+                                      type);
        if (err) {
                DRM_ERROR("failed to initialize plane\n");
                return err;
index 8c88ae983c38c5220203f901e7afe2952f65f725..476c9340b591cb107f6fc66f4dde0e0da217c49f 100644 (file)
@@ -12,4 +12,5 @@
 int exynos_plane_init(struct drm_device *dev,
                      struct exynos_drm_plane *exynos_plane,
                      unsigned long possible_crtcs, enum drm_plane_type type,
+                     const uint32_t *formats, unsigned int fcount,
                      unsigned int zpos);
index 581af35861a6fcd5781cf207d5c1a821c0583d0d..75718e1bc3dd29eb41df43b43922548c77240b4c 100644 (file)
@@ -83,6 +83,12 @@ static const char fake_edid_info[] = {
        0x00, 0x00, 0x00, 0x06
 };
 
+static const uint32_t formats[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_NV12,
+};
+
 static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
 {
        struct vidi_context *ctx = crtc->ctx;
@@ -179,6 +185,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 {
        struct vidi_context *ctx = container_of(work, struct vidi_context,
                                        work);
+       int win;
 
        if (ctx->pipe < 0)
                return;
@@ -197,7 +204,14 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 
        mutex_unlock(&ctx->lock);
 
-       exynos_drm_crtc_finish_pageflip(ctx->crtc);
+       for (win = 0; win < WINDOWS_NR; win++) {
+               struct exynos_drm_plane *plane = &ctx->planes[win];
+
+               if (!plane->pending_fb)
+                       continue;
+
+               exynos_drm_crtc_finish_update(ctx->crtc, plane);
+       }
 }
 
 static int vidi_show_connection(struct device *dev,
@@ -435,7 +449,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
                type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
                                                DRM_PLANE_TYPE_OVERLAY;
                ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-                                       1 << ctx->pipe, type, zpos);
+                                       1 << ctx->pipe, type, formats,
+                                       ARRAY_SIZE(formats), zpos);
                if (ret)
                        return ret;
        }
index e68340c77676f3f0a8f56f07467b7558d37f56f5..7f81cce966d4b49e17516cbc76c1974124fa1b65 100644 (file)
@@ -43,6 +43,7 @@
 
 #define MIXER_WIN_NR           3
 #define MIXER_DEFAULT_WIN      0
+#define VP_DEFAULT_WIN         2
 
 /* The pixelformats that are natively supported by the mixer. */
 #define MXR_FORMAT_RGB565      4
@@ -74,6 +75,19 @@ enum mixer_flag_bits {
        MXR_BIT_VSYNC,
 };
 
+static const uint32_t mixer_formats[] = {
+       DRM_FORMAT_XRGB4444,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vp_formats[] = {
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_NV21,
+};
+
 struct mixer_context {
        struct platform_device *pdev;
        struct device           *dev;
@@ -716,6 +730,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
        struct mixer_context *ctx = arg;
        struct mixer_resources *res = &ctx->mixer_res;
        u32 val, base, shadow;
+       int win;
 
        spin_lock(&res->reg_slock);
 
@@ -742,7 +757,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
                }
 
                drm_crtc_handle_vblank(&ctx->crtc->base);
-               exynos_drm_crtc_finish_pageflip(ctx->crtc);
+               for (win = 0; win < MIXER_WIN_NR; win++) {
+                       struct exynos_drm_plane *plane = &ctx->planes[win];
+
+                       if (!plane->pending_fb)
+                               continue;
+
+                       exynos_drm_crtc_finish_update(ctx->crtc, plane);
+               }
 
                /* set wait vsync event to zero and wake up queue. */
                if (atomic_read(&ctx->wait_vsync_event)) {
@@ -1163,7 +1185,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
        struct mixer_context *ctx = dev_get_drvdata(dev);
        struct drm_device *drm_dev = data;
        struct exynos_drm_plane *exynos_plane;
-       enum drm_plane_type type;
        unsigned int zpos;
        int ret;
 
@@ -1172,10 +1193,23 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
                return ret;
 
        for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+               enum drm_plane_type type;
+               const uint32_t *formats;
+               unsigned int fcount;
+
                type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
                                                DRM_PLANE_TYPE_OVERLAY;
+               if (zpos < VP_DEFAULT_WIN) {
+                       formats = mixer_formats;
+                       fcount = ARRAY_SIZE(mixer_formats);
+               } else {
+                       formats = vp_formats;
+                       fcount = ARRAY_SIZE(vp_formats);
+               }
+
                ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-                                       1 << ctx->pipe, type, zpos);
+                                       1 << ctx->pipe, type, formats, fcount,
+                                       zpos);
                if (ret)
                        return ret;
        }
index 33aabc79813b70e03ae9eb2e07c54df847ef93f8..e3ec9049081fd89a774f055c270da8a4524cf09c 100644 (file)
@@ -2562,6 +2562,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
                return "PORT_DDI_D_2_LANES";
        case POWER_DOMAIN_PORT_DDI_D_4_LANES:
                return "PORT_DDI_D_4_LANES";
+       case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+               return "PORT_DDI_E_2_LANES";
        case POWER_DOMAIN_PORT_DSI:
                return "PORT_DSI";
        case POWER_DOMAIN_PORT_CRT:
index 1d887459e37fd717992ddbfb52228d8550e55a09..8edcec8ae592577297bd68f8c89e5c22cd9de609 100644 (file)
@@ -662,15 +662,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 
        pci_disable_device(drm_dev->pdev);
        /*
-        * During hibernation on some GEN4 platforms the BIOS may try to access
+        * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
         * leave the device in D0 on those platforms and hope the BIOS will
-        * power down the device properly. Platforms where this was seen:
-        * Lenovo Thinkpad X301, X61s
+        * power down the device properly. The issue was seen on multiple old
+        * GENs with different BIOS vendors, so an explicit blacklist is
+        * impractical; apply the workaround on everything pre-GEN6. Platforms
+        * where the issue was seen:
+        * Lenovo Thinkpad X301, X61s, X60, T60, X41
+        * Fujitsu FSC S7110
+        * Acer Aspire 1830T
         */
-       if (!(hibernation &&
-             drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
-             INTEL_INFO(dev_priv)->gen == 4))
+       if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
                pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
        return 0;
index 599441beea17e539f13a122ea398923101b1764b..b06e0308077193eaf2a2fff0931c228823099bbf 100644 (file)
@@ -182,6 +182,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_PORT_DDI_C_4_LANES,
        POWER_DOMAIN_PORT_DDI_D_2_LANES,
        POWER_DOMAIN_PORT_DDI_D_4_LANES,
+       POWER_DOMAIN_PORT_DDI_E_2_LANES,
        POWER_DOMAIN_PORT_DSI,
        POWER_DOMAIN_PORT_CRT,
        POWER_DOMAIN_PORT_OTHER,
@@ -214,6 +215,7 @@ enum hpd_pin {
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
+       HPD_PORT_E,
        HPD_NUM_PINS
 };
 
@@ -1415,6 +1417,10 @@ enum modeset_restore {
 #define DP_AUX_C 0x20
 #define DP_AUX_D 0x30
 
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
 struct ddi_vbt_port_info {
        /*
         * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1429,6 +1435,7 @@ struct ddi_vbt_port_info {
        uint8_t supports_dp:1;
 
        uint8_t alternate_aux_channel;
+       uint8_t alternate_ddc_pin;
 
        uint8_t dp_boost_level;
        uint8_t hdmi_boost_level;
@@ -1921,6 +1928,8 @@ struct drm_i915_private {
                        struct skl_wm_values skl_hw;
                        struct vlv_wm_values vlv;
                };
+
+               uint8_t max_level;
        } wm;
 
        struct i915_runtime_pm pm;
@@ -3376,13 +3385,13 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
-       u32 upper, lower, tmp;                                          \
-       tmp = I915_READ(upper_reg);                                     \
+       u32 upper, lower, old_upper, loop = 0;                          \
+       upper = I915_READ(upper_reg);                                   \
        do {                                                            \
-               upper = tmp;                                            \
+               old_upper = upper;                                      \
                lower = I915_READ(lower_reg);                           \
-               tmp = I915_READ(upper_reg);                             \
-       } while (upper != tmp);                                         \
+               upper = I915_READ(upper_reg);                           \
+       } while (upper != old_upper && loop++ < 2);                     \
        (u64)upper << 32 | lower; })
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
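The rewritten macro reads the upper half, the lower half, then the upper half again, retrying while the upper half changed in between; this is the standard way to take a consistent 64-bit sample through two 32-bit MMIO reads, and the loop counter bounds the retries on hardware where the pair never settles. A standalone sketch of the same pattern (rd32 stands in for the MMIO accessor):

	static u64 read_split_counter(u32 (*rd32)(u32 reg), u32 lo_reg, u32 hi_reg)
	{
		u32 upper, lower, old_upper;
		int loop = 0;

		upper = rd32(hi_reg);
		do {
			old_upper = upper;
			lower = rd32(lo_reg);
			upper = rd32(hi_reg);
		} while (upper != old_upper && loop++ < 2);

		return (u64)upper << 32 | lower;
	}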
index 923a3c4bf0b79c71b8a05c417211ae9507f919aa..a953d4975b8c08d237bac9aa4a7ecd3eaf29c28e 100644 (file)
@@ -1032,6 +1032,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
+               obj->dirty = 1; /* be paranoid  */
                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
@@ -1039,7 +1040,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 
                i915_vma_move_to_active(vma, req);
                if (obj->base.write_domain) {
-                       obj->dirty = 1;
                        i915_gem_request_assign(&obj->last_write_req, req);
 
                        intel_fb_obj_invalidate(obj, ORIGIN_CS);
index a36cb95ec798ec8a8a3e09da884cf6528d84c126..f361c4a5699550dc581ad06bb79aa36a8aebb803 100644 (file)
@@ -348,7 +348,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;
 
-       DRM_DEBUG_KMS("Memory reserved for graphics device: %luK, usable: %luK\n",
+       DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
                      dev_priv->gtt.stolen_size >> 10,
                      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
index 1118c39281f98cc272b23bcbded120c7ecdd4502..a2bceb70a3fdf7a63436ac5e6efa6c765b429e38 100644 (file)
@@ -61,6 +61,13 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
 };
 
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+       [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+       [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+       [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+       [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -1252,6 +1259,8 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val)
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
+       case PORT_E:
+               return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
@@ -1549,7 +1558,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
                intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-                                  hotplug_trigger, hpd_status_g4x,
+                                  hotplug_trigger, hpd_status_i915,
                                   i9xx_port_hotplug_long_detect);
                intel_hpd_irq_handler(dev, pin_mask, long_mask);
        }
@@ -1752,7 +1761,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+       u32 hotplug_trigger;
+
+       if (HAS_PCH_SPT(dev))
+               hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+       else
+               hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
        if (hotplug_trigger) {
                u32 dig_hotplug_reg, pin_mask, long_mask;
@@ -1760,9 +1774,23 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-                                  dig_hotplug_reg, hpd_cpt,
-                                  pch_port_hotplug_long_detect);
+               if (HAS_PCH_SPT(dev)) {
+                       intel_get_hpd_pins(&pin_mask, &long_mask,
+                                          hotplug_trigger,
+                                          dig_hotplug_reg, hpd_spt,
+                                          pch_port_hotplug_long_detect);
+
+                       /* detect PORTE HP event */
+                       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+                       if (pch_port_hotplug_long_detect(PORT_E,
+                                                        dig_hotplug_reg))
+                               long_mask |= 1 << HPD_PORT_E;
+               } else {
+                       intel_get_hpd_pins(&pin_mask, &long_mask,
+                                          hotplug_trigger, dig_hotplug_reg,
+                                          hpd_cpt, pch_port_hotplug_long_detect);
+               }
+
                intel_hpd_irq_handler(dev, pin_mask, long_mask);
        }
 
@@ -2984,6 +3012,11 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
                for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
                                enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+       } else if (HAS_PCH_SPT(dev)) {
+               hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+               for_each_intel_encoder(dev, intel_encoder)
+                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+                               enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
                for_each_intel_encoder(dev, intel_encoder)
@@ -3005,6 +3038,13 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+       /* enable SPT PORTE hot plug */
+       if (HAS_PCH_SPT(dev)) {
+               hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+               hotplug |= PORTE_HOTPLUG_ENABLE;
+               I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+       }
 }
 
 static void bxt_hpd_irq_setup(struct drm_device *dev)
index 8e46c348366bc867c7b81ed9ccdd72d7bc57c2fc..83a0888756d68402af1c4211b51372edba53f19d 100644 (file)
@@ -5949,6 +5949,7 @@ enum skl_disp_power_wells {
 #define SDE_AUXC_CPT           (1 << 26)
 #define SDE_AUXB_CPT           (1 << 25)
 #define SDE_AUX_MASK_CPT       (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT  (1 << 25)
 #define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
@@ -5959,6 +5960,10 @@ enum skl_disp_power_wells {
                                 SDE_PORTD_HOTPLUG_CPT |        \
                                 SDE_PORTC_HOTPLUG_CPT |        \
                                 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT   (SDE_PORTE_HOTPLUG_SPT |        \
+                                SDE_PORTD_HOTPLUG_CPT |        \
+                                SDE_PORTC_HOTPLUG_CPT |        \
+                                SDE_PORTB_HOTPLUG_CPT)
 #define SDE_GMBUS_CPT          (1 << 17)
 #define SDE_ERROR_CPT          (1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -6030,6 +6035,13 @@ enum skl_disp_power_wells {
 #define  PORTB_HOTPLUG_SHORT_DETECT    (1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT     (2 << 0)
 
+#define PCH_PORT_HOTPLUG2        0xc403C               /* SHOTPLUG_CTL2 */
+#define PORTE_HOTPLUG_ENABLE            (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK      (0x3 << 0)
+#define  PORTE_HOTPLUG_NO_DETECT       (0 << 0)
+#define  PORTE_HOTPLUG_SHORT_DETECT    (1 << 0)
+#define  PORTE_HOTPLUG_LONG_DETECT     (2 << 0)
+
 #define PCH_GPIOA               0xc5010
 #define PCH_GPIOB               0xc5014
 #define PCH_GPIOC               0xc5018
index c5b82fed95be235759bfd8a1f94f4fc97c4b87ee..b3e437b3bb54fe4d3e64839b87a161a7dd1240d8 100644 (file)
@@ -401,7 +401,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 {
        struct sdvo_device_mapping *p_mapping;
        const struct bdb_general_definitions *p_defs;
-       const union child_device_config *p_child;
+       const struct old_child_dev_config *child; /* legacy */
        int i, child_device_num, count;
        u16     block_size;
 
@@ -410,14 +410,14 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
                return;
        }
-       /* judge whether the size of child device meets the requirements.
-        * If the child device size obtained from general definition block
-        * is different with sizeof(struct child_device_config), skip the
-        * parsing of sdvo device info
+
+       /*
+        * Only parse SDVO mappings when the general definitions block child
+        * device size matches that of the *legacy* child device config
+        * struct. Thus, SDVO mapping will be skipped for newer VBTs.
         */
-       if (p_defs->child_dev_size != sizeof(*p_child)) {
-               /* different child dev size . Ignore it */
-               DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+       if (p_defs->child_dev_size != sizeof(*child)) {
+               DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
                return;
        }
        /* get the block size of general definitions */
@@ -427,37 +427,37 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                p_defs->child_dev_size;
        count = 0;
        for (i = 0; i < child_device_num; i++) {
-               p_child = child_device_ptr(p_defs, i);
-               if (!p_child->old.device_type) {
+               child = &child_device_ptr(p_defs, i)->old;
+               if (!child->device_type) {
                        /* skip the device block if device type is invalid */
                        continue;
                }
-               if (p_child->old.slave_addr != SLAVE_ADDR1 &&
-                       p_child->old.slave_addr != SLAVE_ADDR2) {
+               if (child->slave_addr != SLAVE_ADDR1 &&
+                   child->slave_addr != SLAVE_ADDR2) {
                        /*
                         * If the slave address is neither 0x70 nor 0x72,
                         * it is not a SDVO device. Skip it.
                         */
                        continue;
                }
-               if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
-                       p_child->old.dvo_port != DEVICE_PORT_DVOC) {
+               if (child->dvo_port != DEVICE_PORT_DVOB &&
+                   child->dvo_port != DEVICE_PORT_DVOC) {
                        /* skip the incorrect SDVO port */
                        DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
                        continue;
                }
                DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
-                               " %s port\n",
-                               p_child->old.slave_addr,
-                               (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
-                                       "SDVOB" : "SDVOC");
-               p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
+                             " %s port\n",
+                             child->slave_addr,
+                             (child->dvo_port == DEVICE_PORT_DVOB) ?
+                             "SDVOB" : "SDVOC");
+               p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
                if (!p_mapping->initialized) {
-                       p_mapping->dvo_port = p_child->old.dvo_port;
-                       p_mapping->slave_addr = p_child->old.slave_addr;
-                       p_mapping->dvo_wiring = p_child->old.dvo_wiring;
-                       p_mapping->ddc_pin = p_child->old.ddc_pin;
-                       p_mapping->i2c_pin = p_child->old.i2c_pin;
+                       p_mapping->dvo_port = child->dvo_port;
+                       p_mapping->slave_addr = child->slave_addr;
+                       p_mapping->dvo_wiring = child->dvo_wiring;
+                       p_mapping->ddc_pin = child->ddc_pin;
+                       p_mapping->i2c_pin = child->i2c_pin;
                        p_mapping->initialized = 1;
                        DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
                                      p_mapping->dvo_port,
@@ -469,7 +469,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                        DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
                                         "two SDVO device.\n");
                }
-               if (p_child->old.slave2_addr) {
+               if (child->slave2_addr) {
                        /* Maybe this is a SDVO device with multiple inputs */
                        /* And the mapping info is not added */
                        DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -905,23 +905,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        uint8_t hdmi_level_shift;
        int i, j;
        bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-       uint8_t aux_channel;
+       uint8_t aux_channel, ddc_pin;
        /* Each DDI port can have more than one value on the "DVO Port" field,
         * so look for all the possible values for each port and abort if more
         * than one is found. */
-       int dvo_ports[][2] = {
-               {DVO_PORT_HDMIA, DVO_PORT_DPA},
-               {DVO_PORT_HDMIB, DVO_PORT_DPB},
-               {DVO_PORT_HDMIC, DVO_PORT_DPC},
-               {DVO_PORT_HDMID, DVO_PORT_DPD},
-               {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+       int dvo_ports[][3] = {
+               {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+               {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+               {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+               {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+               {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
        };
 
        /* Find the child device to use, abort if more than one found. */
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
                it = dev_priv->vbt.child_dev + i;
 
-               for (j = 0; j < 2; j++) {
+               for (j = 0; j < 3; j++) {
                        if (dvo_ports[port][j] == -1)
                                break;
 
@@ -939,6 +939,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
                return;
 
        aux_channel = child->raw[25];
+       ddc_pin = child->common.ddc_pin;
 
        is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
        is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -970,11 +971,27 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
                DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
        if (is_dvi) {
-               if (child->common.ddc_pin == 0x05 && port != PORT_B)
+               if (port == PORT_E) {
+                       info->alternate_ddc_pin = ddc_pin;
+                       /* If DDI E shares its DDC pin with another port,
+                        * DVI/HDMI cannot exist on that shared port: both
+                        * would sit on the same DDC pin and the system
+                        * could not communicate with them separately. */
+                       if (ddc_pin == DDC_PIN_B) {
+                               dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+                               dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+                       } else if (ddc_pin == DDC_PIN_C) {
+                               dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+                               dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+                       } else if (ddc_pin == DDC_PIN_D) {
+                               dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+                               dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+                       }
+               } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
                        DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
-               if (child->common.ddc_pin == 0x04 && port != PORT_C)
+               else if (ddc_pin == DDC_PIN_C && port != PORT_C)
                        DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
-               if (child->common.ddc_pin == 0x06 && port != PORT_D)
+               else if (ddc_pin == DDC_PIN_D && port != PORT_D)
                        DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
        }
 
@@ -1051,17 +1068,39 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        const union child_device_config *p_child;
        union child_device_config *child_dev_ptr;
        int i, child_device_num, count;
-       u16     block_size;
+       u8 expected_size;
+       u16 block_size;
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
                DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
-       if (p_defs->child_dev_size < sizeof(*p_child)) {
-               DRM_ERROR("General definiton block child device size is too small.\n");
+       if (bdb->version < 195) {
+               expected_size = sizeof(struct old_child_dev_config);
+       } else if (bdb->version == 195) {
+               expected_size = 37;
+       } else if (bdb->version <= 197) {
+               expected_size = 38;
+       } else {
+               expected_size = 38;
+               BUILD_BUG_ON(sizeof(*p_child) < 38);
+               DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+                                bdb->version, expected_size);
+       }
+
+       /* The legacy sized child device config is the minimum we need. */
+       if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+               DRM_ERROR("Child device config size %u is too small.\n",
+                         p_defs->child_dev_size);
                return;
        }
+
+       /* Flag an error for unexpected size, but continue anyway. */
+       if (p_defs->child_dev_size != expected_size)
+               DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+                         p_defs->child_dev_size, expected_size, bdb->version);
+
        /* get the block size of general definitions */
        block_size = get_blocksize(p_defs);
        /* get the number of child device */
@@ -1106,7 +1145,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
                child_dev_ptr = dev_priv->vbt.child_dev + count;
                count++;
-               memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+
+               /*
+                * Copy as much of the child device config as we know about
+                * (sizeof) and as is available (child_dev_size). Access to
+                * the data must be gated on the VBT version.
+                */
+               memcpy(child_dev_ptr, p_child,
+                      min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
        }
        return;
 }
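
The bounded copy above is the crux of the fix: older VBTs ship a smaller child device config than the driver's struct, newer ones a larger one, and neither side may be overrun. A minimal standalone sketch of the same copy discipline (illustrative only; the struct size and names here are hypothetical, not the driver's):

    #include <string.h>

    struct local_child { unsigned char raw[38]; };      /* hypothetical struct */

    static void copy_child(struct local_child *dst, const void *src,
                           size_t child_dev_size)
    {
            size_t n = child_dev_size < sizeof(*dst) ?
                       child_dev_size : sizeof(*dst);

            /* The driver's destination array is allocated zeroed; the
             * memset stands in for that so the uncopied tail reads as 0. */
            memset(dst, 0, sizeof(*dst));
            memcpy(dst, src, n);    /* never past either buffer's size */
    }
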
index 6d909efbf43f3a52655551f71ab91f683ecb16bf..46cd5c7ebacd3e8873b624e0cfe0e1d03ba72aa8 100644 (file)
@@ -203,9 +203,11 @@ struct bdb_general_features {
 #define DEVICE_PORT_DVOB       0x01
 #define DEVICE_PORT_DVOC       0x02
 
-/* We used to keep this struct but without any version control. We should avoid
+/*
+ * We used to keep this struct but without any version control. We should avoid
  * using it in the future, but it should be safe to keep using it in the old
- * code. */
+ * code. Do not change; we rely on its size.
+ */
 struct old_child_dev_config {
        u16 handle;
        u16 device_type;
@@ -756,11 +758,6 @@ int intel_parse_bios(struct drm_device *dev);
 #define                DVO_C           2
 #define                DVO_D           3
 
-/* define the PORT for DP output type */
-#define                PORT_IDPB       7
-#define                PORT_IDPC       8
-#define                PORT_IDPD       9
-
 /* Possible values for the "DVO Port" field for versions >= 155: */
 #define DVO_PORT_HDMIA 0
 #define DVO_PORT_HDMIB 1
@@ -773,6 +770,8 @@ int intel_parse_bios(struct drm_device *dev);
 #define DVO_PORT_DPC   8
 #define DVO_PORT_DPD   9
 #define DVO_PORT_DPA   10
+#define DVO_PORT_DPE   11
+#define DVO_PORT_HDMIE 12
 #define DVO_PORT_MIPIA 21
 #define DVO_PORT_MIPIB 22
 #define DVO_PORT_MIPIC 23
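
With DVO_PORT_DPE/DVO_PORT_HDMIE added, port E's row in parse_ddi_port()'s dvo_ports table (earlier in this series) is the only one that uses all three columns; the other rows end in a -1 sentinel, which is why the scan loop was widened from 2 to 3 entries. A small illustrative sketch of that sentinel scan:

    static int row_matches(const int row[3], int dvo_port)
    {
            int j;

            for (j = 0; j < 3; j++) {
                    if (row[j] == -1)       /* sentinel: row has fewer entries */
                            break;
                    if (row[j] == dvo_port)
                            return 1;
            }
            return 0;
    }
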
index ba1ae031e6fd47ff7873fe739a4efd5e5c5fddac..d0f1b8d833cd2d890e0328df50cf5839cd3fba2f 100644 (file)
@@ -350,7 +350,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
        }
        csr->mmio_count = dmc_header->mmio_count;
        for (i = 0; i < dmc_header->mmio_count; i++) {
-               if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE &&
+               if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
                        dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
                        DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
                                                dmc_header->mmioaddr[i]);
index 6cfe65d6a8cf96358f7858494d8232a2cc0b7ba6..61575f67a62630a0575e575d133d2cf93ce2a6c6 100644 (file)
@@ -128,7 +128,7 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
        { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:   1000    1000    0       */
 };
 
-/* Skylake H, S, and Skylake Y with 0.95V VccIO */
+/* Skylake H and S */
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
        { 0x00002016, 0x000000A0, 0x0 },
        { 0x00005012, 0x0000009B, 0x0 },
@@ -143,23 +143,23 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 
 /* Skylake U */
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
-       { 0x00002016, 0x000000A2, 0x0 },
+       { 0x0000201B, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
        { 0x00007011, 0x00000087, 0x0 },
-       { 0x80009010, 0x000000C7, 0x1 },        /* Uses I_boost */
-       { 0x00002016, 0x0000009D, 0x0 },
+       { 0x80009010, 0x000000C7, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x0000201B, 0x0000009D, 0x0 },
        { 0x00005012, 0x000000C7, 0x0 },
        { 0x00007011, 0x000000C7, 0x0 },
        { 0x00002016, 0x00000088, 0x0 },
        { 0x00005012, 0x000000C7, 0x0 },
 };
 
-/* Skylake Y with 0.85V VccIO */
-static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
        { 0x00000018, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
        { 0x00007011, 0x00000087, 0x0 },
-       { 0x80009010, 0x000000C7, 0x1 },        /* Uses I_boost */
+       { 0x80009010, 0x000000C7, 0x3 },        /* Uses I_boost level 0x3 */
        { 0x00000018, 0x0000009D, 0x0 },
        { 0x00005012, 0x000000C7, 0x0 },
        { 0x00007011, 0x000000C7, 0x0 },
@@ -168,7 +168,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
 };
 
 /*
- * Skylake H and S, and Skylake Y with 0.95V VccIO
+ * Skylake H and S
  * eDP 1.4 low vswing translation parameters
  */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -202,10 +202,10 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
 };
 
 /*
- * Skylake Y with 0.95V VccIO
+ * Skylake Y
  * eDP 1.4 low vswing translation parameters
  */
-static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
        { 0x00000018, 0x000000A8, 0x0 },
        { 0x00004013, 0x000000AB, 0x0 },
        { 0x00007011, 0x000000A4, 0x0 },
@@ -218,7 +218,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
        { 0x00000018, 0x0000008A, 0x0 },
 };
 
-/* Skylake H, S and U, and Skylake Y with 0.95V VccIO */
+/* Skylake U, H and S */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
        { 0x00000018, 0x000000AC, 0x0 },
        { 0x00005012, 0x0000009D, 0x0 },
@@ -233,8 +233,8 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
        { 0x00000018, 0x000000C7, 0x0 },
 };
 
-/* Skylake Y with 0.85V VccIO */
-static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x00005012, 0x000000DF, 0x0 },
        { 0x00007011, 0x00000084, 0x0 },
@@ -244,7 +244,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
        { 0x00006013, 0x000000C7, 0x0 },
        { 0x00000018, 0x0000008A, 0x0 },
        { 0x00003015, 0x000000C7, 0x0 },        /* Default */
-       { 0x80003015, 0x000000C7, 0x7 },        /* Uses I_boost */
+       { 0x80003015, 0x000000C7, 0x7 },        /* Uses I_boost level 0x7 */
        { 0x00000018, 0x000000C7, 0x0 },
 };
 
@@ -335,19 +335,11 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
 static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
                                                        int *n_entries)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const struct ddi_buf_trans *ddi_translations;
-       static int is_095v = -1;
-
-       if (is_095v == -1) {
-               u32 spr1 = I915_READ(UAIMI_SPR1);
-
-               is_095v = spr1 & SKL_VCCIO_MASK;
-       }
 
-       if (IS_SKL_ULX(dev) && !is_095v) {
-               ddi_translations = skl_y_085v_ddi_translations_dp;
-               *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+       if (IS_SKL_ULX(dev)) {
+               ddi_translations = skl_y_ddi_translations_dp;
+               *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
        } else if (IS_SKL_ULT(dev)) {
                ddi_translations = skl_u_ddi_translations_dp;
                *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
@@ -364,23 +356,14 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct ddi_buf_trans *ddi_translations;
-       static int is_095v = -1;
-
-       if (is_095v == -1) {
-               u32 spr1 = I915_READ(UAIMI_SPR1);
-
-               is_095v = spr1 & SKL_VCCIO_MASK;
-       }
 
-       if (IS_SKL_ULX(dev) && !is_095v) {
+       if (IS_SKL_ULX(dev)) {
                if (dev_priv->edp_low_vswing) {
-                       ddi_translations = skl_y_085v_ddi_translations_edp;
-                       *n_entries =
-                               ARRAY_SIZE(skl_y_085v_ddi_translations_edp);
+                       ddi_translations = skl_y_ddi_translations_edp;
+                       *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
                } else {
-                       ddi_translations = skl_y_085v_ddi_translations_dp;
-                       *n_entries =
-                               ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+                       ddi_translations = skl_y_ddi_translations_dp;
+                       *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
                }
        } else if (IS_SKL_ULT(dev)) {
                if (dev_priv->edp_low_vswing) {
@@ -407,19 +390,11 @@ static const struct ddi_buf_trans *
 skl_get_buf_trans_hdmi(struct drm_device *dev,
                       int *n_entries)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const struct ddi_buf_trans *ddi_translations;
-       static int is_095v = -1;
-
-       if (is_095v == -1) {
-               u32 spr1 = I915_READ(UAIMI_SPR1);
-
-               is_095v = spr1 & SKL_VCCIO_MASK;
-       }
 
-       if (IS_SKL_ULX(dev) && !is_095v) {
-               ddi_translations = skl_y_085v_ddi_translations_hdmi;
-               *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi);
+       if (IS_SKL_ULX(dev)) {
+               ddi_translations = skl_y_ddi_translations_hdmi;
+               *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
        } else {
                ddi_translations = skl_ddi_translations_hdmi;
                *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
@@ -1579,17 +1554,14 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
        } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-               struct drm_encoder *encoder = &intel_encoder->base;
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-               switch (intel_dp->link_bw) {
-               case DP_LINK_BW_1_62:
+               switch (crtc_state->port_clock / 2) {
+               case 81000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
                        break;
-               case DP_LINK_BW_2_7:
+               case 135000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
                        break;
-               case DP_LINK_BW_5_4:
+               case 270000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
                        break;
                }
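
The switch above now keys on crtc_state->port_clock, the DP link rate in kHz (effectively the symbol clock: bit rate divided by 10 under 8b/10b), rather than the raw DPCD bandwidth code; halving it yields the case labels. A trivial illustrative sketch of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* RBR/HBR/HBR2 link rates in kHz, as stored in port_clock */
            static const int port_clock[] = { 162000, 270000, 540000 };
            unsigned int i;

            for (i = 0; i < sizeof(port_clock) / sizeof(port_clock[0]); i++)
                    printf("%d kHz -> case %d\n",
                           port_clock[i], port_clock[i] / 2);
            return 0;       /* prints 81000, 135000, 270000 */
    }
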
index 83936403502fb8a768783dbc5e24cf9f22035d61..8cc9264f78094c8cfcb101c16d61caf40729323e 100644 (file)
@@ -1098,6 +1098,9 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
                case PORT_D:
                        bit = SDE_PORTD_HOTPLUG_CPT;
                        break;
+               case PORT_E:
+                       bit = SDE_PORTE_HOTPLUG_SPT;
+                       break;
                default:
                        return true;
                }
@@ -5147,7 +5150,6 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
 {
        switch (port) {
        case PORT_A:
-       case PORT_E:
                return POWER_DOMAIN_PORT_DDI_A_4_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_4_LANES;
@@ -5155,6 +5157,8 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
                return POWER_DOMAIN_PORT_DDI_C_4_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+       case PORT_E:
+               return POWER_DOMAIN_PORT_DDI_E_2_LANES;
        default:
                WARN_ON_ONCE(1);
                return POWER_DOMAIN_PORT_OTHER;
@@ -5709,16 +5713,13 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
        /* enable PG1 and Misc I/O */
        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
-       /* DPLL0 already enabed !? */
-       if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
-               DRM_DEBUG_DRIVER("DPLL0 already running\n");
-               return;
+       /* DPLL0 not enabled (happens on early BIOS versions) */
+       if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+               /* enable DPLL0 */
+               required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+               skl_dpll0_enable(dev_priv, required_vco);
        }
 
-       /* enable DPLL0 */
-       required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-       skl_dpll0_enable(dev_priv, required_vco);
-
        /* set CDCLK to the frequency the BIOS chose */
        skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
 
@@ -6304,7 +6305,7 @@ static void intel_connector_check_state(struct intel_connector *connector)
                      connector->base.name);
 
        if (connector->get_hw_state(connector)) {
-               struct drm_encoder *encoder = &connector->encoder->base;
+               struct intel_encoder *encoder = connector->encoder;
                struct drm_connector_state *conn_state = connector->base.state;
 
                I915_STATE_WARN(!crtc,
@@ -6316,13 +6317,13 @@ static void intel_connector_check_state(struct intel_connector *connector)
                I915_STATE_WARN(!crtc->state->active,
                      "connector is active, but attached crtc isn't\n");
 
-               if (!encoder)
+               if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;
 
-               I915_STATE_WARN(conn_state->best_encoder != encoder,
+               I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");
 
-               I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+               I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                I915_STATE_WARN(crtc && crtc->state->active,
@@ -13960,6 +13961,15 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_ddi_init(dev, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev, PORT_D);
+               /*
+                * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+                */
+               if (IS_SKYLAKE(dev) &&
+                   (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+                    dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+                    dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+                       intel_ddi_init(dev, PORT_E);
+
        } else if (HAS_PCH_SPLIT(dev)) {
                int found;
                dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -14730,6 +14740,24 @@ void intel_modeset_init(struct drm_device *dev)
        if (INTEL_INFO(dev)->num_pipes == 0)
                return;
 
+       /*
+        * There may be no VBT; and if the BIOS enabled SSC we can
+        * just keep using it to avoid unnecessary flicker.  Whereas if the
+        * BIOS isn't using it, don't assume it will work even if the VBT
+        * indicates as much.
+        */
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+               bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+                                           DREF_SSC1_ENABLE);
+
+               if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+                       DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+                                    bios_lvds_use_ssc ? "en" : "dis",
+                                    dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+                       dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+               }
+       }
+
        intel_init_display(dev);
        intel_init_audio(dev);
 
@@ -15289,7 +15317,6 @@ err:
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -15298,16 +15325,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
        intel_init_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);
 
-       /*
-        * There may be no VBT; and if the BIOS enabled SSC we can
-        * just keep using it to avoid unnecessary flicker.  Whereas if the
-        * BIOS isn't using it, don't assume it will work even if the VBT
-        * indicates as much.
-        */
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-                                               DREF_SSC1_ENABLE);
-
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev);
index d0f4eb793cf56c1a52157d96ef98f3a8dfaebe8c..0a2e33fbf20dd2902817d85c10aa6b402d40e869 100644 (file)
 #define INTEL_DP_RESOLUTION_FAILSAFE   (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
 
 struct dp_link_dpll {
-       int link_bw;
+       int clock;
        struct dpll dpll;
 };
 
 static const struct dp_link_dpll gen4_dpll[] = {
-       { DP_LINK_BW_1_62,
+       { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
-       { DP_LINK_BW_2_7,
+       { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
 };
 
 static const struct dp_link_dpll pch_dpll[] = {
-       { DP_LINK_BW_1_62,
+       { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
-       { DP_LINK_BW_2_7,
+       { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
 };
 
 static const struct dp_link_dpll vlv_dpll[] = {
-       { DP_LINK_BW_1_62,
+       { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
-       { DP_LINK_BW_2_7,
+       { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
 };
 
@@ -83,11 +83,11 @@ static const struct dp_link_dpll chv_dpll[] = {
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
-       { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
+       { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
-       { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
+       { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-       { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
+       { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
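
Replacing the DP_LINK_BW_* keys with kHz rates works because the DPCD bandwidth code is simply the link rate in units of 0.27 Gbps; to the best of my knowledge the drm helpers drm_dp_bw_code_to_link_rate()/drm_dp_link_rate_to_bw_code() perform exactly this conversion. A sketch:

    /* DPCD bw code <-> link rate in kHz (27000 kHz per code unit) */
    static inline int bw_code_to_link_rate(unsigned char link_bw)
    {
            return link_bw * 27000;         /* 0x06 -> 162000, 0x0a -> 270000 */
    }

    static inline unsigned char link_rate_to_bw_code(int link_rate)
    {
            return link_rate / 27000;       /* 540000 -> 0x14 (5.4 Gbps) */
    }
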
 
@@ -1130,7 +1130,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
 }
 
 static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
 {
        u32 ctrl1;
 
@@ -1142,7 +1142,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
        pipe_config->dpll_hw_state.cfgcr2 = 0;
 
        ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-       switch (link_clock / 2) {
+       switch (pipe_config->port_clock / 2) {
        case 81000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
                                              SKL_DPLL0);
@@ -1175,20 +1175,20 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
        pipe_config->dpll_hw_state.ctrl1 = ctrl1;
 }
 
-static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
 {
        memset(&pipe_config->dpll_hw_state, 0,
               sizeof(pipe_config->dpll_hw_state));
 
-       switch (link_bw) {
-       case DP_LINK_BW_1_62:
+       switch (pipe_config->port_clock / 2) {
+       case 81000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
                break;
-       case DP_LINK_BW_2_7:
+       case 135000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
                break;
-       case DP_LINK_BW_5_4:
+       case 270000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
                break;
        }
@@ -1223,26 +1223,29 @@ static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
+       int size;
+
        if (IS_BROXTON(dev)) {
                *source_rates = bxt_rates;
-               return ARRAY_SIZE(bxt_rates);
+               size = ARRAY_SIZE(bxt_rates);
        } else if (IS_SKYLAKE(dev)) {
                *source_rates = skl_rates;
-               return ARRAY_SIZE(skl_rates);
+               size = ARRAY_SIZE(skl_rates);
+       } else {
+               *source_rates = default_rates;
+               size = ARRAY_SIZE(default_rates);
        }
 
-       *source_rates = default_rates;
-
        /* This depends on the fact that 5.4 is last value in the array */
-       if (intel_dp_source_supports_hbr2(dev))
-               return (DP_LINK_BW_5_4 >> 3) + 1;
-       else
-               return (DP_LINK_BW_2_7 >> 3) + 1;
+       if (!intel_dp_source_supports_hbr2(dev))
+               size--;
+
+       return size;
 }
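
The reworked intel_dp_source_rates() drops the bandwidth-code arithmetic and instead trims the last array entry when HBR2 is unsupported; as the comment notes, this leans on the rate arrays being sorted with 540000 (5.4 GHz) last. A minimal sketch, assuming a three-entry table like default_rates:

    static const int example_rates[] = { 162000, 270000, 540000 };

    static int num_rates(int supports_hbr2)
    {
            int size = sizeof(example_rates) / sizeof(example_rates[0]);

            if (!supports_hbr2)
                    size--;         /* drop the trailing 5.4 GHz entry */
            return size;
    }
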
 
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
-                  struct intel_crtc_state *pipe_config, int link_bw)
+                  struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
        const struct dp_link_dpll *divisor = NULL;
@@ -1264,7 +1267,7 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 
        if (divisor && count) {
                for (i = 0; i < count; i++) {
-                       if (link_bw == divisor[i].link_bw) {
+                       if (pipe_config->port_clock == divisor[i].clock) {
                                pipe_config->dpll = divisor[i].dpll;
                                pipe_config->clock_set = true;
                                break;
@@ -1541,13 +1544,13 @@ found:
        }
 
        if (IS_SKYLAKE(dev) && is_edp(intel_dp))
-               skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+               skl_edp_set_pll_config(pipe_config);
        else if (IS_BROXTON(dev))
                /* handled in ddi */;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+               hsw_dp_set_ddi_pll_sel(pipe_config);
        else
-               intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+               intel_dp_set_clock(encoder, pipe_config);
 
        return true;
 }
@@ -4958,9 +4961,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
                intel_dp_probe_oui(intel_dp);
 
-               if (!intel_dp_probe_mst(intel_dp))
+               if (!intel_dp_probe_mst(intel_dp)) {
+                       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+                       intel_dp_check_link_status(intel_dp);
+                       drm_modeset_unlock(&dev->mode_config.connection_mutex);
                        goto mst_fail;
-
+               }
        } else {
                if (intel_dp->is_mst) {
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4968,10 +4974,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                }
 
                if (!intel_dp->is_mst) {
-                       /*
-                        * we'll check the link status via the normal hot plug path later -
-                        * but for short hpds we should check it now
-                        */
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5013,16 +5015,17 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
        return -1;
 }
 
-/* check the VBT to see whether the eDP is on DP-D port */
+/* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        union child_device_config *p_child;
        int i;
        static const short port_mapping[] = {
-               [PORT_B] = PORT_IDPB,
-               [PORT_C] = PORT_IDPC,
-               [PORT_D] = PORT_IDPD,
+               [PORT_B] = DVO_PORT_DPB,
+               [PORT_C] = DVO_PORT_DPC,
+               [PORT_D] = DVO_PORT_DPD,
+               [PORT_E] = DVO_PORT_DPE,
        };
 
        if (port == PORT_A)
@@ -5857,6 +5860,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        case PORT_D:
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
+       case PORT_E:
+               intel_encoder->hpd_pin = HPD_PORT_E;
+               break;
        default:
                BUG();
        }
index 369f8b6b804fe23381cee2146cd8e289fba8c526..3e4be5a3becdddf9fd2a23e6be26f02da90a28f2 100644 (file)
@@ -33,6 +33,7 @@
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
                                        struct intel_crtc_state *pipe_config)
 {
+       struct drm_device *dev = encoder->base.dev;
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -97,6 +98,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
                               &pipe_config->dp_m_n);
 
        pipe_config->dp_m_n.tu = slots;
+
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hsw_dp_set_ddi_pll_sel(pipe_config);
+
        return true;
 
 }
@@ -168,6 +173,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
                return;
        }
 
+       /* MST encoders are bound to a crtc, not to a connector,
+        * so force the mapping here for get_hw_state.
+        */
+       found->encoder = encoder;
+
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
        intel_mst->port = found->port;
 
@@ -395,7 +405,7 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
 
 static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
 {
-       if (connector->encoder) {
+       if (connector->encoder && connector->base.state->crtc) {
                enum pipe pipe;
                if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
                        return false;
index 93008fbb815d93012d7b5dbcdfd52b0bbfa10af0..2b9e6f9775c5314511a577e82965bb79c8be788f 100644 (file)
@@ -1185,6 +1185,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
 void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
index 4a601cf90f16c68d694babd065bbae09cbe6e9f7..32a6c7184ca4fcbcc73786e678f634a22453224d 100644 (file)
@@ -1048,11 +1048,7 @@ void intel_dsi_init(struct drm_device *dev)
        intel_connector->unregister = intel_connector_unregister;
 
        /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
-       if (dev_priv->vbt.dsi.config->dual_link) {
-               /* XXX: does dual link work on either pipe? */
-               intel_encoder->crtc_mask = (1 << PIPE_A);
-               intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
-       } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+       if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
                intel_encoder->crtc_mask = (1 << PIPE_A);
                intel_dsi->ports = (1 << PORT_A);
        } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
@@ -1060,6 +1056,9 @@ void intel_dsi_init(struct drm_device *dev)
                intel_dsi->ports = (1 << PORT_C);
        }
 
+       if (dev_priv->vbt.dsi.config->dual_link)
+               intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+
        /* Create a DSI host (and a device) for each port. */
        for_each_dsi_port(port, intel_dsi->ports) {
                struct intel_dsi_host *host;
index 51cbea8247fe9b2cfc6987e6dec114b06d09cc00..dcd336bcdfe750037901f87a6fb5e2c78c57b17c 100644 (file)
@@ -1958,6 +1958,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
+       uint8_t alternate_ddc_pin;
 
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
@@ -1991,6 +1992,26 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                        intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
+       case PORT_E:
+               /* On SKL, PORT E doesn't have a separate GMBUS pin.
+                * We rely on VBT to set a proper alternate GMBUS pin. */
+               alternate_ddc_pin =
+                       dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+               switch (alternate_ddc_pin) {
+               case DDC_PIN_B:
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+                       break;
+               case DDC_PIN_C:
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+                       break;
+               case DDC_PIN_D:
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+                       break;
+               default:
+                       MISSING_CASE(alternate_ddc_pin);
+               }
+               intel_encoder->hpd_pin = HPD_PORT_E;
+               break;
        case PORT_A:
                intel_encoder->hpd_pin = HPD_PORT_A;
                /* Internal port only for eDP. */
index 032a0bf75f3b1d4d7fbbfd44025653865f30ac0b..53c0173a39fe182d5d2e50ac2ffc6637f77526fd 100644 (file)
@@ -91,6 +91,9 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
        case HPD_PORT_D:
                *port = PORT_D;
                return true;
+       case HPD_PORT_E:
+               *port = PORT_E;
+               return true;
        default:
                return false;   /* no hpd */
        }
index fff0c22682ee32f947907da7bb27f4fda0463073..ddbb7ed0a193229355700926006578ca5f06b937 100644 (file)
@@ -955,8 +955,6 @@ enum vlv_wm_level {
        VLV_WM_LEVEL_PM2,
        VLV_WM_LEVEL_PM5,
        VLV_WM_LEVEL_DDR_DVFS,
-       CHV_WM_NUM_LEVELS,
-       VLV_WM_NUM_LEVELS = 1,
 };
 
 /* latency must be in 0.1us units. */
@@ -982,9 +980,13 @@ static void vlv_setup_wm_latency(struct drm_device *dev)
        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
+       dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+
        if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+               dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
        }
 }
 
@@ -1137,10 +1139,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
        memset(wm_state, 0, sizeof(*wm_state));
 
        wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
-       if (IS_CHERRYVIEW(dev))
-               wm_state->num_levels = CHV_WM_NUM_LEVELS;
-       else
-               wm_state->num_levels = VLV_WM_NUM_LEVELS;
+       wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
 
        wm_state->num_active_planes = 0;
 
@@ -1220,7 +1219,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
        }
 
        /* clear any (partially) filled invalid levels */
-       for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+       for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
                memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
                memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
        }
@@ -1324,10 +1323,7 @@ static void vlv_merge_wm(struct drm_device *dev,
        struct intel_crtc *crtc;
        int num_active_crtcs = 0;
 
-       if (IS_CHERRYVIEW(dev))
-               wm->level = VLV_WM_LEVEL_DDR_DVFS;
-       else
-               wm->level = VLV_WM_LEVEL_PM2;
+       wm->level = to_i915(dev)->wm.max_level;
        wm->cxsr = true;
 
        for_each_intel_crtc(dev, crtc) {
@@ -4083,9 +4079,29 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
                if (val & DSP_MAXFIFO_PM5_ENABLE)
                        wm->level = VLV_WM_LEVEL_PM5;
 
+               /*
+                * If DDR DVFS is disabled in the BIOS, Punit
+                * will never ack the request. So if that happens
+                * assume we don't have to enable/disable DDR DVFS
+                * dynamically. To test that just set the REQ_ACK
+                * bit to poke the Punit, but don't change the
+                * HIGH/LOW bits so that we don't actually change
+                * the current state.
+                */
                val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-               if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-                       wm->level = VLV_WM_LEVEL_DDR_DVFS;
+               val |= FORCE_DDR_FREQ_REQ_ACK;
+               vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+               if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+                             FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+                       DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+                                     "assuming DDR DVFS is disabled\n");
+                       dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+               } else {
+                       val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+                       if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+                               wm->level = VLV_WM_LEVEL_DDR_DVFS;
+               }
 
                mutex_unlock(&dev_priv->rps.hw_lock);
        }
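
The hunk above probes whether the Punit will ever ack a DDR DVFS request: it sets only the REQ_ACK bit (leaving the HIGH/LOW frequency selection untouched) and polls for the hardware to clear it. A self-contained sketch of that request/ack pattern; the register, bit position, and timeout below are simulated stand-ins, not the real Punit interface:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FORCE_DDR_FREQ_REQ_ACK (1u << 8)        /* hypothetical bit position */

    static uint32_t punit_reg;      /* simulated register */
    static int poll_budget = 3;     /* simulated ~3 ms timeout */

    static bool ddr_dvfs_available(void)
    {
            /* Set only the REQ_ACK bit; the HIGH/LOW bits stay as-is. */
            punit_reg |= FORCE_DDR_FREQ_REQ_ACK;

            while (punit_reg & FORCE_DDR_FREQ_REQ_ACK) {
                    if (poll_budget-- == 0)
                            return false;   /* never acked: assume disabled */
            }
            return true;
    }

    int main(void)
    {
            /* Nothing clears the bit here, so the request times out. */
            printf("DDR DVFS %s\n",
                   ddr_dvfs_available() ? "available" : "disabled");
            return 0;
    }
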
index 821644d1b544eb618e57db8196014768ebe9d7b8..af7fdb3bd663aef062a5cd41a2cdacbb4492515d 100644 (file)
@@ -297,6 +297,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
        BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
        BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |          \
        BIT(POWER_DOMAIN_AUX_B) |                       \
        BIT(POWER_DOMAIN_AUX_C) |                       \
        BIT(POWER_DOMAIN_AUX_D) |                       \
@@ -316,6 +317,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (            \
        BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
        BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |          \
        BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (              \
        BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
index 522e91ab53607508ec3752c3302267e3b23e94b2..429ab5e3025a621c1d2a580880e87b5993d95ea0 100644 (file)
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 {
 #ifdef __powerpc__
        struct drm_device *dev = encoder->dev;
-       struct nvif_device *device = &nouveau_drm(dev)->device;
+       struct nvif_object *device = &nouveau_drm(dev)->device.object;
 
        /* BIOS scripts usually take care of the backlight, thanks
         * Apple for your consistency.
index 29a37f03ebf1c7af1065d8027a3e65f77252fda3..bd60d7dd09f51a45b70f120597ca38adaf8c102b 100644 (file)
@@ -21,7 +21,7 @@
  *
  */
 #include "priv.h"
-
+#include <core/pci.h>
 
 #if defined(__powerpc__)
 struct priv {
@@ -43,7 +43,7 @@ of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-       struct pci_dev *pdev = bios->subdev.device->pdev;
+       struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
        struct device_node *dn;
        struct priv *priv;
        if (!(dn = pci_device_to_OF_node(pdev)))
index a8dbb3ef4e3c9602aa7734e3d5984b46ba607fb6..7c6225c84ba6745919377fa9bfbd3506a7d8f7e5 100644 (file)
@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
        *pwidth = head->width;
        *pheight = head->height;
        drm_mode_probed_add(connector, mode);
+       /* remember the last custom size for mode validation */
+       qdev->monitors_config_width = mode->hdisplay;
+       qdev->monitors_config_height = mode->vdisplay;
        return 1;
 }
 
+static struct mode_size {
+       int w;
+       int h;
+} common_modes[] = {
+       { 640,  480},
+       { 720,  480},
+       { 800,  600},
+       { 848,  480},
+       {1024,  768},
+       {1152,  768},
+       {1280,  720},
+       {1280,  800},
+       {1280,  854},
+       {1280,  960},
+       {1280, 1024},
+       {1440,  900},
+       {1400, 1050},
+       {1680, 1050},
+       {1600, 1200},
+       {1920, 1080},
+       {1920, 1200}
+};
+
 static int qxl_add_common_modes(struct drm_connector *connector,
                                 unsigned pwidth,
                                 unsigned pheight)
@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode = NULL;
        int i;
-       struct mode_size {
-               int w;
-               int h;
-       } common_modes[] = {
-               { 640,  480},
-               { 720,  480},
-               { 800,  600},
-               { 848,  480},
-               {1024,  768},
-               {1152,  768},
-               {1280,  720},
-               {1280,  800},
-               {1280,  854},
-               {1280,  960},
-               {1280, 1024},
-               {1440,  900},
-               {1400, 1050},
-               {1680, 1050},
-               {1600, 1200},
-               {1920, 1080},
-               {1920, 1200}
-       };
-
        for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
                mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
                                    60, false, false, false);
@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
 static int qxl_conn_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
+       struct drm_device *ddev = connector->dev;
+       struct qxl_device *qdev = ddev->dev_private;
+       int i;
+
        /* TODO: is this called for user defined modes? (xrandr --add-mode)
         * TODO: check that the mode fits in the framebuffer */
-       DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
-                 mode->vdisplay, mode->status);
-       return MODE_OK;
+
+       if (qdev->monitors_config_width == mode->hdisplay &&
+          qdev->monitors_config_height == mode->vdisplay)
+               return MODE_OK;
+
+       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+               if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
+                       return MODE_OK;
+       }
+       return MODE_BAD;
 }
 
 static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
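
qxl_conn_mode_valid() now rejects any mode that is neither the last monitors_config size nor one of the common modes. An abridged illustrative sketch of the check (the mode list here is shortened):

    #include <stdbool.h>
    #include <stddef.h>

    struct mode_size { int w, h; };

    static const struct mode_size common[] = {      /* abridged list */
            { 640, 480 }, { 1024, 768 }, { 1920, 1080 },
    };

    static bool mode_ok(int w, int h, int cfg_w, int cfg_h)
    {
            size_t i;

            if (w == cfg_w && h == cfg_h)   /* last monitors_config size */
                    return true;
            for (i = 0; i < sizeof(common) / sizeof(common[0]); i++)
                    if (common[i].w == w && common[i].h == h)
                            return true;
            return false;                   /* -> MODE_BAD */
    }
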
index d8549690801d20fbc2dfabfed52006fc3752bdf8..01a86948eb8cd6007a1db70785b714df9bc9cbb1 100644 (file)
@@ -325,6 +325,8 @@ struct qxl_device {
        struct work_struct fb_work;
 
        struct drm_property *hotplug_mode_update_property;
+       int monitors_config_width;
+       int monitors_config_height;
 };
 
 /* forward declaration for QXL_INFO_IO */
index f81e0d7d023290813ed9cdc981155ee8d1a12c4c..9cd49c584263c895ad368586a1df58120d9c9a71 100644 (file)
@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                return -E2BIG;
 
        tx_buf[0] = msg->address & 0xff;
-       tx_buf[1] = msg->address >> 8;
-       tx_buf[2] = msg->request << 4;
+       tx_buf[1] = (msg->address >> 8) & 0xff;
+       tx_buf[2] = (msg->request << 4) |
+               ((msg->address >> 16) & 0xf);
        tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
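
The fix above folds the top nibble of the 20-bit AUX address into the request byte; previously bits 19:16 were simply dropped. A standalone sketch of the header packing as the patched code does it:

    #include <stdint.h>

    static void pack_aux_header(uint8_t buf[4], uint8_t request,
                                uint32_t address, uint8_t size)
    {
            buf[0] = address & 0xff;                /* address bits 7:0 */
            buf[1] = (address >> 8) & 0xff;         /* address bits 15:8 */
            buf[2] = (request << 4) |
                     ((address >> 16) & 0xf);       /* request | bits 19:16 */
            buf[3] = size ? size - 1 : 0;           /* transfer length - 1 */
    }
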
index fbc8d88d6e5de1afe43c11884340e90bfa8e57a2..2c02e99b5f95a65a26357f26d67e91a45f8224a4 100644 (file)
@@ -522,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
                return err;
        }
 
-       if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-               if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
-                       frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
-               else
-                       frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-       } else {
-               frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+       if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+               if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+                       if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+                               frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+                       else
+                               frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+               } else {
+                       frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+               }
        }
 
        err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
index c097d3a82bda734888ca46d05c14794738163642..a9b01bcf7d0a2242cf181ef31a77cf9150e6e8f8 100644 (file)
@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
            rdev->pdev->subsystem_device == 0x30ae)
                return;
 
+       /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+        * - it hangs on resume inside the dynclk 1 table.
+        */
+       if (rdev->family == CHIP_RS480 &&
+           rdev->pdev->subsystem_vendor == 0x103c &&
+           rdev->pdev->subsystem_device == 0x280a)
+               return;
+
        /* DYN CLK 1 */
        table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
        if (table)
index fcbd60bb03495740d435b7a33521ff698b171c40..3b0c229d7dcd23ffb7184ad79e2ebaa8f001cca9 100644 (file)
@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
               AUX_SW_WR_BYTES(bytes));
 
        /* write the data header into the registers */
-       /* request, addres, msg size */
-       byte = (msg->request << 4);
+       /* request, address, msg size */
+       byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
        WREG32(AUX_SW_DATA + aux_offset[instance],
               AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
 
index 34b78e73653248316fcd24732d03d253454c6e50..5d8ae5e49c440f98afda4c02d6aea8e335c78c82 100644 (file)
@@ -50,6 +50,8 @@
 
 #define VOP_WIN_SET(x, win, name, v) \
                REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_SCL_SET(x, win, name, v) \
+               REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
 #define VOP_CTRL_SET(x, name, v) \
                REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
 
@@ -164,7 +166,37 @@ struct vop_ctrl {
        struct vop_reg vpost_st_end;
 };
 
+struct vop_scl_regs {
+       struct vop_reg cbcr_vsd_mode;
+       struct vop_reg cbcr_vsu_mode;
+       struct vop_reg cbcr_hsd_mode;
+       struct vop_reg cbcr_ver_scl_mode;
+       struct vop_reg cbcr_hor_scl_mode;
+       struct vop_reg yrgb_vsd_mode;
+       struct vop_reg yrgb_vsu_mode;
+       struct vop_reg yrgb_hsd_mode;
+       struct vop_reg yrgb_ver_scl_mode;
+       struct vop_reg yrgb_hor_scl_mode;
+       struct vop_reg line_load_mode;
+       struct vop_reg cbcr_axi_gather_num;
+       struct vop_reg yrgb_axi_gather_num;
+       struct vop_reg vsd_cbcr_gt2;
+       struct vop_reg vsd_cbcr_gt4;
+       struct vop_reg vsd_yrgb_gt2;
+       struct vop_reg vsd_yrgb_gt4;
+       struct vop_reg bic_coe_sel;
+       struct vop_reg cbcr_axi_gather_en;
+       struct vop_reg yrgb_axi_gather_en;
+
+       struct vop_reg lb_mode;
+       struct vop_reg scale_yrgb_x;
+       struct vop_reg scale_yrgb_y;
+       struct vop_reg scale_cbcr_x;
+       struct vop_reg scale_cbcr_y;
+};
+
 struct vop_win_phy {
+       const struct vop_scl_regs *scl;
        const uint32_t *data_formats;
        uint32_t nformats;
 
@@ -222,7 +254,36 @@ static const uint32_t formats_234[] = {
        DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs win_full_scl = {
+       .cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
+       .cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
+       .cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
+       .cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
+       .cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
+       .yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
+       .yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
+       .yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
+       .yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
+       .yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
+       .line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
+       .cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
+       .yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
+       .vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
+       .vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
+       .vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
+       .vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
+       .bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
+       .cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
+       .yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
+       .lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
+       .scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+       .scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+       .scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+       .scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
 static const struct vop_win_phy win01_data = {
+       .scl = &win_full_scl,
        .data_formats = formats_01,
        .nformats = ARRAY_SIZE(formats_01),
        .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
@@ -279,6 +340,12 @@ static const struct vop_reg_data vop_init_reg_table[] = {
        {DSP_CTRL0, 0x00000000},
        {WIN0_CTRL0, 0x00000080},
        {WIN1_CTRL0, 0x00000080},
+       /* TODO: Win2/3 support a multi-area function, but we haven't found
+        * a suitable way to use it yet, so let's just use them like the
+        * other windows, with only area 0 enabled.
+        */
+       {WIN2_CTRL0, 0x00000010},
+       {WIN3_CTRL0, 0x00000010},
 };
 
 /*
@@ -393,6 +460,18 @@ static enum vop_data_format vop_convert_format(uint32_t format)
        }
 }
 
+static bool is_yuv_support(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV24:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static bool is_alpha_support(uint32_t format)
 {
        switch (format) {
@@ -404,6 +483,126 @@ static bool is_alpha_support(uint32_t format)
        }
 }
 
+static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
+                                 uint32_t dst, bool is_horizontal,
+                                 int vsu_mode, int *vskiplines)
+{
+       uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+
+       if (is_horizontal) {
+               if (mode == SCALE_UP)
+                       val = GET_SCL_FT_BIC(src, dst);
+               else if (mode == SCALE_DOWN)
+                       val = GET_SCL_FT_BILI_DN(src, dst);
+       } else {
+               if (mode == SCALE_UP) {
+                       if (vsu_mode == SCALE_UP_BIL)
+                               val = GET_SCL_FT_BILI_UP(src, dst);
+                       else
+                               val = GET_SCL_FT_BIC(src, dst);
+               } else if (mode == SCALE_DOWN) {
+                       if (vskiplines) {
+                               *vskiplines = scl_get_vskiplines(src, dst);
+                               val = scl_get_bili_dn_vskip(src, dst,
+                                                           *vskiplines);
+                       } else {
+                               val = GET_SCL_FT_BILI_DN(src, dst);
+                       }
+               }
+       }
+
+       return val;
+}
+
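
scl_vop_cal_scale() returns a fixed-point scale factor; the GET_SCL_FT_* helpers it calls live elsewhere in the driver and are not shown here. As a rough illustration only (the fraction width is an assumption and the real bilinear/bicubic macros differ), a plain N-bit fixed-point ratio looks like:

    #define SCL_SHIFT 12    /* assumed fraction width, for illustration */

    static unsigned short scale_factor(unsigned int src, unsigned int dst)
    {
            /* identity is 1 << SCL_SHIFT; src = 2 * dst yields 2.0 */
            return (unsigned short)((src << SCL_SHIFT) / dst);
    }
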
+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+                            uint32_t src_w, uint32_t src_h, uint32_t dst_w,
+                            uint32_t dst_h, uint32_t pixel_format)
+{
+       uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
+       uint16_t cbcr_hor_scl_mode = SCALE_NONE;
+       uint16_t cbcr_ver_scl_mode = SCALE_NONE;
+       int hsub = drm_format_horz_chroma_subsampling(pixel_format);
+       int vsub = drm_format_vert_chroma_subsampling(pixel_format);
+       bool is_yuv = is_yuv_support(pixel_format);
+       uint16_t cbcr_src_w = src_w / hsub;
+       uint16_t cbcr_src_h = src_h / vsub;
+       uint16_t vsu_mode;
+       uint16_t lb_mode;
+       uint32_t val;
+       int vskiplines;
+
+       if (dst_w > 3840) {
+               DRM_ERROR("Maximum destination width (3840) exceeded\n");
+               return;
+       }
+
+       yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+       yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+
+       if (is_yuv) {
+               cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+               cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+               if (cbcr_hor_scl_mode == SCALE_DOWN)
+                       lb_mode = scl_vop_cal_lb_mode(dst_w, true);
+               else
+                       lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
+       } else {
+               if (yrgb_hor_scl_mode == SCALE_DOWN)
+                       lb_mode = scl_vop_cal_lb_mode(dst_w, false);
+               else
+                       lb_mode = scl_vop_cal_lb_mode(src_w, false);
+       }
+
+       VOP_SCL_SET(vop, win, lb_mode, lb_mode);
+       if (lb_mode == LB_RGB_3840X2) {
+               if (yrgb_ver_scl_mode != SCALE_NONE) {
+                       DRM_ERROR("yrgb vertical scaling is not allowed\n");
+                       return;
+               }
+               if (cbcr_ver_scl_mode != SCALE_NONE) {
+                       DRM_ERROR("cbcr vertical scaling is not allowed\n");
+                       return;
+               }
+               vsu_mode = SCALE_UP_BIL;
+       } else if (lb_mode == LB_RGB_2560X4) {
+               vsu_mode = SCALE_UP_BIL;
+       } else {
+               vsu_mode = SCALE_UP_BIC;
+       }
+
+       val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
+                               true, 0, NULL);
+       VOP_SCL_SET(vop, win, scale_yrgb_x, val);
+       val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
+                               false, vsu_mode, &vskiplines);
+       VOP_SCL_SET(vop, win, scale_yrgb_y, val);
+
+       VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4);
+       VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2);
+
+       VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
+       VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
+       VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
+       VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
+       VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode);
+       if (is_yuv) {
+               val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
+                                       dst_w, true, 0, NULL);
+               VOP_SCL_SET(vop, win, scale_cbcr_x, val);
+               val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
+                                       dst_h, false, vsu_mode, &vskiplines);
+               VOP_SCL_SET(vop, win, scale_cbcr_y, val);
+
+               VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4);
+               VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2);
+               VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
+               VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
+               VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
+               VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
+               VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode);
+       }
+}
+
 static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
 {
        unsigned long flags;
@@ -478,6 +677,7 @@ static void vop_enable(struct drm_crtc *crtc)
                goto err_disable_aclk;
        }
 
+       memcpy(vop->regs, vop->regsbak, vop->len);
        /*
         * At here, vop clock & iommu is enable, R/W vop regs would be safe.
         */
@@ -598,17 +798,22 @@ static int vop_update_plane_event(struct drm_plane *plane,
        struct vop *vop = to_vop(crtc);
        struct drm_gem_object *obj;
        struct rockchip_gem_object *rk_obj;
+       struct drm_gem_object *uv_obj;
+       struct rockchip_gem_object *rk_uv_obj;
        unsigned long offset;
        unsigned int actual_w;
        unsigned int actual_h;
        unsigned int dsp_stx;
        unsigned int dsp_sty;
        unsigned int y_vir_stride;
+       unsigned int uv_vir_stride = 0;
        dma_addr_t yrgb_mst;
+       dma_addr_t uv_mst = 0;
        enum vop_data_format format;
        uint32_t val;
        bool is_alpha;
        bool rb_swap;
+       bool is_yuv;
        bool visible;
        int ret;
        struct drm_rect dest = {
@@ -629,11 +834,15 @@ static int vop_update_plane_event(struct drm_plane *plane,
                .y2 = crtc->mode.vdisplay,
        };
        bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+       int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+                                       DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+                                       DRM_PLANE_HELPER_NO_SCALING;
 
        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            &src, &dest, &clip,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           min_scale,
+                                           max_scale,
                                            can_position, false, &visible);
        if (ret)
                return ret;
@@ -643,6 +852,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
 
        is_alpha = is_alpha_support(fb->pixel_format);
        rb_swap = has_rb_swapped(fb->pixel_format);
+       is_yuv = is_yuv_support(fb->pixel_format);
+
        format = vop_convert_format(fb->pixel_format);
        if (format < 0)
                return format;
@@ -655,19 +866,46 @@ static int vop_update_plane_event(struct drm_plane *plane,
 
        rk_obj = to_rockchip_obj(obj);
 
+       if (is_yuv) {
+               /*
+                * src.x1 can be odd after clipping, but the start point of a
+                * yuv plane must be aligned to 2 pixels.
+                */
+               val = (src.x1 >> 16) % 2;
+               src.x1 += val << 16;
+               src.x2 += val << 16;
+       }
+
        actual_w = (src.x2 - src.x1) >> 16;
        actual_h = (src.y2 - src.y1) >> 16;
-       crtc_x = max(0, crtc_x);
-       crtc_y = max(0, crtc_y);
 
-       dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
-       dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+       dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
+       dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
 
-       offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+       offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
        offset += (src.y1 >> 16) * fb->pitches[0];
-       yrgb_mst = rk_obj->dma_addr + offset;
 
-       y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+       yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
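+       /* the vir stride registers are programmed in 32-bit words (pitch >> 2) */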
+       y_vir_stride = fb->pitches[0] >> 2;
+
+       if (is_yuv) {
+               int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+               int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+               int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+
+               uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+               if (!uv_obj) {
+                       DRM_ERROR("failed to get uv object from framebuffer\n");
+                       return -EINVAL;
+               }
+               rk_uv_obj = to_rockchip_obj(uv_obj);
+               uv_vir_stride = fb->pitches[1] >> 2;
+
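+               /* the chroma plane offset must account for h/v subsampling */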
+               offset = (src.x1 >> 16) * bpp / hsub;
+               offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
+
+               uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
+       }
 
        /*
         * If this plane update changes the plane's framebuffer, (or more
@@ -704,9 +942,22 @@ static int vop_update_plane_event(struct drm_plane *plane,
        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
        VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+       if (is_yuv) {
+               VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
+               VOP_WIN_SET(vop, win, uv_mst, uv_mst);
+       }
+
+       if (win->phy->scl)
+               scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
+                                   dest.x2 - dest.x1, dest.y2 - dest.y1,
+                                   fb->pixel_format);
+
        val = (actual_h - 1) << 16;
        val |= (actual_w - 1) & 0xffff;
        VOP_WIN_SET(vop, win, act_info, val);
+
+       val = (dest.y2 - dest.y1 - 1) << 16;
+       val |= (dest.x2 - dest.x1 - 1) & 0xffff;
        VOP_WIN_SET(vop, win, dsp_info, val);
        val = (dsp_sty - 1) << 16;
        val |= (dsp_stx - 1) & 0xffff;
index 63e9b3a084c5bcea84a339d69ee7133fa93416d8..a2d4ddb896fa37bb76936ddb61c81227ecfe9862 100644 (file)
@@ -198,4 +198,92 @@ enum factor_mode {
        ALPHA_SRC_GLOBAL,
 };
 
+enum scale_mode {
+       SCALE_NONE = 0x0,
+       SCALE_UP   = 0x1,
+       SCALE_DOWN = 0x2
+};
+
+enum lb_mode {
+       LB_YUV_3840X5 = 0x0,
+       LB_YUV_2560X8 = 0x1,
+       LB_RGB_3840X2 = 0x2,
+       LB_RGB_2560X4 = 0x3,
+       LB_RGB_1920X5 = 0x4,
+       LB_RGB_1280X8 = 0x5
+};
+
+enum scale_up_mode {
+       SCALE_UP_BIL = 0x0,
+       SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+       SCALE_DOWN_BIL = 0x0,
+       SCALE_DOWN_AVG = 0x1
+};
+
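+/*
+ * 16.16 fixed-point ratio; used for the 1/8x .. 8x scaling limits handed
+ * to drm_plane_helper_check_update().
+ */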
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+#define SCL_FT_DEFAULT_FIXPOINT_SHIFT  12
+#define SCL_MAX_VSKIPLINES             4
+#define MIN_SCL_FT_AFTER_VSKIP         1
+
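+/*
+ * Hardware scale coefficient, roughly (src - 1.5) * 2^shift / (dst - 1);
+ * src is pre-doubled and the shift dropped by one bit so the half-pixel
+ * term survives the integer arithmetic.
+ */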
+static inline uint16_t scl_cal_scale(int src, int dst, int shift)
+{
+       return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
+}
+
+#define GET_SCL_FT_BILI_DN(src, dst)   scl_cal_scale(src, dst, 12)
+#define GET_SCL_FT_BILI_UP(src, dst)   scl_cal_scale(src, dst, 16)
+#define GET_SCL_FT_BIC(src, dst)       scl_cal_scale(src, dst, 16)
+
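+/*
+ * Bilinear down-scale coefficient when vskiplines source lines are being
+ * skipped: scale from the post-skip height rather than src_h.
+ */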
+static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
+                                            int vskiplines)
+{
+       int act_height;
+
+       act_height = (src_h + vskiplines - 1) / vskiplines;
+
+       return GET_SCL_FT_BILI_DN(act_height, dst_h);
+}
+
+static inline enum scale_mode scl_get_scl_mode(int src, int dst)
+{
+       if (src < dst)
+               return SCALE_UP;
+       else if (src > dst)
+               return SCALE_DOWN;
+
+       return SCALE_NONE;
+}
+
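+/*
+ * Pick the largest line skip (4 or 2, falling back to 1) that still
+ * leaves at least dst_h * MIN_SCL_FT_AFTER_VSKIP source lines.
+ */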
+static inline int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
+{
+       uint32_t vskiplines;
+
+       for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
+               if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
+                       break;
+
+       return vskiplines;
+}
+
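+/*
+ * The scaler line buffer trades buffered lines for width; pick the mode
+ * matching the source width, with dedicated modes for narrower yuv.
+ */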
+static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
+{
+       int lb_mode;
+
+       if (width > 2560)
+               lb_mode = LB_RGB_3840X2;
+       else if (width > 1920)
+               lb_mode = LB_RGB_2560X4;
+       else if (!is_yuv)
+               lb_mode = LB_RGB_1920X5;
+       else if (width > 1280)
+               lb_mode = LB_YUV_3840X5;
+       else
+               lb_mode = LB_YUV_2560X8;
+
+       return lb_mode;
+}
+
 #endif /* _ROCKCHIP_DRM_VOP_H */
index 03854d606d589bd6730423e34aa48e5f1c388c13..e13b20bd9908d65b4b906d11f2a9d3aae0d8e847 100644 (file)
@@ -1052,10 +1052,15 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
        }
 
        /*
-        * Check if we were previously master, but now dropped.
+        * Check if we were previously master, but now dropped. In that
+        * case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);
+
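+               /*
+                * Ioctls marked DRM_RENDER_ALLOW need neither master nor
+                * authentication, so let them through.
+                */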
+               if (flags & DRM_RENDER_ALLOW)
+                       return NULL;
+
                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
index 5b8595b784296d80c9b27f91a874f12a4f750b9c..3361769842f4d91a3b7dc68a3e2818c2572d75a8 100644 (file)
@@ -911,6 +911,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
                                  "surface reference.\n");
                        return -EACCES;
                }
+               if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+                       DRM_ERROR("Locked master refused legacy "
+                                 "surface reference.\n");
+                       return -EACCES;
+               }
+
                handle = u_handle;
        }
 
index dbd16a2d37db6defcadf264d5f5a5b474d819be2..fd5aa47bd6892841a285dbce151afa71dac4b618 100644 (file)
@@ -358,7 +358,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
 typedef struct drm_i915_getparam {
-       s32 param;
+       __s32 param;
        /*
         * WARNING: Using pointers instead of fixed-size u64 means we need to write
         * compat32 code. Don't repeat this mistake.
index 0530e5a4c6b1f6a84ecc16a343e9d767dd7bf8af..d8fc96ed11e9d29ecde7654419d1554aaeb7ab16 100644 (file)
 
 /* Video buffer addresses */
 #define VIDW_BUF_START(_buff)                  (0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff)                        (0x40A0 + ((_buff) * 8))
 #define VIDW_BUF_START1(_buff)                 (0xA4 + ((_buff) * 8))
 #define VIDW_BUF_END(_buff)                    (0xD0 + ((_buff) * 8))
 #define VIDW_BUF_END1(_buff)                   (0xD4 + ((_buff) * 8))