MALI: rockchip: upgrade midgard DDK to r13p0-00rel0
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
index 657717579c1cbed88ee66d4b6fcccffa14d03de7..00900a99a898fe6a173c90b2d4a75e1070985cc2 100644
@@ -26,8 +26,8 @@
 #include <mali_kbase_gator.h>
 #endif
 #include <mali_kbase_tlstream.h>
+#include <mali_kbase_vinstr.h>
 #include <mali_kbase_hw.h>
-#include <mali_kbase_config_defaults.h>
 #include <mali_kbase_hwaccess_jm.h>
 #include <backend/gpu/mali_kbase_device_internal.h>
 #include <backend/gpu/mali_kbase_irq_internal.h>
@@ -82,19 +82,30 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
        /* start MMU, medium priority, cache clean/flush on end, clean/flush on
         * start */
        cfg = kctx->as_nr;
+
        if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
 
 #ifndef CONFIG_MALI_COH_GPU
-       cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
-       cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
-#endif
+       if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
+               cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
+       else
+               cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+
+       if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END))
+               cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
+       else
+               cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+#endif /* CONFIG_MALI_COH_GPU */
+
+       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649) ||
+               !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3982))
+               cfg |= JS_CONFIG_START_MMU;
 
-       cfg |= JS_CONFIG_START_MMU;
        cfg |= JS_CONFIG_THREAD_PRI(8);
 
        if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
-               (katom->atom_flags & KBASE_KATOM_FLAG_SECURE))
+               (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
                cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
 
        if (kbase_hw_has_feature(kbdev,
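
The cache-maintenance change above is the interesting part of this hunk: instead of unconditionally issuing a clean+invalidate at job start and end, r13p0 lets each atom opt out via the new BASE_JD_REQ_SKIP_CACHE_START/END requirement bits. A minimal sketch of how a submitter could exploit this for a chain of atoms that run back-to-back on one slot (the helper below is hypothetical, not part of the driver):

```c
/* Hypothetical helper: in a dependency chain that stays on one job slot,
 * only the first atom needs the start flush and only the last needs the
 * end flush; interior atoms can skip both and save two cache walks each. */
static void skip_interior_cache_flushes(struct kbase_jd_atom **chain, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i > 0)
			chain[i]->core_req |= BASE_JD_REQ_SKIP_CACHE_START;
		if (i < n - 1)
			chain[i]->core_req |= BASE_JD_REQ_SKIP_CACHE_END;
	}
}
```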
@@ -465,7 +476,7 @@ static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
 {
        bool soft_stops_allowed = true;
 
-       if (kbase_jd_katom_is_secure(katom)) {
+       if (kbase_jd_katom_is_protected(katom)) {
                soft_stops_allowed = false;
        } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
                if ((katom->core_req & BASE_JD_REQ_T) != 0)
@@ -475,7 +486,7 @@ static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
 }
 
 static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
-                                                               u16 core_reqs)
+                                               base_jd_core_req core_reqs)
 {
        bool hard_stops_allowed = true;
 
@@ -489,7 +500,7 @@ static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
 void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
                                        int js,
                                        u32 action,
-                                       u16 core_reqs,
+                                       base_jd_core_req core_reqs,
                                        struct kbase_jd_atom *target_katom)
 {
        struct kbase_context *kctx = target_katom->kctx;
@@ -734,7 +745,6 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
                                struct kbase_jd_atom *target_katom)
 {
        struct kbase_device *kbdev;
-       struct kbasep_js_device_data *js_devdata;
        int js = target_katom->slot_nr;
        int priority = target_katom->sched_priority;
        int i;
@@ -742,7 +752,6 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
        KBASE_DEBUG_ASSERT(kctx != NULL);
        kbdev = kctx->kbdev;
        KBASE_DEBUG_ASSERT(kbdev != NULL);
-       js_devdata = &kbdev->js_data;
 
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 
@@ -1074,7 +1083,7 @@ void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
  * state when the soft/hard-stop action is complete
  */
 void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
-               u16 core_reqs, struct kbase_jd_atom *target_katom)
+               base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
 {
        u32 hw_action = action & JS_COMMAND_MASK;
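
The `u16 core_reqs` to `base_jd_core_req core_reqs` change here, and in the kbasep_hard_stop_allowed() and kbasep_job_slot_soft_or_hard_stop_do_action() hunks above, all share one cause: with new requirement bits such as BASE_JD_REQ_SKIP_CACHE_START/END the core-requirement bitfield no longer fits in 16 bits, so r13p0 passes the named type everywhere. The assumed shape of the typedef (verify against mali_base_kernel.h in this tree):

```c
/* Assumption: r13p0 widens the core-requirement bitfield to 32 bits. */
typedef u32 base_jd_core_req;
```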
 
@@ -1151,26 +1160,6 @@ static void kbase_debug_dump_registers(struct kbase_device *kbdev)
                kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
 }
 
-static void kbasep_save_hwcnt_setup(struct kbase_device *kbdev,
-                               struct kbase_context *kctx,
-                               struct kbase_uk_hwcnt_setup *hwcnt_setup)
-{
-       hwcnt_setup->dump_buffer =
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) &
-                                                               0xffffffff;
-       hwcnt_setup->dump_buffer |= (u64)
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) <<
-                                                                       32;
-       hwcnt_setup->jm_bm =
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx);
-       hwcnt_setup->shader_bm =
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), kctx);
-       hwcnt_setup->tiler_bm =
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), kctx);
-       hwcnt_setup->mmu_l2_bm =
-               kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), kctx);
-}
-
 static void kbasep_reset_timeout_worker(struct work_struct *data)
 {
        unsigned long flags, mmu_flags;
@@ -1178,10 +1167,8 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
        int i;
        ktime_t end_timestamp = ktime_get();
        struct kbasep_js_device_data *js_devdata;
-       struct kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
-       enum kbase_instr_state bckp_state;
        bool try_schedule = false;
-       bool restore_hwc = false;
+       bool silent = false;
 
        KBASE_DEBUG_ASSERT(data);
 
@@ -1191,8 +1178,16 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
        KBASE_DEBUG_ASSERT(kbdev);
        js_devdata = &kbdev->js_data;
 
+       if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+                       KBASE_RESET_GPU_SILENT)
+               silent = true;
+
        KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
 
+       /* Suspend vinstr.
+        * This call will block until vinstr is suspended. */
+       kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
        /* Make sure the timer has completed - this cannot be done from
         * interrupt context, so this cannot be done within
         * kbasep_try_reset_gpu_early. */
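
This hunk introduces the "silent" reset flavour: the worker samples the backend reset state once at entry and, when it reads KBASE_RESET_GPU_SILENT, suppresses the dev_err() output later in the function. A sketch of the state machine the atomics in this file cycle through (the names appear in this diff; the numeric values are an assumption based on the backend defs header):

```c
/* Assumed reset-state machine kept in kbdev->hwaccess.backend.reset_gpu: */
#define KBASE_RESET_GPU_NOT_PENDING	0 /* no reset in flight */
#define KBASE_RESET_GPU_PREPARED	1 /* prepare_to_reset_gpu() done */
#define KBASE_RESET_GPU_COMMITTED	2 /* reset queued for the worker */
#define KBASE_RESET_GPU_HAPPENING	3 /* worker is resetting the GPU */
#define KBASE_RESET_GPU_SILENT		4 /* as COMMITTED, but logs nothing */
```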
@@ -1242,39 +1237,14 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
         * assume that anything that is still left on the GPU is stuck there and
         * we'll kill it when we reset the GPU */
 
-       dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+       if (!silent)
+               dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
                                                                RESET_TIMEOUT);
 
-       spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-
-       if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               /* the same interrupt handler preempted itself */
-               /* GPU is being reset */
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.wait,
-                                       kbdev->hwcnt.backend.triggered != 0);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
-       /* Save the HW counters setup */
-       if (kbdev->hwcnt.kctx != NULL) {
-               struct kbase_context *kctx = kbdev->hwcnt.kctx;
-
-               if (kctx->jctx.sched_info.ctx.is_scheduled) {
-                       kbasep_save_hwcnt_setup(kbdev, kctx, &hwcnt_setup);
-
-                       restore_hwc = true;
-               }
-       }
-
        /* Output the state of some interesting registers to help in the
         * debugging of GPU resets */
-       kbase_debug_dump_registers(kbdev);
-
-       bckp_state = kbdev->hwcnt.backend.state;
-       kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_RESETTING;
-       kbdev->hwcnt.backend.triggered = 0;
-
-       spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+       if (!silent)
+               kbase_debug_dump_registers(kbdev);
 
        /* Reset the GPU */
        kbase_pm_init_hw(kbdev, 0);
@@ -1314,112 +1284,14 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
        kbase_disjoint_state_down(kbdev);
 
        wake_up(&kbdev->hwaccess.backend.reset_wait);
-       dev_err(kbdev->dev, "Reset complete");
+       if (!silent)
+               dev_err(kbdev->dev, "Reset complete");
 
        if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending)
                try_schedule = true;
 
        mutex_unlock(&js_devdata->runpool_mutex);
 
-       spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       /* Restore the HW counters setup */
-       if (restore_hwc) {
-               struct kbase_context *kctx = kbdev->hwcnt.kctx;
-               u32 prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
-
-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
-               u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
-               u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
-                       >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
-               int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
-
-               if (arch_v6)
-                       prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
-#endif
-
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
-                               prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
-
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
-                               hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx);
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
-                               hwcnt_setup.dump_buffer >> 32,        kctx);
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
-                               hwcnt_setup.jm_bm,                    kctx);
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
-                               hwcnt_setup.shader_bm,                kctx);
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
-                               hwcnt_setup.mmu_l2_bm,                kctx);
-
-               /* Due to PRLAM-8186 we need to disable the Tiler before we
-                * enable the HW counter dump. */
-               if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
-                       kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
-                                               0, kctx);
-               else
-                       kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
-                                               hwcnt_setup.tiler_bm, kctx);
-
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
-                               prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL,
-                               kctx);
-
-               /* If HW has PRLAM-8186 we can now re-enable the tiler HW
-                * counters dump */
-               if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
-                       kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
-                                               hwcnt_setup.tiler_bm, kctx);
-       }
-       kbdev->hwcnt.backend.state = bckp_state;
-       switch (kbdev->hwcnt.backend.state) {
-       /* Cases for waking kbasep_cache_clean_worker worker */
-       case KBASE_INSTR_STATE_CLEANED:
-               /* Cache-clean IRQ occurred, but we reset:
-                * Wakeup incase the waiter saw RESETTING */
-       case KBASE_INSTR_STATE_REQUEST_CLEAN:
-               /* After a clean was requested, but before the regs were
-                * written:
-                * Wakeup incase the waiter saw RESETTING */
-               wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
-               break;
-       case KBASE_INSTR_STATE_CLEANING:
-               /* Either:
-                * 1) We've not got the Cache-clean IRQ yet: it was lost, or:
-                * 2) We got it whilst resetting: it was voluntarily lost
-                *
-                * So, move to the next state and wakeup: */
-               kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
-               wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
-               break;
-
-       /* Cases for waking anyone else */
-       case KBASE_INSTR_STATE_DUMPING:
-               /* If dumping, abort the dump, because we may've lost the IRQ */
-               kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
-               kbdev->hwcnt.backend.triggered = 1;
-               wake_up(&kbdev->hwcnt.backend.wait);
-               break;
-       case KBASE_INSTR_STATE_DISABLED:
-       case KBASE_INSTR_STATE_IDLE:
-       case KBASE_INSTR_STATE_FAULT:
-               /* Every other reason: wakeup in that state */
-               kbdev->hwcnt.backend.triggered = 1;
-               wake_up(&kbdev->hwcnt.backend.wait);
-               break;
-
-       /* Unhandled cases */
-       case KBASE_INSTR_STATE_RESETTING:
-       default:
-               BUG();
-               break;
-       }
-       spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-
-       /* Resume the vinstr core */
-       kbase_vinstr_hwc_resume(kbdev->vinstr_ctx);
-
-       /* Note: counter dumping may now resume */
-
        mutex_lock(&kbdev->pm.lock);
 
        /* Find out what cores are required now */
@@ -1439,6 +1311,10 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
        }
 
        kbase_pm_context_idle(kbdev);
+
+       /* Release vinstr */
+       kbase_vinstr_resume(kbdev->vinstr_ctx);
+
        KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
 }
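
Taken together, the last three hunks replace the hand-rolled PRFCNT save/restore and the KBASE_INSTR_STATE_RESETTING juggling with a much simpler bracket: suspend vinstr before touching the GPU, resume it once power management has settled, and let vinstr reprogram the counters itself. A condensed outline of the new worker ordering (a sketch, not the full function):

```c
static void reset_worker_outline(struct kbase_device *kbdev)
{
	/* Blocks until any in-flight counter dump has finished. */
	kbase_vinstr_suspend(kbdev->vinstr_ctx);

	kbase_pm_init_hw(kbdev, 0);	/* the actual GPU reset */

	/* ... re-enable IRQs, restore PM/JS state, resubmit atoms ... */

	/* Counter dumping may resume; vinstr re-applies its own counter
	 * setup, so nothing needs saving across the reset any more. */
	kbase_vinstr_resume(kbdev->vinstr_ctx);
}
```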
 
@@ -1520,7 +1396,7 @@ static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
  *
  * Return:
  *   The function returns a boolean which should be interpreted as follows:
- *   true - Prepared for reset, kbase_reset_gpu should be called.
+ *   true - Prepared for reset, kbase_reset_gpu_locked should be called.
  *   false - Another thread is performing a reset, kbase_reset_gpu should
  *   not be called.
  */
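
The doc fix above matters because the _locked and unlocked variants pair up: a true return from kbase_prepare_to_reset_gpu_locked() obliges the caller to invoke kbase_reset_gpu_locked(), not kbase_reset_gpu(). The usual caller idiom (a sketch, with the relevant lock already held):

```c
if (kbase_prepare_to_reset_gpu_locked(kbdev))
	kbase_reset_gpu_locked(kbdev);
```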
@@ -1614,4 +1490,29 @@ void kbase_reset_gpu_locked(struct kbase_device *kbdev)
        /* Try resetting early */
        kbasep_try_reset_gpu_early_locked(kbdev);
 }
+
+void kbase_reset_gpu_silent(struct kbase_device *kbdev)
+{
+       if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+                                               KBASE_RESET_GPU_NOT_PENDING,
+                                               KBASE_RESET_GPU_SILENT) !=
+                                               KBASE_RESET_GPU_NOT_PENDING) {
+               /* Some other thread is already resetting the GPU */
+               return;
+       }
+
+       kbase_disjoint_state_up(kbdev);
+
+       queue_work(kbdev->hwaccess.backend.reset_workq,
+                       &kbdev->hwaccess.backend.reset_work);
+}
+
+bool kbase_reset_gpu_active(struct kbase_device *kbdev)
+{
+       if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+                       KBASE_RESET_GPU_NOT_PENDING)
+               return false;
+
+       return true;
+}
 #endif /* KBASE_GPU_RESET_EN */
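
kbase_reset_gpu_silent() relies on atomic_cmpxchg() so that exactly one caller can move the state out of KBASE_RESET_GPU_NOT_PENDING; losers of the race simply return, since a reset is already on its way. A hedged usage sketch of the two new entry points (the polling caller is hypothetical; real code would sleep on hwaccess.backend.reset_wait rather than spin):

```c
/* Hypothetical caller: reset the GPU without log spam, then wait until
 * the reset worker has run before touching the hardware again. */
static void quiet_reset_and_wait(struct kbase_device *kbdev)
{
	kbase_reset_gpu_silent(kbdev);

	while (kbase_reset_gpu_active(kbdev))
		cpu_relax();	/* illustration only; prefer wait_event() */
}
```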