MALI: rockchip: upgrade midgard DDK to r13p0-00rel0
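
This drop of the r13p0-00rel0 midgard DDK removes the KBASE_INSTR_STATE_RESETTING
handling from the hardware-counter (hwcnt) backend, together with the
suspended-state bookkeeping and the unused js_devdata local, presumably because
the reset/instrumentation interaction is handled elsewhere in this release (note
the new mali_kbase_hwaccess_instr.h include). Every deleted block below is a
variant of the same "unlock, wait for the reset to finish, relock" sequence
around kbdev->hwcnt.lock. As a rough illustration only (hypothetical names, with
userspace pthreads standing in for the kernel's spinlock and wait_event(); this
is not DDK code), the removed blocks all followed this shape:

    /* Illustrative sketch, not driver code: release the lock, sleep until
     * the reset completes, then retake the lock before re-checking state. */
    #include <pthread.h>
    #include <stdbool.h>

    struct hwcnt_state {
        pthread_mutex_t lock;   /* stands in for kbdev->hwcnt.lock */
        pthread_cond_t wait;    /* stands in for wait_event()/wake_up() */
        bool resetting;         /* stands in for KBASE_INSTR_STATE_RESETTING */
    };

    void wait_for_reset_locked(struct hwcnt_state *st)
    {
        /* Caller already holds st->lock, as the driver held hwcnt.lock. */
        while (st->resetting) {
            /* pthread_cond_wait() atomically drops and reacquires st->lock,
             * which the kernel code had to do by hand around wait_event(). */
            pthread_cond_wait(&st->wait, &st->lock);
        }
    }
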
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
index 4e70b34ffaa6707a9fba8e19680160c15c1c9d5d..3f06a10f7fed4e15d2718045fe3a7bab3dbdb8b5 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
@@ -22,8 +22,8 @@
  */
 
 #include <mali_kbase.h>
-#include <mali_kbase_config_defaults.h>
 #include <mali_midg_regmap.h>
+#include <mali_kbase_hwaccess_instr.h>
 #include <backend/gpu/mali_kbase_device_internal.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 #include <backend/gpu/mali_kbase_instr_internal.h>
@@ -41,14 +41,6 @@ static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
        u32 irq_mask;
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       /* Wait for any reset to complete */
-       while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.cache_clean_wait,
-                               kbdev->hwcnt.backend.state !=
-                                               KBASE_INSTR_STATE_RESETTING);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
        KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
                                        KBASE_INSTR_STATE_REQUEST_CLEAN);
 
@@ -75,19 +67,14 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
 {
        unsigned long flags, pm_flags;
        int err = -EINVAL;
-       struct kbasep_js_device_data *js_devdata;
        u32 irq_mask;
        int ret;
        u64 shader_cores_needed;
        u32 prfcnt_config;
 
-       KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);
-
        shader_cores_needed = kbase_pm_get_present_cores(kbdev,
                                                        KBASE_PM_CORE_SHADER);
 
-       js_devdata = &kbdev->js_data;
-
        /* alignment failure */
        if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
                goto out_err;
@@ -102,14 +89,6 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
-       if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               /* GPU is being reset */
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.wait,
-                                       kbdev->hwcnt.backend.triggered != 0);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
-
        if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
                /* Instrumentation is already enabled */
                spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -127,10 +106,6 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
        kbdev->hwcnt.kctx = kctx;
        /* Remember the dump address so we can reprogram it later */
        kbdev->hwcnt.addr = setup->dump_buffer;
-       /* Remember all the settings for suspend/resume */
-       if (&kbdev->hwcnt.suspended_state != setup)
-               memcpy(&kbdev->hwcnt.suspended_state, setup,
-                                       sizeof(kbdev->hwcnt.suspended_state));
 
        /* Request the clean */
        kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
@@ -199,14 +174,6 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
-       if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               /* GPU is being reset */
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.wait,
-                                       kbdev->hwcnt.backend.triggered != 0);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
-
        kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
        kbdev->hwcnt.backend.triggered = 1;
        wake_up(&kbdev->hwcnt.backend.wait);
@@ -373,15 +340,11 @@ void kbasep_cache_clean_worker(struct work_struct *data)
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
        /* Wait for our condition, and any reset to complete */
-       while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING ||
-                       kbdev->hwcnt.backend.state ==
-                                               KBASE_INSTR_STATE_CLEANING) {
+       while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
                spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
                wait_event(kbdev->hwcnt.backend.cache_clean_wait,
-                               (kbdev->hwcnt.backend.state !=
-                                               KBASE_INSTR_STATE_RESETTING &&
                                kbdev->hwcnt.backend.state !=
-                                               KBASE_INSTR_STATE_CLEANING));
+                                               KBASE_INSTR_STATE_CLEANING);
                spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
        }
        KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
@@ -414,9 +377,6 @@ void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
                                        &kbdev->hwcnt.backend.cache_clean_work);
                KBASE_DEBUG_ASSERT(ret);
        }
-       /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, We're in a reset,
-        * and the instrumentation state hasn't been restored yet -
-        * kbasep_reset_timeout_worker() will do the rest of the work */
 
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 }
@@ -444,10 +404,6 @@ void kbase_clean_caches_done(struct kbase_device *kbdev)
                        kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
                        wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
                }
-               /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, We're in a
-                * reset, and the instrumentation state hasn't been restored yet
-                * - kbasep_reset_timeout_worker() will do the rest of the work
-                */
 
                spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
        }
@@ -465,14 +421,6 @@ int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
-       if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               /* GPU is being reset */
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.wait,
-                                       kbdev->hwcnt.backend.triggered != 0);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
-
        if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
                err = -EINVAL;
                kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
@@ -496,14 +444,6 @@ int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
-       if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
-               /* GPU is being reset */
-               spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-               wait_event(kbdev->hwcnt.backend.wait,
-                                       kbdev->hwcnt.backend.triggered != 0);
-               spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-       }
-
        /* Check it's the context previously set up and we're not already
         * dumping */
        if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=