ARM64: DTS: Add rk3399-firefly uart4 device, node as /dev/ttyS1
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
index bbf39b0ce03fae974161d79645fffe2eacfa970a..93baf7cafd4bf7a11bfdd2840fe11a13f11df5e5 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
@@ -1,6 +1,6 @@
 /*
  *
- * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -16,8 +16,7 @@
 
 
 
-/**
- * @file mali_kbase_pm_hwaccess.c
+/*
  * GPU backend implementation of base kernel power management APIs
  */
 
 #endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
 
 #include <mali_kbase_pm.h>
-#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+
 void kbase_pm_register_access_enable(struct kbase_device *kbdev)
 {
        struct kbase_pm_callback_conf *callbacks;
 
-#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
-       pm_runtime_enable(kbdev->dev);
-#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
 
        if (callbacks)
@@ -57,9 +56,6 @@ void kbase_pm_register_access_disable(struct kbase_device *kbdev)
                callbacks->power_off_callback(kbdev);
 
        kbdev->pm.backend.gpu_powered = false;
-#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
-       pm_runtime_disable(kbdev->dev);
-#endif
 }
 
 int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
@@ -71,6 +67,14 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
 
        mutex_init(&kbdev->pm.lock);
 
+       kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
+                       WQ_HIGHPRI | WQ_UNBOUND, 1);
+       if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
+               return -ENOMEM;
+
+       INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
+                       kbase_pm_gpu_poweroff_wait_wq);
+
        kbdev->pm.backend.gpu_powered = false;
        kbdev->pm.suspending = false;
 #ifdef CONFIG_MALI_DEBUG
@@ -97,6 +101,8 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
                                        callbacks->power_runtime_on_callback;
                kbdev->pm.backend.callback_power_runtime_off =
                                        callbacks->power_runtime_off_callback;
+               kbdev->pm.backend.callback_power_runtime_idle =
+                                       callbacks->power_runtime_idle_callback;
        } else {
                kbdev->pm.backend.callback_power_on = NULL;
                kbdev->pm.backend.callback_power_off = NULL;
@@ -106,6 +112,7 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
                kbdev->pm.callback_power_runtime_term = NULL;
                kbdev->pm.backend.callback_power_runtime_on = NULL;
                kbdev->pm.backend.callback_power_runtime_off = NULL;
+               kbdev->pm.backend.callback_power_runtime_idle = NULL;
        }
 
        /* Initialise the metrics subsystem */
@@ -122,10 +129,11 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
        init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
        kbdev->pm.active_count = 0;
 
-       spin_lock_init(&kbdev->pm.power_change_lock);
        spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
        spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);
 
+       init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
        if (kbase_pm_ca_init(kbdev) != 0)
                goto workq_fail;
 
@@ -160,46 +168,127 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
         * will wait for that state to be reached anyway */
 }
 
-bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
 {
+       struct kbase_device *kbdev = container_of(data, struct kbase_device,
+                       pm.backend.gpu_poweroff_wait_work);
+       struct kbase_pm_device_data *pm = &kbdev->pm;
+       struct kbase_pm_backend_data *backend = &pm->backend;
+       struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
        unsigned long flags;
-       bool cores_are_available;
-
-       lockdep_assert_held(&kbdev->pm.lock);
-
-       spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
 
-       /* Force all cores off */
-       kbdev->pm.backend.desired_shader_state = 0;
-
-       /* Force all cores to be unavailable, in the situation where
-        * transitions are in progress for some cores but not others,
-        * and kbase_pm_check_transitions_nolock can not immediately
-        * power off the cores */
-       kbdev->shader_available_bitmap = 0;
-       kbdev->tiler_available_bitmap = 0;
-       kbdev->l2_available_bitmap = 0;
+/* rk_ext: adaptation in DDK r14 for solution_1_for_glitch. */
+#define NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU
 
+#ifndef NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU
+       /* Wait for power transitions to complete. We do this with no locks held
+        * so that we don't deadlock with any pending workqueues */
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
-       cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+       kbase_pm_check_transitions_sync(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
-       /* Don't need 'cores_are_available', because we don't return anything */
-       CSTD_UNUSED(cores_are_available);
+#endif
 
-       spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+       mutex_lock(&js_devdata->runpool_mutex);
+       mutex_lock(&kbdev->pm.lock);
 
-       /* NOTE: We won't wait to reach the core's desired state, even if we're
-        * powering off the GPU itself too. It's safe to cut the power whilst
-        * they're transitioning to off, because the cores should be idle and
-        * all cache flushes should already have occurred */
+       if (!backend->poweron_required) {
+               WARN_ON(kbdev->l2_available_bitmap ||
+                               kbdev->shader_available_bitmap ||
+                               kbdev->tiler_available_bitmap);
+
+               /* Consume any change-state events */
+               kbase_timeline_pm_check_handle_event(kbdev,
+                                       KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+               /* Disable interrupts and turn the clock off */
+               if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
+                       /*
+                        * Page/bus faults are pending, must drop locks to
+                        * process.  Interrupts are disabled so no more faults
+                        * should be generated at this point.
+                        */
+                       mutex_unlock(&kbdev->pm.lock);
+                       mutex_unlock(&js_devdata->runpool_mutex);
+                       kbase_flush_mmu_wqs(kbdev);
+                       mutex_lock(&js_devdata->runpool_mutex);
+                       mutex_lock(&kbdev->pm.lock);
+
+                       /* Turn off clock now that faults have been handled.
+                        * We dropped locks, so poweron_required may have
+                        * changed - power back on if this is the case. */
+                       if (backend->poweron_required)
+                               kbase_pm_clock_on(kbdev, false);
+                       else
+                               WARN_ON(!kbase_pm_clock_off(kbdev,
+                                               backend->poweroff_is_suspend));
+               }
+       }
 
-       /* Consume any change-state events */
-       kbase_timeline_pm_check_handle_event(kbdev,
-                               KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
-       /* Disable interrupts and turn the clock off */
-       return kbase_pm_clock_off(kbdev, is_suspend);
+       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       backend->poweroff_wait_in_progress = false;
+       if (backend->poweron_required) {
+               backend->poweron_required = false;
+               kbase_pm_update_cores_state_nolock(kbdev);
+       }
+       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+       mutex_unlock(&kbdev->pm.lock);
+       mutex_unlock(&js_devdata->runpool_mutex);
+
+       wake_up(&kbdev->pm.backend.poweroff_wait);
+}
+
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+{
+       unsigned long flags;
+
+       lockdep_assert_held(&kbdev->pm.lock);
+
+       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       if (!kbdev->pm.backend.poweroff_wait_in_progress) {
+               /* Force all cores off */
+               kbdev->pm.backend.desired_shader_state = 0;
+               kbdev->pm.backend.desired_tiler_state = 0;
+
+               /* Force all cores to be unavailable, in the situation where
+                * transitions are in progress for some cores but not others,
+                * and kbase_pm_check_transitions_nolock can not immediately
+                * power off the cores */
+               kbdev->shader_available_bitmap = 0;
+               kbdev->tiler_available_bitmap = 0;
+               kbdev->l2_available_bitmap = 0;
+
+               kbdev->pm.backend.poweroff_wait_in_progress = true;
+               kbdev->pm.backend.poweroff_is_suspend = is_suspend;
+
+               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+               /* Kick off wq here. Callers will have to wait */
+               queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
+                               &kbdev->pm.backend.gpu_poweroff_wait_work);
+       } else {
+               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+       }
+}
+
+static bool is_poweroff_in_progress(struct kbase_device *kbdev)
+{
+       bool ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
+       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+       return ret;
+}
+
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+{
+       wait_event_killable(kbdev->pm.backend.poweroff_wait,
+                       is_poweroff_in_progress(kbdev));
 }
 
 int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
@@ -228,7 +317,10 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
 
        kbasep_pm_read_present_cores(kbdev);
 
-       kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;
+       kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+                       kbdev->pm.debug_core_mask[1] =
+                       kbdev->pm.debug_core_mask[2] =
+                       kbdev->gpu_props.props.raw_props.shader_present;
 
        /* Pretend the GPU is active to prevent a power policy turning the GPU
         * cores off */
@@ -268,15 +360,7 @@ void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
 
        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
-       if (!kbase_pm_do_poweroff(kbdev, false)) {
-               /* Page/bus faults are pending, must drop pm.lock to process.
-                * Interrupts are disabled so no more faults should be
-                * generated at this point */
-               mutex_unlock(&kbdev->pm.lock);
-               kbase_flush_mmu_wqs(kbdev);
-               mutex_lock(&kbdev->pm.lock);
-               WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
-       }
+       kbase_pm_do_poweroff(kbdev, false);
        mutex_unlock(&kbdev->pm.lock);
 }
 
@@ -294,6 +378,8 @@ void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
 
        /* Shut down the metrics subsystem */
        kbasep_pm_metrics_term(kbdev);
+
+       destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
 }
 
 void kbase_pm_power_changed(struct kbase_device *kbdev)
@@ -303,9 +389,8 @@ void kbase_pm_power_changed(struct kbase_device *kbdev)
 
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
-       spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
-       spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);
 
@@ -315,15 +400,20 @@ void kbase_pm_power_changed(struct kbase_device *kbdev)
                kbase_timeline_pm_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
 
-               spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
-               kbase_gpu_slot_update(kbdev);
-               spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
+               kbase_backend_slot_update(kbdev);
        }
+       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 }
 
-void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+               u64 new_core_mask_js0, u64 new_core_mask_js1,
+               u64 new_core_mask_js2)
 {
-       kbdev->pm.debug_core_mask = new_core_mask;
+       kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
+       kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
+       kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
+       kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
+                       new_core_mask_js2;
 
        kbase_pm_update_cores_state_nolock(kbdev);
 }
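
The hunk above replaces the single debug_core_mask with one mask per job slot, keeping debug_core_mask_all as their union for the parts of the power-management code that need a single combined mask. Below is a hypothetical usage sketch, not code from the patch; it assumes the caller already holds whatever lock kbase_pm_update_cores_state_nolock() expects, and real callers would normally derive the masks from user input (e.g. the driver's core_mask sysfs handler).

static void example_restrict_js2(struct kbase_device *kbdev)
{
	/* All cores physically present on this GPU (same field used in the
	 * powerup hunk above). */
	u64 present = kbdev->gpu_props.props.raw_props.shader_present;

	/* Slots 0 and 1 may use every present core; slot 2 only the first
	 * four. debug_core_mask_all becomes the union of the three masks. */
	kbase_pm_set_debug_core_mask(kbdev, present, present, present & 0xf);
}
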
@@ -347,19 +437,16 @@ void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
         * off prematurely) */
        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);
+
        kbase_pm_cancel_deferred_poweroff(kbdev);
-       if (!kbase_pm_do_poweroff(kbdev, true)) {
-               /* Page/bus faults are pending, must drop pm.lock to process.
-                * Interrupts are disabled so no more faults should be
-                * generated at this point */
-               mutex_unlock(&kbdev->pm.lock);
-               kbase_flush_mmu_wqs(kbdev);
-               mutex_lock(&kbdev->pm.lock);
-               WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
-       }
+       kbase_pm_do_poweroff(kbdev, true);
+
+       kbase_backend_timer_suspend(kbdev);
 
        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);
+
+       kbase_pm_wait_for_poweroff_complete(kbdev);
 }
 
 void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
@@ -368,8 +455,12 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
 
        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);
+
        kbdev->pm.suspending = false;
        kbase_pm_do_poweron(kbdev, true);
+
+       kbase_backend_timer_resume(kbdev);
+
        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);
 }
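
Taken together, the changes above make kbase_pm_do_poweroff() an asynchronous request: the caller only sets poweroff_wait_in_progress and queues gpu_poweroff_wait_work on the new ordered, high-priority workqueue, while kbase_hwaccess_pm_halt() and kbase_hwaccess_pm_suspend() get synchronous behaviour by calling kbase_pm_wait_for_poweroff_complete(). The following is a minimal, self-contained sketch of that queue/flag/wait-queue pattern using placeholder names rather than the driver's real symbols, under the assumption that the heavy shutdown work happens inside the worker.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/spinlock.h>

struct poweroff_ctx {
	struct workqueue_struct *wq;	/* ordered: WQ_UNBOUND, max_active = 1 */
	struct work_struct work;
	wait_queue_head_t done_wait;
	spinlock_t lock;		/* protects in_progress */
	bool in_progress;
};

static void poweroff_worker(struct work_struct *work)
{
	struct poweroff_ctx *ctx = container_of(work, struct poweroff_ctx, work);
	unsigned long flags;

	/* ... heavy shutdown work (clock off, pending fault handling) ... */

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->in_progress = false;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up(&ctx->done_wait);
}

static int poweroff_init(struct poweroff_ctx *ctx)
{
	ctx->wq = alloc_workqueue("poweroff_wait", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!ctx->wq)
		return -ENOMEM;

	INIT_WORK(&ctx->work, poweroff_worker);
	init_waitqueue_head(&ctx->done_wait);
	spin_lock_init(&ctx->lock);
	ctx->in_progress = false;

	return 0;
}

/* Asynchronous request: returns as soon as the work is queued. */
static void poweroff_request(struct poweroff_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (!ctx->in_progress) {
		ctx->in_progress = true;
		spin_unlock_irqrestore(&ctx->lock, flags);
		queue_work(ctx->wq, &ctx->work);
	} else {
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
}

static bool poweroff_done(struct poweroff_ctx *ctx)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&ctx->lock, flags);
	done = !ctx->in_progress;
	spin_unlock_irqrestore(&ctx->lock, flags);

	return done;
}

/* Synchronous callers (halt/suspend) block here until the worker is done. */
static void poweroff_wait_for_complete(struct poweroff_ctx *ctx)
{
	wait_event_killable(ctx->done_wait, poweroff_done(ctx));
}

static void poweroff_term(struct poweroff_ctx *ctx)
{
	/* destroy_workqueue() drains any work still queued on ctx->wq. */
	destroy_workqueue(ctx->wq);
}

Limiting the unbound, high-priority workqueue to one in-flight item keeps power-off processing serialized while still letting it run promptly on any CPU, and the killable wait mirrors the patch's kbase_pm_wait_for_poweroff_complete(), so a wedged GPU cannot leave the waiting task unkillable.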