u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->pm.power_change_lock);
kbdev->pm.backend.desired_shader_state &=
~kbdev->pm.backend.shader_poweroff_pending;
kbdev = container_of(timer, struct kbase_device,
pm.backend.gpu_poweroff_timer);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
/* It is safe for this call to do nothing if the work item is already
* queued. The worker function will read the most up-to-date state of
}
if (kbdev->pm.backend.poweroff_timer_needed) {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
}
kbdev->pm.backend.poweroff_timer_running = false;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
return HRTIMER_NORESTART;
}
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
/* Only power off the GPU if a request is still pending */
if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
do_poweroff = true;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
if (do_poweroff) {
kbdev->pm.backend.poweroff_timer_needed = false;
kbdev->pm.backend.poweroff_timer_running = false;
/* Power off the GPU */
- kbase_pm_do_poweroff(kbdev, false);
+ if (!kbase_pm_do_poweroff(kbdev, false)) {
+ /* GPU can not be powered off at present */
+ kbdev->pm.backend.poweroff_timer_needed = true;
+ kbdev->pm.backend.poweroff_timer_running = true;
+ hrtimer_start(&kbdev->pm.backend.gpu_poweroff_timer,
+ kbdev->pm.gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
}
mutex_unlock(&kbdev->pm.lock);
kbdev->pm.backend.poweroff_timer_needed = false;
hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
kbdev->pm.backend.poweroff_timer_running = false;
/* If wq is already running but is held off by pm.lock, make sure it has
kbdev->pm.backend.tiler_poweroff_pending = 0;
kbdev->pm.backend.shader_poweroff_pending_time = 0;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
void kbase_pm_update_active(struct kbase_device *kbdev)
/* pm_current_policy will never be NULL while pm.lock is held */
KBASE_DEBUG_ASSERT(backend->pm_current_policy);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&pm->power_change_lock, flags);
active = backend->pm_current_policy->get_core_active(kbdev);
/* If a request was pending then the GPU was still
* powered, so no need to continue */
if (!kbdev->poweroff_pending) {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ spin_unlock_irqrestore(&pm->power_change_lock,
flags);
return;
}
HRTIMER_MODE_REL);
}
+ spin_unlock_irqrestore(&pm->power_change_lock, flags);
+
/* Power on the GPU and any cores requested by the policy */
- if (pm->backend.poweroff_wait_in_progress) {
- pm->backend.poweron_required = true;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- } else {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- kbase_pm_do_poweron(kbdev, false);
- }
+ kbase_pm_do_poweron(kbdev, false);
} else {
/* It is an error for the power policy to power off the GPU
* when there are contexts active */
pm->gpu_poweroff_time,
HRTIMER_MODE_REL);
}
- spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ spin_unlock_irqrestore(&pm->power_change_lock,
flags);
} else {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ spin_unlock_irqrestore(&pm->power_change_lock,
flags);
/* Power off the GPU immediately */
- kbase_pm_do_poweroff(kbdev, false);
+ if (!kbase_pm_do_poweroff(kbdev, false)) {
+ /* GPU can not be powered off at present
+ */
+ spin_lock_irqsave(
+ &pm->power_change_lock,
+ flags);
+ backend->poweroff_timer_needed = true;
+ if (!backend->poweroff_timer_running) {
+ backend->poweroff_timer_running
+ = true;
+ hrtimer_start(
+ &backend->gpu_poweroff_timer,
+ pm->gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(
+ &pm->power_change_lock,
+ flags);
+ }
}
} else {
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&pm->power_change_lock, flags);
}
}
}
bool cores_are_available;
bool do_poweroff = false;
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->pm.power_change_lock);
if (kbdev->pm.backend.pm_current_policy == NULL)
return;
- if (kbdev->pm.backend.poweroff_wait_in_progress)
- return;
- if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
- !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt) {
- /* We are trying to change in/out of protected mode - force all
- * cores off so that the L2 powers down */
- desired_bitmap = 0;
- desired_tiler_bitmap = 0;
- } else {
- desired_bitmap =
+ desired_bitmap =
kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
- desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+ desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+
+ if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
+ desired_tiler_bitmap = 1;
+ else
+ desired_tiler_bitmap = 0;
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
+ /* Unless XAFFINITY is supported, enable core 0 if tiler
+ * required, regardless of core availability */
if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
- desired_tiler_bitmap = 1;
- else
- desired_tiler_bitmap = 0;
-
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
- /* Unless XAFFINITY is supported, enable core 0 if tiler
- * required, regardless of core availability */
- if (kbdev->tiler_needed_cnt > 0 ||
- kbdev->tiler_inuse_cnt > 0)
- desired_bitmap |= 1;
- }
+ desired_bitmap |= 1;
}
if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
(kbdev->pm.backend.desired_tiler_state &
~desired_tiler_bitmap);
- if (kbdev->pm.poweroff_shader_ticks &&
- !kbdev->protected_mode_transition)
+ if (kbdev->pm.poweroff_shader_ticks)
kbdev->pm.backend.shader_poweroff_pending_time =
kbdev->pm.poweroff_shader_ticks;
else
kbdev->pm.backend.tiler_poweroff_pending |=
(kbdev->pm.backend.desired_tiler_state &
~desired_tiler_bitmap);
- if (kbdev->pm.poweroff_shader_ticks &&
- !kbdev->protected_mode_transition)
+ if (kbdev->pm.poweroff_shader_ticks)
kbdev->pm.backend.shader_poweroff_pending_time =
kbdev->pm.poweroff_shader_ticks;
else
{
unsigned long flags;
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
kbase_pm_update_cores_state_nolock(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
mutex_lock(&kbdev->pm.lock);
/* Remove the policy to prevent IRQ handlers from working on it */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
old_policy = kbdev->pm.backend.pm_current_policy;
kbdev->pm.backend.pm_current_policy = NULL;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
old_policy->id);
if (new_policy->init)
new_policy->init(kbdev);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
kbdev->pm.backend.pm_current_policy = new_policy;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
/* If any core power state changes were previously attempted, but
* couldn't be made because the policy was changing (current_policy was
void kbase_pm_request_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
+ unsigned long flags;
u64 cores;
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
cores = shader_cores;
while (cores) {
KBASE_PM_FUNC_ID_REQUEST_CORES_END,
change_gpu_state);
}
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
+ unsigned long flags;
+
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
while (shader_cores) {
int bitnum = fls64(shader_cores) - 1;
* - no-one will wait on the state change */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
+ unsigned long flags;
u64 prev_shader_needed; /* Just for tracing */
u64 prev_shader_inuse; /* Just for tracing */
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
prev_shader_needed = kbdev->shader_needed_bitmap;
prev_shader_inuse = kbdev->shader_inuse_bitmap;
* be chosen */
if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
shader_cores) {
- return (kbdev->pm.backend.poweroff_wait_in_progress ||
- kbdev->pm.backend.pm_current_policy == NULL) ?
- KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+
+ return KBASE_NEW_AFFINITY;
}
if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
(tiler_required && !kbdev->tiler_available_bitmap)) {
/* Trace ongoing core transition */
kbase_timeline_pm_l2_transition_start(kbdev);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
return KBASE_CORES_NOT_READY;
}
KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+
return KBASE_CORES_READY;
}
void kbase_pm_release_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
+ unsigned long flags;
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
while (shader_cores) {
int bitnum = fls64(shader_cores) - 1;
/* Trace that any state change completed immediately */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
bool tiler_required,
u64 shader_cores)
{
- unsigned long flags;
-
- kbase_pm_wait_for_poweroff_complete(kbdev);
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
kbase_pm_check_transitions_sync(kbdev);
}
unsigned long flags;
u32 prior_l2_users_count;
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
prior_l2_users_count = kbdev->l2_users_count++;
if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
kbase_pm_check_transitions_nolock(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
wait_event(kbdev->pm.backend.l2_powered_wait,
kbdev->pm.backend.l2_powered == 1);
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
{
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
kbdev->l2_users_count++;
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
{
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);
/* Trace that any state change completed immediately */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);