2 * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 #include "mali_kernel_common.h"
14 #include "mali_osk_mali.h"
15 #include "mali_scheduler.h"
16 #include "mali_group.h"
17 #include "mali_pm_domain.h"
20 #include "mali_executor.h"
21 #include "mali_control_timer.h"
/* Debug/statistics counters: how often runtime-PM resume and PM updates ran. */
24 u32 num_pm_runtime_resume = 0;
25 u32 num_pm_updates = 0;
26 u32 num_pm_updates_up = 0;
27 u32 num_pm_updates_down = 0;
/* PMU mask bit reserved for the dummy/global (always-on) domain. */
30 #define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
32 /* lock protecting power state (including pm_domains) */
33 static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
35 /* the wanted domain mask (protected by pm_lock_state) */
36 static u32 pd_mask_wanted = 0;
38 /* used to deferring the actual power changes */
39 static _mali_osk_wq_work_t *pm_work = NULL;
41 /* lock protecting power change execution */
42 static _mali_osk_mutex_t *pm_lock_exec = NULL;
44 /* PMU domains which are actually powered on (protected by pm_lock_exec) */
45 static u32 pmu_mask_current = 0;
48 * domains which marked as powered on (protected by pm_lock_exec)
49 * This can be different from pmu_mask_current right after GPU power on
50 * if the PMU domains default to powered up.
52 static u32 pd_mask_current = 0;
/* Per-domain-index PMU mask; last slot is the fixed dummy-domain mask. */
54 static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56 1 << MALI_DOMAIN_INDEX_DUMMY
59 /* The relative core power cost */
60 #define MALI_GP_COST 3
61 #define MALI_PP_COST 6
62 #define MALI_L2_COST 1
65 *We have MALI_MAX_NUMBER_OF_PP_PHYSICAL_CORES + 1 rows in this matrix
66 *because we must store the mask of different pp cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
/* Row = number of PP cores requested; column = PM domain index; value = cores to take from that domain. */
68 static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
70 * Keep track of runtime PM state, so that we know
71 * how to resume during OS resume.
73 #ifdef CONFIG_PM_RUNTIME
74 static mali_bool mali_pm_runtime_active = MALI_FALSE;
76 /* when kernel don't enable PM_RUNTIME, set the flag always true,
77 * for GPU will not power off by runtime */
78 static mali_bool mali_pm_runtime_active = MALI_TRUE;
/* Forward declarations for the static helpers defined below. */
81 static void mali_pm_state_lock(void);
82 static void mali_pm_state_unlock(void);
83 static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
84 static void mali_pm_set_pmu_domain_config(void);
85 static u32 mali_pm_get_registered_cores_mask(void);
86 static void mali_pm_update_sync_internal(void);
87 static mali_bool mali_pm_common_suspend(void);
88 static void mali_pm_update_work(void *data);
/* Debug-string helpers; both require pm_lock_exec held (shared static buffers). */
90 const char *mali_pm_mask_to_string(u32 mask);
91 const char *mali_pm_group_stats_to_string(void);
/*
 * Initialize the Mali PM module: create the state spinlock, the execution
 * mutex, the deferred-update work item, configure PMU domains (custom or
 * HW-detected) and create the PM domain objects (at least the dummy domain).
 * Returns _MALI_OSK_ERR_OK on success, an error code otherwise.
 * NOTE(review): early-return error paths do not tear down already-created
 * resources here; presumably mali_pm_terminate() is called on failure — confirm.
 */
94 _mali_osk_errcode_t mali_pm_initialize(void)
96 _mali_osk_errcode_t err;
97 struct mali_pmu_core *pmu;
/* Both locks use the PM_STATE lock-order class so ordering is enforced. */
99 pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
100 _MALI_OSK_LOCK_ORDER_PM_STATE);
101 if (NULL == pm_lock_state) {
103 return _MALI_OSK_ERR_FAULT;
106 pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
107 _MALI_OSK_LOCK_ORDER_PM_STATE);
108 if (NULL == pm_lock_exec) {
110 return _MALI_OSK_ERR_FAULT;
/* Deferred work used by mali_pm_update_async() to run mali_pm_update_work(). */
113 pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
114 if (NULL == pm_work) {
116 return _MALI_OSK_ERR_FAULT;
119 pmu = mali_pmu_get_global_pmu_core();
122 * We have a Mali PMU, set the correct domain
123 * configuration (default or custom)
126 u32 registered_cores_mask;
128 mali_pm_set_pmu_domain_config();
130 registered_cores_mask = mali_pm_get_registered_cores_mask();
131 mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
133 MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
136 /* Create all power domains needed (at least one dummy domain) */
137 err = mali_pm_create_pm_domains();
138 if (_MALI_OSK_ERR_OK != err) {
143 return _MALI_OSK_ERR_OK;
/*
 * Tear down the PM module: delete the deferred work item, terminate all PM
 * domains and destroy both locks. Safe to call after a partially failed
 * mali_pm_initialize() (each resource is NULL-checked before release).
 */
146 void mali_pm_terminate(void)
148 if (NULL != pm_work) {
149 _mali_osk_wq_delete_work(pm_work);
153 mali_pm_domain_terminate();
155 if (NULL != pm_lock_exec) {
156 _mali_osk_mutex_term(pm_lock_exec);
160 if (NULL != pm_lock_state) {
161 _mali_osk_spinlock_irq_term(pm_lock_state);
162 pm_lock_state = NULL;
/*
 * Register an L2 cache core with the PM domain configured for @domain_index.
 * If no PMU domain is configured for that index, the L2 is attached to the
 * always-on dummy domain and domain_config[] is updated to record that.
 * Returns the domain the L2 was actually registered in.
 */
166 struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
167 struct mali_l2_cache_core *l2_cache)
169 struct mali_pm_domain *domain;
171 domain = mali_pm_domain_get_from_mask(domain_config[domain_index])
172 if (NULL == domain) {
173 MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
174 domain = mali_pm_domain_get_from_index(
175 MALI_DOMAIN_INDEX_DUMMY);
176 domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
178 MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
181 MALI_DEBUG_ASSERT(NULL != domain);
183 mali_pm_domain_add_l2_cache(domain, l2_cache);
185 return domain; /* return the actual domain this was registered in */
/*
 * Register a group (GP or PP core + MMU) with the PM domain configured for
 * @domain_index. Mirrors mali_pm_register_l2_cache(): falls back to the
 * always-on dummy domain when no PMU domain is configured for the index.
 * Returns the domain the group was actually registered in.
 */
188 struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
189 struct mali_group *group)
191 struct mali_pm_domain *domain;
193 domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
194 if (NULL == domain) {
195 MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
196 domain = mali_pm_domain_get_from_index(
197 MALI_DOMAIN_INDEX_DUMMY);
198 domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
200 MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
203 MALI_DEBUG_ASSERT(NULL != domain);
205 mali_pm_domain_add_group(domain, group);
207 return domain; /* return the actual domain this was registered in */
/*
 * Take a PM reference on each of @num_domains domains and accumulate their
 * masks into pd_mask_wanted. Returns MALI_TRUE if every requested domain
 * (and its group, when given) is already powered on, MALI_FALSE otherwise.
 * NOTE(review): the lines assigning ret = MALI_FALSE inside the two
 * "not powered" branches, and the final return, are elided in this view.
 */
210 mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
211 struct mali_group **groups,
214 mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
217 mali_pm_state_lock();
219 for (i = 0; i < num_domains; i++) {
220 MALI_DEBUG_ASSERT_POINTER(domains[i]);
221 pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
222 if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
224 * Tell caller that the corresponding group
225 * was not already powered on.
230 * There is a time gap between we power on the domain and
231 * set the power state of the corresponding groups to be on.
233 if (NULL != groups[i] &&
234 MALI_FALSE == mali_group_power_is_on(groups[i])) {
240 MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
242 mali_pm_state_unlock();
/*
 * Drop a PM reference on each of @num_domains domains. Domains whose last
 * reference was dropped are removed from pd_mask_wanted. Returns MALI_TRUE
 * when one or more domains may now be powered down (caller should trigger a
 * PM update), MALI_FALSE when everything must stay on.
 * NOTE(review): the early "return MALI_FALSE" for an empty mask and the
 * final return are on lines elided in this view.
 */
247 mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
254 mali_pm_state_lock();
256 for (i = 0; i < num_domains; i++) {
257 MALI_DEBUG_ASSERT_POINTER(domains[i]);
258 mask |= mali_pm_domain_ref_put(domains[i]);
262 /* return false, all domains should still stay on */
265 /* Assert that we are dealing with a change */
266 MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
268 /* Update our desired domain mask */
269 pd_mask_wanted &= ~mask;
271 /* return true; one or more domains can now be powered down */
275 MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
277 mali_pm_state_unlock();
/*
 * Called before HW initialization: take a synchronous runtime-PM device
 * reference so the GPU is powered, then force every PMU domain on so all
 * cores can be probed/initialized.
 */
282 void mali_pm_init_begin(void)
284 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
286 _mali_osk_pm_dev_ref_get_sync();
288 /* Ensure all PMU domains are on */
290 mali_pmu_power_up_all(pmu);
/*
 * Called after HW initialization: power all PMU domains back off and release
 * the runtime-PM device reference taken by mali_pm_init_begin().
 */
294 void mali_pm_init_end(void)
296 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
298 /* Ensure all PMU domains are off */
300 mali_pmu_power_down_all(pmu);
303 _mali_osk_pm_dev_ref_put();
/*
 * Synchronously apply pending power-domain changes, but only while runtime PM
 * reports the GPU as powered (mali_pm_runtime_active).
 * NOTE(review): the matching mali_pm_exec_lock() call is on a line elided in
 * this view — pm_lock_exec is clearly held since it is released at the end.
 */
306 void mali_pm_update_sync(void)
310 if (MALI_TRUE == mali_pm_runtime_active) {
312 * Only update if GPU is powered on.
313 * Deactivation of the last group will result in both a
314 * deferred runtime PM suspend operation and
315 * deferred execution of this function.
316 * mali_pm_runtime_active will be false if runtime PM
317 * executed first and thus the GPU is now fully powered off.
319 mali_pm_update_sync_internal();
322 mali_pm_exec_unlock();
325 void mali_pm_update_async(void)
327 _mali_osk_wq_schedule_work(pm_work);
/*
 * OS (system) suspend entry: stop job execution, suspend the control timer,
 * and power everything down via mali_pm_common_suspend(). Since the executor
 * is suspended first, no references can remain and the common suspend must
 * succeed (asserted).
 * NOTE(review): the declaration of ret, the mali_pm_exec_lock() call, and any
 * use of the os_suspend parameter are on lines elided in this view.
 */
330 void mali_pm_os_suspend(mali_bool os_suspend)
334 MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
336 /* Suspend execution of all jobs, and go to inactive state */
337 mali_executor_suspend();
340 mali_control_timer_suspend(MALI_TRUE);
345 ret = mali_pm_common_suspend();
347 MALI_DEBUG_ASSERT(MALI_TRUE == ret);
350 mali_pm_exec_unlock();
/*
 * OS (system) resume entry: verify the suspended state left by
 * mali_pm_os_suspend(), re-read the PMU mask if runtime PM was active
 * (the PMU was reset across suspend), apply pending power changes and
 * restart job execution.
 * NOTE(review): the mali_pm_exec_lock() call and the mali_pmu_reset() call in
 * the runtime-active branch are on lines elided in this view.
 */
353 void mali_pm_os_resume(void)
355 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
357 MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
362 mali_pm_state_lock();
364 /* Assert that things are as we left them in os_suspend(). */
365 MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
366 MALI_DEBUG_ASSERT(0 == pd_mask_current);
367 MALI_DEBUG_ASSERT(0 == pmu_mask_current);
369 MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
371 mali_pm_state_unlock();
374 if (MALI_TRUE == mali_pm_runtime_active) {
375 /* Runtime PM was active, so reset PMU */
378 pmu_mask_current = mali_pmu_get_mask(pmu);
380 MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
383 mali_pm_update_sync_internal();
386 mali_pm_exec_unlock();
388 /* Start executing jobs again */
389 mali_executor_resume();
/*
 * Runtime-PM suspend callback. Tries to move the SW power state to "all off"
 * without touching individual PMU domains (the whole GPU loses power anyway).
 * Returns MALI_TRUE when suspend succeeded, MALI_FALSE when it raced with a
 * concurrent power-up request and must be aborted; in that case the pending
 * power-up is processed instead.
 * NOTE(review): declaration of ret, the mali_pm_exec_lock() call and the
 * final return are on lines elided in this view.
 */
392 mali_bool mali_pm_runtime_suspend(void)
396 MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
401 * Put SW state directly into "off" state, and do not bother to power
402 * down each power domain, because entire GPU will be powered off
404 * For runtime PM suspend, in contrast to OS suspend, there is a race
405 * between this function and the mali_pm_update_sync_internal(), which
408 ret = mali_pm_common_suspend();
409 if (MALI_TRUE == ret) {
410 mali_pm_runtime_active = MALI_FALSE;
413 * Process the "power up" instead,
414 * which could have been "lost"
416 mali_pm_update_sync_internal();
419 mali_pm_exec_unlock();
/*
 * Runtime-PM resume callback. Marks runtime PM active, verifies the "all
 * off" state left by runtime_suspend(), resets the PMU and re-reads its
 * default mask, then applies whatever pd_mask_wanted has accumulated
 * (normally non-zero because a queued job triggered the resume).
 * NOTE(review): the mali_pm_exec_lock() call and the mali_pmu_reset() call
 * are on lines elided in this view.
 */
424 void mali_pm_runtime_resume(void)
426 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
430 mali_pm_runtime_active = MALI_TRUE;
433 ++num_pm_runtime_resume;
435 mali_pm_state_lock();
438 * Assert that things are as we left them in runtime_suspend(),
439 * except for pd_mask_wanted which normally will be the reason we
440 * got here (job queued => domains wanted)
442 MALI_DEBUG_ASSERT(0 == pd_mask_current);
443 MALI_DEBUG_ASSERT(0 == pmu_mask_current);
445 mali_pm_state_unlock();
450 pmu_mask_current = mali_pmu_get_mask(pmu);
451 MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
455 * Normally we are resumed because a job has just been queued.
456 * pd_mask_wanted should thus be != 0.
457 * It is however possible for others to take a Mali Runtime PM ref
458 * without having a job queued.
459 * We should however always call mali_pm_update_sync_internal(),
460 * because this will take care of any potential mismatch between
461 * pmu_mask_current and pd_mask_current.
463 mali_pm_update_sync_internal();
465 mali_pm_exec_unlock();
468 #if MALI_STATE_TRACKING
/*
 * Debugfs/state-dump helper: append a textual description of @domain
 * (id, mask, use count, current and wanted power state) to a buffer.
 * Returns the number of characters written.
 * NOTE(review): the remaining parameters (char *buf, u32 size), the
 * declaration of n, the "On"/"Off" ternary results and the final return
 * are on lines elided in this view.
 */
469 u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
474 n += _mali_osk_snprintf(buf + n, size - n,
475 "\tPower domain: id %u\n",
476 mali_pm_domain_get_id(domain));
478 n += _mali_osk_snprintf(buf + n, size - n,
479 "\t\tMask: 0x%04x\n",
480 mali_pm_domain_get_mask(domain));
482 n += _mali_osk_snprintf(buf + n, size - n,
483 "\t\tUse count: %u\n",
484 mali_pm_domain_get_use_count(domain));
486 n += _mali_osk_snprintf(buf + n, size - n,
487 "\t\tCurrent power state: %s\n",
488 (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
491 n += _mali_osk_snprintf(buf + n, size - n,
492 "\t\tWanted power state: %s\n",
493 (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
500 static void mali_pm_state_lock(void)
502 _mali_osk_spinlock_irq_lock(pm_lock_state);
505 static void mali_pm_state_unlock(void)
507 _mali_osk_spinlock_irq_unlock(pm_lock_state);
510 void mali_pm_exec_lock(void)
512 _mali_osk_mutex_wait(pm_lock_exec);
515 void mali_pm_exec_unlock(void)
517 _mali_osk_mutex_signal(pm_lock_exec);
/*
 * Mark every domain in @power_up_mask as powered on (updates pd_mask_current
 * and each domain object), and collect the groups and L2 caches belonging to
 * those domains into the caller-supplied arrays. The caller must notify
 * those groups/L2s AFTER dropping pm_lock_state.
 * Requires both pm_lock_exec and pm_lock_state held (asserted).
 * NOTE(review): list-iteration continuation lines and the counter increments
 * ((*num_l2_up)++ / (*num_groups_up)++) are on lines elided in this view.
 */
520 static void mali_pm_domain_power_up(u32 power_up_mask,
521 struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
523 struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
527 u32 notify_mask = power_up_mask;
529 MALI_DEBUG_ASSERT(0 != power_up_mask);
530 MALI_DEBUG_ASSERT_POINTER(groups_up);
531 MALI_DEBUG_ASSERT_POINTER(num_groups_up);
532 MALI_DEBUG_ASSERT(0 == *num_groups_up);
533 MALI_DEBUG_ASSERT_POINTER(l2_up);
534 MALI_DEBUG_ASSERT_POINTER(num_l2_up);
535 MALI_DEBUG_ASSERT(0 == *num_l2_up);
537 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
538 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
541 ("PM update: Powering up domains: . [%s]\n",
542 mali_pm_mask_to_string(power_up_mask)));
544 pd_mask_current |= power_up_mask;
/* Walk set bits from highest to lowest, clearing each as it is handled. */
546 domain_bit = _mali_osk_fls(notify_mask);
547 while (0 != domain_bit) {
548 u32 domain_id = domain_bit - 1;
549 struct mali_pm_domain *domain =
550 mali_pm_domain_get_from_index(
552 struct mali_l2_cache_core *l2_cache;
553 struct mali_l2_cache_core *l2_cache_tmp;
554 struct mali_group *group;
555 struct mali_group *group_tmp;
557 /* Mark domain as powered up */
558 mali_pm_domain_set_power_on(domain, MALI_TRUE);
561 * Make a note of the L2 and/or group(s) to notify
562 * (need to release the PM state lock before doing so)
565 _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
567 mali_pm_domain_get_l2_cache_list(
569 struct mali_l2_cache_core,
571 MALI_DEBUG_ASSERT(*num_l2_up <
572 MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
573 l2_up[*num_l2_up] = l2_cache;
577 _MALI_OSK_LIST_FOREACHENTRY(group,
579 mali_pm_domain_get_group_list(domain),
582 MALI_DEBUG_ASSERT(*num_groups_up <
583 MALI_MAX_NUMBER_OF_GROUPS);
584 groups_up[*num_groups_up] = group;
589 /* Remove current bit and find next */
590 notify_mask &= ~(1 << (domain_id));
591 domain_bit = _mali_osk_fls(notify_mask);
/*
 * Mirror of mali_pm_domain_power_up(): mark every domain in
 * @power_down_mask as powered off (updates pd_mask_current and each domain
 * object) and collect the affected groups and L2 caches so the caller can
 * notify them after releasing pm_lock_state.
 * Requires both pm_lock_exec and pm_lock_state held (asserted).
 * NOTE(review): list-iteration continuation lines and the (*num_l2_down)++
 * increment are on lines elided in this view.
 */
594 static void mali_pm_domain_power_down(u32 power_down_mask,
595 struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
596 u32 *num_groups_down,
597 struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
601 u32 notify_mask = power_down_mask;
603 MALI_DEBUG_ASSERT(0 != power_down_mask);
604 MALI_DEBUG_ASSERT_POINTER(groups_down);
605 MALI_DEBUG_ASSERT_POINTER(num_groups_down);
606 MALI_DEBUG_ASSERT(0 == *num_groups_down);
607 MALI_DEBUG_ASSERT_POINTER(l2_down);
608 MALI_DEBUG_ASSERT_POINTER(num_l2_down);
609 MALI_DEBUG_ASSERT(0 == *num_l2_down);
611 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
612 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
615 ("PM update: Powering down domains: [%s]\n",
616 mali_pm_mask_to_string(power_down_mask)));
618 pd_mask_current &= ~power_down_mask;
/* Walk set bits from highest to lowest, clearing each as it is handled. */
620 domain_bit = _mali_osk_fls(notify_mask);
621 while (0 != domain_bit) {
622 u32 domain_id = domain_bit - 1;
623 struct mali_pm_domain *domain =
624 mali_pm_domain_get_from_index(domain_id);
625 struct mali_l2_cache_core *l2_cache;
626 struct mali_l2_cache_core *l2_cache_tmp;
627 struct mali_group *group;
628 struct mali_group *group_tmp;
630 /* Mark domain as powered down */
631 mali_pm_domain_set_power_on(domain, MALI_FALSE);
634 * Make a note of the L2s and/or groups to notify
635 * (need to release the PM state lock before doing so)
638 _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
640 mali_pm_domain_get_l2_cache_list(domain),
641 struct mali_l2_cache_core,
643 MALI_DEBUG_ASSERT(*num_l2_down <
644 MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
645 l2_down[*num_l2_down] = l2_cache;
649 _MALI_OSK_LIST_FOREACHENTRY(group,
651 mali_pm_domain_get_group_list(domain),
654 MALI_DEBUG_ASSERT(*num_groups_down <
655 MALI_MAX_NUMBER_OF_GROUPS);
656 groups_down[*num_groups_down] = group;
657 (*num_groups_down)++;
660 /* Remove current bit and find next */
661 notify_mask &= ~(1 << (domain_id));
662 domain_bit = _mali_osk_fls(notify_mask);
667 * Execute pending power domain changes
668 * pm_lock_exec lock must be taken by caller.
/*
 * Core PM state machine step: reconcile pd_mask_wanted against
 * pd_mask_current/pmu_mask_current. First powers up missing domains
 * (PMU first, then SW state, then notifies L2s and the executor), then
 * powers down no-longer-wanted domains in the reverse order. The dummy
 * domain is always included on power-up and never powered down here —
 * it only goes off via a suspend request.
 * NOTE(review): local declarations (power_up_mask, power_down_mask, i,
 * num_l2_up/num_l2_down) and the "else" opener of the final
 * PMU-only power-down branch are on lines elided in this view.
 */
670 static void mali_pm_update_sync_internal(void)
673 * This should only be called in non-atomic context
674 * (normally as deferred work)
676 * Look at the pending power domain changes, and execute these.
677 * Make sure group and schedulers are notified about changes.
680 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
685 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
691 /* Hold PM state lock while we look at (and obey) the wanted state */
692 mali_pm_state_lock();
694 MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
695 mali_pm_mask_to_string(pd_mask_wanted)));
696 MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
697 mali_pm_mask_to_string(pd_mask_current)));
698 MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
699 mali_pm_mask_to_string(pmu_mask_current)));
700 MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
701 mali_pm_group_stats_to_string()));
703 /* Figure out which cores we need to power on */
/* wanted-but-not-current bits */
704 power_up_mask = pd_mask_wanted &
705 (pd_mask_wanted ^ pd_mask_current);
707 if (0 != power_up_mask) {
708 u32 power_up_mask_pmu;
709 struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
710 u32 num_groups_up = 0;
711 struct mali_l2_cache_core *
712 l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
721 * Make sure dummy/global domain is always included when
722 * powering up, since this is controlled by runtime PM,
723 * and device power is on at this stage.
725 power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
727 /* Power up only real PMU domains */
728 power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
730 /* But not those that happen to be powered on already */
731 power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
734 if (0 != power_up_mask_pmu) {
735 MALI_DEBUG_ASSERT(NULL != pmu);
736 pmu_mask_current |= power_up_mask_pmu;
737 mali_pmu_power_up(pmu, power_up_mask_pmu);
741 * Put the domains themselves in power up state.
742 * We get the groups and L2s to notify in return.
744 mali_pm_domain_power_up(power_up_mask,
745 groups_up, &num_groups_up,
748 /* Need to unlock PM state lock before notifying L2 + groups */
749 mali_pm_state_unlock();
751 /* Notify each L2 cache that we have be powered up */
752 for (i = 0; i < num_l2_up; i++) {
753 mali_l2_cache_power_up(l2_up[i]);
757 * Tell execution module about all the groups we have
758 * powered up. Groups will be notified as a result of this.
760 mali_executor_group_power_up(groups_up, num_groups_up);
762 /* Lock state again before checking for power down */
763 mali_pm_state_lock();
766 /* Figure out which cores we need to power off */
/* current-but-not-wanted bits */
767 power_down_mask = pd_mask_current &
768 (pd_mask_wanted ^ pd_mask_current);
771 * Never power down the dummy/global domain here. This is to be done
772 * from a suspend request (since this domain is only physicall powered
773 * down at that point)
775 power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
777 if (0 != power_down_mask) {
778 u32 power_down_mask_pmu;
779 struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
780 u32 num_groups_down = 0;
781 struct mali_l2_cache_core *
782 l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
787 ++num_pm_updates_down;
791 * Put the domains themselves in power down state.
792 * We get the groups and L2s to notify in return.
794 mali_pm_domain_power_down(power_down_mask,
795 groups_down, &num_groups_down,
796 l2_down, &num_l2_down);
798 /* Need to unlock PM state lock before notifying L2 + groups */
799 mali_pm_state_unlock();
802 * Tell execution module about all the groups we will be
803 * powering down. Groups will be notified as a result of this.
805 if (0 < num_groups_down) {
806 mali_executor_group_power_down(groups_down, num_groups_down);
809 /* Notify each L2 cache that we will be powering down */
810 for (i = 0; i < num_l2_down; i++) {
811 mali_l2_cache_power_down(l2_down[i]);
815 * Power down only PMU domains which should not stay on
816 * Some domains might for instance currently be incorrectly
817 * powered up if default domain power state is all on.
819 power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
821 if (0 != power_down_mask_pmu) {
822 MALI_DEBUG_ASSERT(NULL != pmu);
823 pmu_mask_current &= ~power_down_mask_pmu;
824 mali_pmu_power_down(pmu, power_down_mask_pmu);
/* (else branch: no SW domains to turn off, but the PMU may still disagree) */
829 * Power down only PMU domains which should not stay on
830 * Some domains might for instance currently be incorrectly
831 * powered up if default domain power state is all on.
833 u32 power_down_mask_pmu;
835 /* No need for state lock since we'll only update PMU */
836 mali_pm_state_unlock();
838 power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
840 if (0 != power_down_mask_pmu) {
841 MALI_DEBUG_ASSERT(NULL != pmu);
842 pmu_mask_current &= ~power_down_mask_pmu;
843 mali_pmu_power_down(pmu, power_down_mask_pmu);
847 MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
848 mali_pm_mask_to_string(pd_mask_current)));
849 MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
850 mali_pm_mask_to_string(pmu_mask_current)));
851 MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
852 mali_pm_group_stats_to_string()));
/*
 * Shared suspend path for OS suspend and runtime suspend. Aborts with
 * MALI_FALSE if any domain is still wanted (a racing power-up request);
 * otherwise marks all domains (including the dummy domain) powered off,
 * notifies the executor and L2 caches, zeroes pmu_mask_current (the whole
 * GPU is about to lose power, no per-domain PMU writes needed) and returns
 * MALI_TRUE.
 * NOTE(review): the early "return MALI_FALSE", the remaining arguments of
 * the mali_pm_domain_power_down() call, and the final "return MALI_TRUE"
 * are on lines elided in this view.
 */
855 static mali_bool mali_pm_common_suspend(void)
857 mali_pm_state_lock();
859 if (0 != pd_mask_wanted) {
860 MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
861 mali_pm_state_unlock();
865 MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
866 mali_pm_mask_to_string(pd_mask_wanted)));
867 MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
868 mali_pm_mask_to_string(pd_mask_current)));
869 MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
870 mali_pm_mask_to_string(pmu_mask_current)));
871 MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
872 mali_pm_group_stats_to_string()));
874 if (0 != pd_mask_current) {
876 * We have still some domains powered on.
877 * It is for instance very normal that at least the
878 * dummy/global domain is marked as powered on at this point.
879 * (because it is physically powered on until this function
883 struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
884 u32 num_groups_down = 0;
885 struct mali_l2_cache_core *
886 l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
891 * Put the domains themselves in power down state.
892 * We get the groups and L2s to notify in return.
894 mali_pm_domain_power_down(pd_mask_current,
900 MALI_DEBUG_ASSERT(0 == pd_mask_current);
901 MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
903 /* Need to unlock PM state lock before notifying L2 + groups */
904 mali_pm_state_unlock();
907 * Tell execution module about all the groups we will be
908 * powering down. Groups will be notified as a result of this.
910 if (0 < num_groups_down) {
911 mali_executor_group_power_down(groups_down, num_groups_down);
914 /* Notify each L2 cache that we will be powering down */
915 for (i = 0; i < num_l2_down; i++) {
916 mali_l2_cache_power_down(l2_down[i]);
919 pmu_mask_current = 0;
921 MALI_DEBUG_ASSERT(0 == pmu_mask_current);
923 MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
925 mali_pm_state_unlock();
928 MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
929 mali_pm_mask_to_string(pd_mask_current)));
930 MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
931 mali_pm_mask_to_string(pmu_mask_current)));
932 MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
933 mali_pm_group_stats_to_string()));
/*
 * Work-queue handler installed by mali_pm_initialize(); runs the synchronous
 * PM update in process context when mali_pm_update_async() was used.
 * NOTE(review): the (unused) @data parameter is presumably consumed by a
 * MALI_IGNORE(data) on a line elided in this view.
 */
938 static void mali_pm_update_work(void *data)
941 mali_pm_update_sync();
/*
 * Create a mali_pm_domain object for every configured (non-zero) entry in
 * domain_config[], which always includes the dummy domain. Returns
 * _MALI_OSK_ERR_NOMEM if any creation fails, _MALI_OSK_ERR_OK otherwise.
 * NOTE(review): the declaration of loop index i is on a line elided here.
 */
944 static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
948 /* Create all domains (including dummy domain) */
949 for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
950 if (0x0 == domain_config[i]) continue;
952 if (NULL == mali_pm_domain_create(domain_config[i])) {
953 return _MALI_OSK_ERR_NOMEM;
957 return _MALI_OSK_ERR_OK;
/*
 * Build domain_config[] from hardware detection: probe each GP/PP/L2
 * resource address and, if present, assign it the PMU domain bit used by
 * that GPU variant's default PMU layout. The bit layouts differ per GPU:
 * Mali-400 uses one bit per core, Mali-450/470 group several PP cores and
 * L2s into shared domains (e.g. PP1-PP3 sharing a bit on Mali-450 matches
 * the default PMU grouping — assumed per the Mali-450 TRM, confirm).
 */
960 static void mali_pm_set_default_pm_domain_config(void)
962 MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
/* GP core */
965 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
966 MALI_OFFSET_GP, NULL)) {
967 domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
/* PP0 - PP3: domain bit depends on GPU variant */
971 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
972 MALI_OFFSET_PP0, NULL)) {
973 if (mali_is_mali400()) {
974 domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
975 } else if (mali_is_mali450()) {
976 domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
977 } else if (mali_is_mali470()) {
978 domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
982 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
983 MALI_OFFSET_PP1, NULL)) {
984 if (mali_is_mali400()) {
985 domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
986 } else if (mali_is_mali450()) {
987 domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
988 } else if (mali_is_mali470()) {
989 domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
993 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
994 MALI_OFFSET_PP2, NULL)) {
995 if (mali_is_mali400()) {
996 domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
997 } else if (mali_is_mali450()) {
998 domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
999 } else if (mali_is_mali470()) {
1000 domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
1004 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1005 MALI_OFFSET_PP3, NULL)) {
1006 if (mali_is_mali400()) {
1007 domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
1008 } else if (mali_is_mali450()) {
1009 domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
1010 } else if (mali_is_mali470()) {
1011 domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1;
/* PP4 - PP7 exist only on Mali-450 MP8; they share domain bit 3 */
1016 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1017 MALI_OFFSET_PP4, NULL)) {
1018 domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
1021 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1022 MALI_OFFSET_PP5, NULL)) {
1023 domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
1026 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1027 MALI_OFFSET_PP6, NULL)) {
1028 domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
1031 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1032 MALI_OFFSET_PP7, NULL)) {
1033 domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
1036 /* L2gp/L2PP0/L2PP4 */
1037 if (mali_is_mali400()) {
1038 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1039 MALI400_OFFSET_L2_CACHE0, NULL)) {
1040 domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
1042 } else if (mali_is_mali450()) {
1043 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1044 MALI450_OFFSET_L2_CACHE0, NULL)) {
1045 domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
1048 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1049 MALI450_OFFSET_L2_CACHE1, NULL)) {
1050 domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
1053 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1054 MALI450_OFFSET_L2_CACHE2, NULL)) {
1055 domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
1057 } else if (mali_is_mali470()) {
1058 if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
1059 MALI470_OFFSET_L2_CACHE1, NULL)) {
1060 domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0;
/*
 * OR together the PMU masks of all real (non-dummy) configured domains,
 * giving the set of PMU domains that have cores registered in them.
 * NOTE(review): the declarations of i/mask and the "return mask;" are on
 * lines elided in this view.
 */
1065 static u32 mali_pm_get_registered_cores_mask(void)
1070 for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
1071 mask |= domain_config[i];
/*
 * Populate domain_config[]: first try a customer-supplied PMU config from
 * platform device data; if no entry was set, fall back to hardware
 * detection via mali_pm_set_default_pm_domain_config(). The dummy-domain
 * slot is always forced back to its fixed mask so it cannot be overridden.
 */
1077 static void mali_pm_set_pmu_domain_config(void)
1081 _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
/* Scan the real-domain slots; any non-zero entry means a custom config was provided. */
1083 for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
1084 if (0 != domain_config[i]) {
1085 MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n"));
/* Loop ran to the end without a hit => no custom config; detect from HW. */
1090 if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
1091 MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n"));
1092 mali_pm_set_default_pm_domain_config();
1095 for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
1096 if (domain_config[i]) {
1097 MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i]));
1100 /* Can't override dummy domain mask */
1101 domain_config[MALI_DOMAIN_INDEX_DUMMY] =
1102 1 << MALI_DOMAIN_INDEX_DUMMY;
/*
 * Debug helper: render @mask as a fixed-width string of 'X' (bit set) and
 * '-' (bit clear), highest domain bit first. Uses a shared static buffer,
 * so pm_lock_exec must be held by the caller (asserted when the lock
 * exists). NOTE(review): str_pos declaration, the else branch writing '-',
 * str_pos increment and the "return bit_str;" are on lines elided here.
 */
1106 const char *mali_pm_mask_to_string(u32 mask)
1108 static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
1112 /* Must be protected by lock since we use shared string buffer */
1113 if (NULL != pm_lock_exec) {
1114 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
1117 for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
1118 if (mask & (1 << bit)) {
1119 bit_str[str_pos] = 'X';
1121 bit_str[str_pos] = '-';
1126 bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
/*
 * Debug helper: render per-group power state as one character per group
 * into a shared static buffer (pm_lock_exec must be held when it exists).
 * NOTE(review): the character written for a powered-on group, the else
 * branch, the terminating NUL and the return are on lines elided here.
 */
1131 const char *mali_pm_group_stats_to_string(void)
1133 static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
1134 u32 num_groups = mali_group_get_glob_num_groups();
1137 /* Must be protected by lock since we use shared string buffer */
1138 if (NULL != pm_lock_exec) {
1139 MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
1142 for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
1143 struct mali_group *group;
1145 group = mali_group_get_glob_group(i);
1147 if (MALI_TRUE == mali_group_power_is_on(group)) {
/*
 * Compute statistics for a hypothetical PMU @mask: the number of PP cores
 * (*num_pp) and the total power cost (*cost) of everything the mask turns
 * on. A PP core only counts when its L2's domain is also in the mask (or
 * the L2 lives in the always-on dummy domain).
 * NOTE(review): the switch(i) opener, the "continue;" for unselected
 * domains, the (*num_pp)++ statements and the case "break;"s are on lines
 * elided in this view; *num_pp/*cost are presumably zeroed by the caller
 * or in elided init lines — confirm.
 */
1161 * num_pp is the number of PP cores which will be powered on given this mask
1162 * cost is the total power cost of cores which will be powered on given this mask
1164 static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
1168 /* loop through all cores */
1169 for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
1170 if (!(domain_config[i] & mask)) {
1175 case MALI_DOMAIN_INDEX_GP:
1176 *cost += MALI_GP_COST;
1179 case MALI_DOMAIN_INDEX_PP0: /* Fall through */
1180 case MALI_DOMAIN_INDEX_PP1: /* Fall through */
1181 case MALI_DOMAIN_INDEX_PP2: /* Fall through */
1182 case MALI_DOMAIN_INDEX_PP3:
1183 if (mali_is_mali400()) {
/* Mali-400: PP0-3 sit behind L2 #0 */
1184 if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
1185 || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
1186 == domain_config[MALI_DOMAIN_INDEX_L20])) {
/* Mali-450/470: PP0-3 sit behind L2 #1 */
1190 if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
1191 || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
1192 == domain_config[MALI_DOMAIN_INDEX_L21])) {
1197 *cost += MALI_PP_COST;
1199 case MALI_DOMAIN_INDEX_PP4: /* Fall through */
1200 case MALI_DOMAIN_INDEX_PP5: /* Fall through */
1201 case MALI_DOMAIN_INDEX_PP6: /* Fall through */
1202 case MALI_DOMAIN_INDEX_PP7:
1203 MALI_DEBUG_ASSERT(mali_is_mali450());
/* Mali-450 MP8: PP4-7 sit behind L2 #2 */
1205 if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
1206 || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
1207 == domain_config[MALI_DOMAIN_INDEX_L22])) {
1211 *cost += MALI_PP_COST;
1213 case MALI_DOMAIN_INDEX_L20: /* Fall through */
1214 case MALI_DOMAIN_INDEX_L21: /* Fall through */
1215 case MALI_DOMAIN_INDEX_L22:
1216 *cost += MALI_L2_COST;
/*
 * Precompute mali_pm_domain_power_cost_result[][]: for each possible number
 * of requested PP cores, which PM domains should contribute how many cores
 * so the total power cost is minimal. Exhaustively evaluates every domain
 * mask up to the highest configured domain bit, keeping the cheapest mask
 * per core count, then distributes always-on cores and per-domain core
 * counts into the result matrix used by mali_pm_get_best_power_cost_mask().
 */
1223 void mali_pm_power_cost_setup(void)
1226 * Two parallel arrays which store the best domain mask and its cost
1227 * The index is the number of PP cores, E.g. Index 0 is for 1 PP option,
1228 * might have mask 0x2 and with cost of 1, lower cost is better
1230 u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
1231 u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
1232 /* Array cores_in_domain is used to store the total pp cores in each pm domain. */
1233 u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
1234 /* Domain_count is used to represent the max domain we have.*/
1235 u32 max_domain_mask = 0;
1236 u32 max_domain_id = 0;
1237 u32 always_on_pp_cores = 0;
1239 u32 num_pp, cost, mask;
1242 /* Initialize statistics */
1243 for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
1245 best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
1248 for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
1249 for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
1250 mali_pm_domain_power_cost_result[i][j] = 0;
1254 /* Caculate number of pp cores of a given domain config. */
1255 for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
1256 if (0 < domain_config[i]) {
1257 /* Get the max domain mask value used to caculate power cost
1258 * and we don't count in always on pp cores. */
1259 if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
1260 && max_domain_mask < domain_config[i]) {
1261 max_domain_mask = domain_config[i];
1264 if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
1265 always_on_pp_cores++;
1269 max_domain_id = _mali_osk_fls(max_domain_mask);
1272 * Try all combinations of power domains and check how many PP cores
1273 * they have and their power cost.
1275 for (mask = 0; mask < (1 << max_domain_id); mask++) {
1279 mali_pm_stat_from_mask(mask, &num_pp, &cost);
1281 /* This mask is usable for all MP1 up to num_pp PP cores, check statistics for all */
1282 for (i = 0; i < num_pp; i++) {
1283 if (best_cost[i] >= cost) {
1284 best_cost[i] = cost;
1285 best_mask[i] = mask;
1291 * If we want to enable x pp cores, if x is less than number of always_on pp cores,
1292 * all of pp cores we will enable must be always_on pp cores.
1294 for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
1295 if (i < always_on_pp_cores) {
1296 mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
1299 mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
1300 = always_on_pp_cores;
1304 /* In this loop, variable i represent for the number of non-always on pp cores we want to enabled. */
1305 for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
1306 if (best_mask[i] == 0) {
1307 /* This MP variant is not available */
1311 for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
1312 cores_in_domain[j] = 0;
1315 for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
1316 if (0 < domain_config[j]
/* NOTE(review): suspected bug — this checks domain_config[i], where i is a
 * core count, not a domain index. The parallel loop above (orig line 1259)
 * and the loop variable here both use j; this should almost certainly be
 * domain_config[j] so always-on cores are excluded correctly. Confirm and fix. */
1317 && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i])) {
1318 cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
1322 /* In this loop, j represent for the number we have already enabled.*/
1323 for (j = 0; j <= i;) {
1324 /* j used to visit all of domain to get the number of pp cores remained in it. */
1325 for (k = 0; k < max_domain_id; k++) {
1326 /* If domain k in best_mask[i] is enabled and this domain has extra pp cores,
1327 * we know we must pick at least one pp core from this domain.
1328 * And then we move to next enabled pm domain. */
1329 if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
1330 cores_in_domain[k]--;
1331 mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
1343 * When we are doing core scaling,
1344 * this function is called to return the best mask to
1345 * achieve the best pp group power cost.
/*
 * Copy the precomputed per-domain core distribution for @num_requested PP
 * cores (row of mali_pm_domain_power_cost_result, built by
 * mali_pm_power_cost_setup()) into @dst, which must hold
 * MALI_MAX_NUMBER_OF_DOMAINS ints.
 */
1347 void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
1349 MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
1351 _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
1354 u32 mali_pm_get_current_mask(void)
1356 return pd_mask_current;
1359 u32 mali_pm_get_wanted_mask(void)
1361 return pd_mask_wanted;