mali_kbase_utility.c \
mali_kbase_debug.c \
mali_kbase_trace_timeline.c \
- mali_kbase_gpu_memory_debugfs.c \
mali_kbase_mem_linux.c \
mali_kbase_core_linux.c \
mali_kbase_sync.c \
mali_kbase_disjoint_events.c \
mali_kbase_gator_api.c
+ifeq ($(CONFIG_DEBUG_FS),y)
+ SRC += mali_kbase_gpu_memory_debugfs.c
+endif
+
ifeq ($(MALI_CUSTOMER_RELEASE),0)
SRC += \
mali_kbase_pm_ca_random.c \
#define _KBASE_CONFIG_DEFAULTS_H_
/* Include mandatory definitions per platform */
-#include <mali_kbase_config_platform.h>
/**
* Irq throttle. It is the minimum desired time in between two
* Attached value: number in micro seconds
*/
#define DEFAULT_IRQ_THROTTLE_TIME_US 20
-
+#define GPU_FREQ_KHZ_MAX 500000
+#define GPU_FREQ_KHZ_MIN 100000
/*** Begin Scheduling defaults ***/
/**
#endif /* MALI_UNIT_TEST */
#define KBASE_DRV_NAME "mali"
+#define ROCKCHIP_VERSION 0x0b
static const char kbase_drv_name[] = KBASE_DRV_NAME;
/* version buffer size check is made in compile time assert */
memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+ get_version->rk_version = ROCKCHIP_VERSION;
break;
}
mali_error mali_err;
#endif /* CONFIG_MALI_NO_MALI */
#ifdef CONFIG_OF
-#ifdef CONFIG_MALI_PLATFORM_FAKE
struct kbase_platform_config *config;
int attribute_count;
attribute_count * sizeof(config->attributes[0]));
if (err)
return err;
-#endif /* CONFIG_MALI_PLATFORM_FAKE */
#endif /* CONFIG_OF */
kbdev = kbase_device_alloc();
#ifdef CONFIG_OF
static const struct of_device_id kbase_dt_ids[] = {
- { .compatible = "arm,malit6xx" },
+ { .compatible = "arm,malit7xx" },
{ .compatible = "arm,mali-midgard" },
{ /* sentinel */ }
};
* anymore when using Device Tree.
*/
#ifdef CONFIG_OF
+#if 0
module_platform_driver(kbase_platform_driver);
-#else /* CONFIG_MALI_PLATFORM_FAKE */
-
-extern int kbase_platform_early_init(void);
+#else
+static int __init rockchip_gpu_init_driver(void)
+{
+ return platform_driver_register(&kbase_platform_driver);
+}
+late_initcall(rockchip_gpu_init_driver);
+#endif
+#else
#ifdef CONFIG_MALI_PLATFORM_FAKE
extern int kbase_platform_fake_register(void);
extern void kbase_platform_fake_unregister(void);
struct list_head entry;
struct device *dev;
+ unsigned int kbase_group_error;
struct miscdevice mdev;
u64 reg_start;
size_t reg_size;
address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx", status & 0xFF, kbase_exception_name(status), address);
+ kbdev->kbase_group_error++;
if (multiple)
dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
}
list_for_each_entry(element, &kbdev->kctx_list, link) {
/* output the memory usage and cap for each kctx
* opened on this device */
- ret = seq_printf(sfile, " %s-0x%p %10u\n", \
- "kctx",
+ ret = seq_printf(sfile, " %s-0x%p %10u %10u %10u %10u\n", \
+ "kctx", \
element->kctx, \
- atomic_read(&(element->kctx->used_pages)));
+ element->kctx->pid, \
+ atomic_read(&(element->kctx->osalloc.free_list_size)), \
+ atomic_read(&(element->kctx->used_pages)), \
+ atomic_read(&(element->kctx->nonmapped_pages)));
}
mutex_unlock(&kbdev->kctx_list_lock);
}
/* fall throught */
default:
dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)", i, completion_code, kbase_exception_name(completion_code));
+ kbdev->kbase_group_error++;
}
}
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Flush CPU-side writes in [vaddr, vaddr + sz) out to memory so the GPU
+ * observes up-to-date data.
+ *
+ * @paddr: physical address of the region (outer cache / scatterlist use)
+ * @vaddr: kernel virtual address of the same region
+ * @sz:    length in bytes
+ */
+void kbase_sync_to_memory(phys_addr_t paddr, void *vaddr, size_t sz)
+{
+#ifdef CONFIG_ARM
+	__cpuc_flush_dcache_area(vaddr, sz);
+	outer_flush_range(paddr, paddr + sz);
+#elif defined(CONFIG_ARM64)
+	/* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
+	flush_cache_all();
+#elif defined(CONFIG_X86)
+	struct scatterlist scl = { 0, };
+	/* NOTE(review): direction is DMA_TO_DEVICE yet this calls
+	 * dma_sync_sg_for_cpu(); dma_sync_sg_for_device() looks intended -
+	 * confirm against the DMA-mapping API before relying on this path. */
+	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
+	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_TO_DEVICE);
+	mb(); /* for outer_sync (if needed) */
+#else
+#error Implement cache maintenance for your architecture here
+#endif
+}
+
+/*
+ * Make GPU-side writes in [vaddr, vaddr + sz) visible to the CPU.
+ *
+ * On ARM this performs a clean+invalidate (flush), which is also safe for
+ * the from-device direction. ARM64 falls back to a global flush.
+ *
+ * @paddr: physical address of the region
+ * @vaddr: kernel virtual address of the same region
+ * @sz:    length in bytes
+ */
+void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
+{
+#ifdef CONFIG_ARM
+	__cpuc_flush_dcache_area(vaddr, sz);
+	outer_flush_range(paddr, paddr + sz);
+#elif defined(CONFIG_ARM64)
+	/* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
+	flush_cache_all();
+#elif defined(CONFIG_X86)
+	struct scatterlist scl = { 0, };
+	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
+	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
+#else
+#error Implement cache maintenance for your architecture here
+#endif
+}
* We need to switch to UNMAPPED mode - but we do this in a
* worker so that we can sleep
*/
+ kbdev->kbase_group_error++;
KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
INIT_WORK(&as->work_busfault, bus_fault_worker);
queue_work(as->pf_wq, &as->work_busfault);
} else {
+ kbdev->kbase_group_error++;
KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
INIT_WORK(&as->work_pagefault, page_fault_worker);
queue_work(as->pf_wq, &as->work_pagefault);
static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
+	/* Defensive: a NULL atom would be dereferenced below. */
+	if(!katom)
+	{
+		pr_err("katom null.forbiden return\n");
+		return;
+	}
+	/* Fence already released (possibly out-of-order teardown): skip the
+	 * async cancel and finish the soft job directly. */
+	if(!katom->fence)
+	{
+		pr_info("katom->fence null.may release out of order.so continue unfinished step\n");
+		/*
+		 * NOTE(review): returning here instead could leave the atom on
+		 * kctx->waiting_soft_jobs forever. Falling through lets
+		 * jd_done_nolock() move it to the completed-job list and
+		 * delete it.
+		 */
+		goto finish_softjob;
+	}
+
	if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
		/* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
		return;
	}
	/* Wait was cancelled - zap the atoms */
+finish_softjob:
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	kbase_finish_soft_job(katom);
	if (jd_done_nolock(katom))
		kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
+
+	return;
}
#endif /* CONFIG_SYNC */
break;
case BASE_JD_REQ_SOFT_FENCE_WAIT:
/* Release the reference to the fence object */
- sync_fence_put(katom->fence);
- katom->fence = NULL;
+ if(katom->fence) {
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ }
break;
#endif /* CONFIG_SYNC */
}
char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
u32 version_string_size;
u32 padding;
+ u32 rk_version;
};
struct kbase_uk_disjoint_query {
--- /dev/null
+#
+# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+# Android sync/fence headers live under drivers/staging/android.
+ccflags-y += -I$(srctree)/drivers/staging/android
+# Built-in (=y): compile the Rockchip platform glue as normal objects.
+# Modular (=m): append the sources to SRC so they link into the kbase module.
+ifeq ($(CONFIG_MALI_MIDGARD),y)
+obj-y += mali_kbase_config_rk.o
+obj-y += mali_kbase_dvfs.o
+obj-y += mali_kbase_platform.o
+else ifeq ($(CONFIG_MALI_MIDGARD),m)
+SRC += platform/rk/mali_kbase_config_rk.c
+SRC += platform/rk/mali_kbase_dvfs.c
+SRC += platform/rk/mali_kbase_platform.c
+endif
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#ifdef CONFIG_UMP
+#include <linux/ump-common.h>
+#endif /* CONFIG_UMP */
+#include <platform/rk/mali_kbase_platform.h>
+#include <platform/rk/mali_kbase_dvfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+
+int get_cpu_clock_speed(u32 *cpu_clock);
+
+#define HZ_IN_MHZ (1000000)
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+#define RUNTIME_PM_DELAY_TIME 50
+#endif
+
+/* Versatile Express (VE) configuration defaults shared between config_attributes[]
+ * and config_attributes_hw_issue_8408[]. Settings are not shared for
+ * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
+ */
+#define KBASE_VE_MEMORY_PER_PROCESS_LIMIT (512 * 1024 * 1024UL) /* 512MB */
+#define KBASE_VE_MEMORY_OS_SHARED_MAX (2048 * 1024 * 1024UL) /* 2048MB */
+#define KBASE_VE_MEMORY_OS_SHARED_PERF_GPU KBASE_MEM_PERF_FAST/*KBASE_MEM_PERF_SLOW*/
+#define KBASE_VE_GPU_FREQ_KHZ_MAX 500000
+#define KBASE_VE_GPU_FREQ_KHZ_MIN 100000
+#ifdef CONFIG_UMP
+#define KBASE_VE_UMP_DEVICE UMP_DEVICE_Z_SHIFT
+#endif /* CONFIG_UMP */
+
+#define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG 15000000u /* 15ms, an agressive tick for testing purposes. This will reduce performance significantly */
+#define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG 1 /* between 15ms and 30ms before soft-stop a job */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG 333 /* 5s before hard-stop */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG 2000 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
+#define KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG 100000 /* 1500s (25mins) before NSS hard-stop */
+#define KBASE_VE_JS_RESET_TICKS_SS_DEBUG 500 /* 7.5s before resetting GPU */
+#define KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG 3000 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
+#define KBASE_VE_JS_RESET_TICKS_NSS_DEBUG 100166 /* 1502s before resetting GPU */
+
+#define KBASE_VE_JS_SCHEDULING_TICK_NS 2500000000u /* 2.5s */
+#define KBASE_VE_JS_SOFT_STOP_TICKS 1 /* 2.5s before soft-stop a job */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS 2 /* 5s before hard-stop */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401 12 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
+#define KBASE_VE_JS_HARD_STOP_TICKS_NSS 600 /* 1500s before NSS hard-stop */
+#define KBASE_VE_JS_RESET_TICKS_SS 3 /* 7.5s before resetting GPU */
+#define KBASE_VE_JS_RESET_TICKS_SS_8401 18 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
+#define KBASE_VE_JS_RESET_TICKS_NSS 601 /* 1502s before resetting GPU */
+
+#define KBASE_VE_JS_RESET_TIMEOUT_MS 500 /* 0.5s before cancelling stuck jobs */
+#define KBASE_VE_JS_CTX_TIMESLICE_NS 1000000 /* 1ms - an agressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
+#define KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE ((uintptr_t)MALI_FALSE) /* By default we prefer performance over security on r0p0-15dev0 and KBASE_CONFIG_ATTR_ earlier */
+/*#define KBASE_VE_POWER_MANAGEMENT_CALLBACKS ((uintptr_t)&pm_callbacks)*/
+#define KBASE_VE_CPU_SPEED_FUNC ((uintptr_t)&get_cpu_clock_speed)
+
+static int mali_pm_notifier(struct notifier_block *nb,unsigned long event,void* cmd);
+static struct notifier_block mali_pm_nb = {
+ .notifier_call = mali_pm_notifier
+};
+/*
+ * Reboot notifier: park the GPU by disabling DVFS at the current frequency
+ * before the system goes down.
+ *
+ * Notifier callbacks must return NOTIFY_* codes; the previous code returned
+ * -EPERM on failure, which is not a valid notifier return value.
+ */
+static int mali_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	pr_info("%s enter\n", __func__);
+	if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ) != MALI_TRUE) {
+		pr_err("%s: failed to disable DVFS before reboot\n", __func__);
+		return NOTIFY_BAD;
+	}
+	pr_info("%s exit\n", __func__);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mali_reboot_notifier = {
+ .notifier_call = mali_reboot_notifier_event,
+};
+
+#ifndef CONFIG_OF
+static kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0xFC010000,
+ .end = 0xFC010000 + (4096 * 5) - 1}
+};
+#endif
+/*
+ * get_cpu_clock_speed - report the current CPU clock in MHz via @cpu_clock.
+ *
+ * Registered with kbase through KBASE_VE_CPU_SPEED_FUNC.
+ *
+ * NOTE(review): the real clk query is compiled out (#if 0), so this always
+ * returns 0 (success) WITHOUT writing to *cpu_clock - the caller's variable
+ * keeps its prior value. Confirm callers pre-initialize it.
+ */
+int get_cpu_clock_speed(u32 *cpu_clock)
+{
+#if 0
+	struct clk *cpu_clk;
+	u32 freq = 0;
+	cpu_clk = clk_get(NULL, "armclk");
+	if (IS_ERR(cpu_clk))
+		return 1;
+	freq = clk_get_rate(cpu_clk);
+	*cpu_clock = (freq / HZ_IN_MHZ);
+#endif
+	return 0;
+}
+
+/*
+ * PM notifier: freeze DVFS at the lowest table frequency across system
+ * suspend and re-enable it on resume (only when CONFIG_MALI_MIDGARD_DVFS
+ * is built in). Returns NOTIFY_OK, or NOTIFY_BAD if toggling DVFS fails.
+ */
+static int mali_pm_notifier(struct notifier_block *nb,unsigned long event,void* cmd)
+{
+	int err = NOTIFY_OK;
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+		/*
+		pr_info("%s,PM_SUSPEND_PREPARE\n",__func__);
+		*/
+		/* Pin to the lowest frequency and stop DVFS sampling. */
+		if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
+			err = NOTIFY_BAD;
+#endif
+		break;
+	case PM_POST_SUSPEND:
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+		/*
+		pr_info("%s,PM_POST_SUSPEND\n",__func__);
+		*/
+		/* Resume DVFS sampling from the lowest frequency. */
+		if (kbase_platform_dvfs_enable(true, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
+			err = NOTIFY_BAD;
+#endif
+		break;
+	default:
+		break;
+	}
+	return err;
+}
+
+/*
+ * rk3288 hardware specific initialization.
+ *
+ * Performs common platform init, then hooks the PM (suspend/resume) and
+ * reboot notifiers used to gate DVFS around system power transitions.
+ *
+ * Returns MALI_TRUE on success. On failure, platform init is rolled back
+ * so no partially-initialized state leaks (the previous code leaked it
+ * when register_pm_notifier() failed).
+ */
+mali_bool kbase_platform_rk_init(struct kbase_device *kbdev)
+{
+	if (MALI_ERROR_NONE != kbase_platform_init(kbdev))
+		return MALI_FALSE;
+
+	if (register_pm_notifier(&mali_pm_nb)) {
+		/* undo kbase_platform_init() */
+		kbase_platform_term(kbdev);
+		return MALI_FALSE;
+	}
+	pr_info("%s,register_reboot_notifier\n",__func__);
+	register_reboot_notifier(&mali_reboot_notifier);
+	return MALI_TRUE;
+}
+
+/*
+ * rk3288 hardware specific termination.
+ *
+ * Mirrors kbase_platform_rk_init(): unhooks the reboot and PM notifiers,
+ * removes the debug sysfs files (when enabled) and tears down the common
+ * platform state. The reboot notifier was previously never unregistered,
+ * leaving a dangling callback after driver removal.
+ */
+void kbase_platform_rk_term(struct kbase_device *kbdev)
+{
+	unregister_reboot_notifier(&mali_reboot_notifier);
+	unregister_pm_notifier(&mali_pm_nb);
+#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
+	kbase_platform_remove_sysfs_file(kbdev->dev);
+#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
+	kbase_platform_term(kbdev);
+}
+
+kbase_platform_funcs_conf platform_funcs = {
+ .platform_init_func = &kbase_platform_rk_init,
+ .platform_term_func = &kbase_platform_rk_term,
+};
+
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+/*
+ * Power on the GPU. Returns 1 when the GPU may have lost state while
+ * powered down (it was runtime-suspended), 0 otherwise.
+ */
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int result;
+	int ret_val;
+	struct device *dev = kbdev->dev;
+	struct rk_context *platform;
+	platform = (struct rk_context *)kbdev->platform_context;
+
+	if (pm_runtime_status_suspended(dev))
+		ret_val = 1;
+	else
+		ret_val = 0;
+
+	/* Runtime PM disabled: drive the clock/power domain by hand. */
+	if (dev->power.disable_depth > 0) {
+		if (platform->cmu_pmu_status == 0)
+			kbase_platform_cmu_pmu_control(kbdev, 1);
+		return ret_val;
+	}
+	result = pm_runtime_resume(dev);
+
+	/* -EAGAIN: runtime PM temporarily unavailable, fall back to direct
+	 * CMU/PMU control. (The old check "result < 0 && result == -EAGAIN"
+	 * was redundant, and the error message named the wrong function.) */
+	if (result == -EAGAIN)
+		kbase_platform_cmu_pmu_control(kbdev, 1);
+	else if (result < 0)
+		printk(KERN_ERR "pm_runtime_resume failed (%d)\n", result);
+
+	return ret_val;
+}
+
+/* Power-off request: defer the actual runtime suspend by
+ * RUNTIME_PM_DELAY_TIME ms so rapid on/off cycles do not thrash the
+ * power domain. */
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+	struct device *dev = kbdev->dev;
+	pm_schedule_suspend(dev, RUNTIME_PM_DELAY_TIME);
+}
+
+/*
+ * Enable runtime PM for the GPU device and (optionally) create the debug
+ * sysfs files. On sysfs failure runtime PM is disabled again so the
+ * enable/disable calls stay balanced (previously it was left enabled on
+ * the error path).
+ */
+mali_error kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	pm_suspend_ignore_children(kbdev->dev, true);
+	pm_runtime_enable(kbdev->dev);
+#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
+	if (kbase_platform_create_sysfs_file(kbdev->dev)) {
+		pm_runtime_disable(kbdev->dev);
+		return MALI_ERROR_FUNCTION_FAILED;
+	}
+#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
+	return MALI_ERROR_NONE;
+}
+
+/* Disable runtime PM for the GPU device (balances kbase_device_runtime_init). */
+void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	pm_runtime_disable(kbdev->dev);
+}
+
+/*
+ * Runtime-resume callback: restore GPU power and clocks, then restore the
+ * DVFS state. If a touch event was seen while suspended, resume directly
+ * at the highest table frequency for responsiveness.
+ * Returns 0 on success, -EPERM if DVFS could not be toggled.
+ */
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+	unsigned long flags;
+	unsigned int clock;
+#endif
+
+	kbase_platform_power_on(kbdev);
+
+	kbase_platform_clock_on(kbdev);
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	if (platform->dvfs_enabled) {
+		/* NOTE(review): gpu_in_touch is read before taking
+		 * gpu_in_touch_lock - confirm the race is acceptable. */
+		if(platform->gpu_in_touch) {
+			clock = p_mali_dvfs_infotbl[MALI_DVFS_STEP-1].clock;
+			spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+			platform->gpu_in_touch = false;
+			spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+		} else {
+			clock = MALI_DVFS_CURRENT_FREQ;
+		}
+		/*
+		pr_info("%s,clock = %d\n",__func__,clock);
+		*/
+		if (kbase_platform_dvfs_enable(true, clock)!= MALI_TRUE)
+			return -EPERM;
+
+	} else {
+		if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
+			return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Runtime-suspend callback: gate clocks and power, then park DVFS at the
+ * lowest table frequency and clear any pending touch-boost flag.
+ */
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+	unsigned long flags;
+#endif
+
+	kbase_platform_clock_off(kbdev);
+	kbase_platform_power_off(kbdev);
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	if (platform->dvfs_enabled)
+	{
+		/* fixed: message had a typo ("faled") and no log level */
+		if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
+			printk(KERN_ERR "disabling dvfs failed\n");
+		spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+		platform->gpu_in_touch = false;
+		spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+	}
+#endif
+}
+
+/* Power-management callbacks handed to kbase via
+ * KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS; the runtime-PM hooks are
+ * only wired up when CONFIG_PM_RUNTIME is available. */
+static kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+#ifdef CONFIG_PM_RUNTIME
+	.power_runtime_init_callback = kbase_device_runtime_init,
+	.power_runtime_term_callback = kbase_device_runtime_disable,
+	.power_runtime_on_callback = pm_callback_runtime_on,
+	.power_runtime_off_callback = pm_callback_runtime_off,
+
+#else /* CONFIG_PM_RUNTIME */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+
+#endif /* CONFIG_PM_RUNTIME */
+};
+#endif
+
+
+/* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
+/* Attribute list handed to kbase at probe time, terminated by
+ * KBASE_CONFIG_ATTR_END. UMP and runtime-PM entries are conditional on
+ * their support being compiled in. */
+static kbase_attribute config_attributes[] = {
+#ifdef CONFIG_UMP
+	{
+	 KBASE_CONFIG_ATTR_UMP_DEVICE,
+	 KBASE_VE_UMP_DEVICE},
+#endif /* CONFIG_UMP */
+#ifdef CONFIG_MALI_MIDGARD_RT_PM
+	{
+	 KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
+	 (uintptr_t)&pm_callbacks},
+#endif
+	{
+	 KBASE_CONFIG_ATTR_PLATFORM_FUNCS,
+	 (uintptr_t) &platform_funcs},
+
+	{
+	 KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
+	 KBASE_VE_JS_RESET_TIMEOUT_MS},
+	{
+	 KBASE_CONFIG_ATTR_END,
+	 0}
+};
+
+/* Platform configuration: attribute table always; static IO resources only
+ * when the device is not probed from the device tree. */
+static kbase_platform_config rk_platform_config = {
+	.attributes = config_attributes,
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+/* Expose the static platform configuration to the kbase core.
+ * (Dropped a meaningless "#if 1 ... #endif" wrapper.) */
+kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &rk_platform_config;
+}
+/* Early platform hook: the rk platform has no work to do before probe. */
+int kbase_platform_early_init(void)
+{
+	return 0;
+}
--- /dev/null
+/* drivers/gpu/t6xx/kbase/src/platform/manta/mali_kbase_dvfs.c
+ *
+ *
+ * Rockchip SoC Mali-T764 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file mali_kbase_dvfs.c
+ * DVFS
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_uku.h>
+#include <mali_kbase_mem.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/fb.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/rk_fb.h>
+#include <linux/input.h>
+#include <linux/rockchip/common.h>
+
+#include <platform/rk/mali_kbase_platform.h>
+#include <platform/rk/mali_kbase_dvfs.h>
+#include <mali_kbase_gator.h>
+#include <linux/rockchip/dvfs.h>
+/***********************************************************/
+/* This table and variable are using the check time share of GPU Clock */
+/***********************************************************/
+extern int rockchip_tsadc_get_temp(int chn);
+#define gpu_temp_limit 110
+#define gpu_temp_statis_time 1
+#define level0_min 0
+#define level0_max 70
+#define levelf_max 100
+static u32 div_dvfs = 0 ;
+
+static mali_dvfs_info mali_dvfs_infotbl[] = {
+ {925000, 100000, 0, 70, 0},
+ {925000, 160000, 50, 65, 0},
+ {1025000, 266000, 60, 78, 0},
+ {1075000, 350000, 65, 75, 0},
+ {1125000, 400000, 70, 75, 0},
+ {1200000, 500000, 90, 100, 0},
+};
+mali_dvfs_info *p_mali_dvfs_infotbl = NULL;
+
+unsigned int MALI_DVFS_STEP = ARRAY_SIZE(mali_dvfs_infotbl);
+
+static struct cpufreq_frequency_table *mali_freq_table = NULL;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+typedef struct _mali_dvfs_status_type {
+ struct kbase_device *kbdev;
+ int step;
+ int utilisation;
+ u32 temperature;
+ u32 temperature_time;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+ int upper_lock;
+ int under_lock;
+#endif
+
+} mali_dvfs_status;
+
+static struct workqueue_struct *mali_dvfs_wq = 0;
+spinlock_t mali_dvfs_spinlock;
+struct mutex mali_set_clock_lock;
+struct mutex mali_enable_clock_lock;
+
+#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
+static void update_time_in_state(int level);
+#endif
+/*dvfs status*/
+static mali_dvfs_status mali_dvfs_status_current;
+
+#define LIMIT_FPS 60
+#define LIMIT_FPS_POWER_SAVE 50
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+/*
+ * Input event hook: on any absolute (touch) event, flag the GPU as
+ * "in touch" so the next runtime-resume boosts straight to the top
+ * DVFS frequency (see pm_callback_runtime_on).
+ */
+static void gpufreq_input_event(struct input_handle *handle, unsigned int type,
+										unsigned int code, int value)
+{
+	mali_dvfs_status *dvfs_status;
+	struct rk_context *platform;
+	unsigned long flags;
+
+	if (type != EV_ABS)
+		return;
+
+	dvfs_status = &mali_dvfs_status_current;
+	platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
+
+	spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+	platform->gpu_in_touch = true;
+	spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+}
+
+/*
+ * Attach a gpufreq handle to a matching input device so touch activity can
+ * be observed for the DVFS touch boost.
+ * Returns 0 on success or a negative errno; on error the handle is
+ * unwound (err1: unregister, err2: free) before returning.
+ */
+static int gpufreq_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "gpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+	pr_info("%s\n",__func__);
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+/* Detach and free a handle created by gpufreq_input_connect(). */
+static void gpufreq_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+	pr_info("%s\n",__func__);
+}
+
+/* Match touch hardware: multitouch devices reporting ABS_MT_POSITION_X/Y,
+ * or single-touch devices reporting BTN_TOUCH plus ABS_X/ABS_Y. */
+static const struct input_device_id gpufreq_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	{ },
+};
+
+/* Input handler feeding touch activity into the DVFS touch-boost logic. */
+static struct input_handler gpufreq_input_handler = {
+	.event		= gpufreq_input_event,
+	.connect	= gpufreq_input_connect,
+	.disconnect	= gpufreq_input_disconnect,
+	.name		= "gpufreq",
+	.id_table	= gpufreq_ids,
+};
+#endif
+
+/*
+ * DVFS worker: runs once per utilisation sample (queued from
+ * kbase_platform_dvfs_event) and picks the next DVFS level from the
+ * current utilisation, measured FPS, PM policy and SoC temperature.
+ * Level changes only take effect after MALI_DVFS_UP/DOWN_TIME_INTERVAL
+ * consecutive over-/under-threshold samples (hysteresis).
+ */
+static void mali_dvfs_event_proc(struct work_struct *w)
+{
+	unsigned long flags;
+	mali_dvfs_status *dvfs_status;
+	static int level_down_time = 0;
+	static int level_up_time = 0;
+	static u32 temp_tmp;
+	struct rk_context *platform;
+	u32 fps=0;
+	u32 fps_limit;
+	u32 policy;
+	mutex_lock(&mali_enable_clock_lock);
+	dvfs_status = &mali_dvfs_status_current;
+
+	/* DVFS may have been disabled after this work was queued. */
+	if (!kbase_platform_dvfs_get_enable_status()) {
+		mutex_unlock(&mali_enable_clock_lock);
+		return;
+	}
+	platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
+
+	fps = rk_get_real_fps(0);
+
+	dvfs_status->temperature_time++;
+
+	temp_tmp += rockchip_tsadc_get_temp(1);
+
+	/* Average the temperature over gpu_temp_statis_time samples. */
+	if(dvfs_status->temperature_time >= gpu_temp_statis_time) {
+		dvfs_status->temperature_time = 0;
+		dvfs_status->temperature = temp_tmp / gpu_temp_statis_time;
+		temp_tmp = 0;
+	}
+
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	/*
+	policy = rockchip_pm_get_policy();
+	*/
+	policy = ROCKCHIP_PM_POLICY_NORMAL;
+
+	if (ROCKCHIP_PM_POLICY_PERFORMANCE == policy) {
+		dvfs_status->step = MALI_DVFS_STEP - 1;
+	} else {
+		fps_limit = (ROCKCHIP_PM_POLICY_NORMAL == policy)?LIMIT_FPS : LIMIT_FPS_POWER_SAVE;
+		/*
+		printk("policy : %d , fps_limit = %d\n",policy,fps_limit);
+		*/
+
+		/*give priority to temperature unless in performance mode */
+		if (dvfs_status->temperature > gpu_temp_limit) {
+			if(dvfs_status->step > 0)
+				dvfs_status->step--;
+
+			if(gpu_temp_statis_time > 1)
+				dvfs_status->temperature = 0;
+			/*
+			pr_info("decrease step for temperature over %d,next clock = %d\n",
+				gpu_temp_limit, mali_dvfs_infotbl[dvfs_status->step].clock);
+			*/
+		} else if ((dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) &&
+				   (dvfs_status->step < MALI_DVFS_STEP-1) && fps < fps_limit) {
+			level_up_time++;
+			if (level_up_time == MALI_DVFS_UP_TIME_INTERVAL) {
+				/*
+				printk("up,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
+					dvfs_status->utilisation, mali_dvfs_infotbl[dvfs_status->step].clock,
+					fps,dvfs_status->temperature);
+				*/
+				dvfs_status->step++;
+				level_up_time = 0;
+				/*
+				printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock);
+				*/
+				BUG_ON(dvfs_status->step >= MALI_DVFS_STEP);
+			}
+			level_down_time = 0;
+		} else if ((dvfs_status->step > 0) &&
+			(dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
+			level_down_time++;
+			if (level_down_time==MALI_DVFS_DOWN_TIME_INTERVAL) {
+				/*
+				printk("down,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
+					dvfs_status->utilisation,
+					mali_dvfs_infotbl[dvfs_status->step].clock,fps,dvfs_status->temperature);
+				*/
+				BUG_ON(dvfs_status->step <= 0);
+				dvfs_status->step--;
+				level_down_time = 0;
+				/*
+				printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock);
+				*/
+			}
+			level_up_time = 0;
+		} else {
+			level_down_time = 0;
+			level_up_time = 0;
+			/*
+			printk("keep,utilisation=%d,current clock=%d,fps = %d,temperature = %d\n",
+				dvfs_status->utilisation,
+				mali_dvfs_infotbl[dvfs_status->step].clock,fps,dvfs_status->temperature);
+			*/
+		}
+	}
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	/* Clamp the chosen level to the sysfs-imposed upper/under locks. */
+	if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock))
+		dvfs_status->step = dvfs_status->upper_lock;
+
+	if (dvfs_status->under_lock > 0) {
+		if (dvfs_status->step < dvfs_status->under_lock)
+			dvfs_status->step = dvfs_status->under_lock;
+	}
+#endif
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+	kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
+
+	mutex_unlock(&mali_enable_clock_lock);
+}
+
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_event_proc);
+
+/*
+ * Utilisation callback from the kbase PM metrics timer.
+ *
+ * Accumulates busy/idle time over MALI_DVFS_UP_TIME_INTERVAL ticks for the
+ * averaged "utilisation" statistic, records the instantaneous utilisation
+ * and kicks the DVFS worker on CPU 0. Always returns MALI_TRUE.
+ */
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+	u32 util_gl_share_no_use,u32 util_cl_share_no_use[2])
+{
+	unsigned long flags;
+	struct rk_context *platform;
+
+	BUG_ON(!kbdev);
+	platform = (struct rk_context *)kbdev->platform_context;
+
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	if (platform->time_tick < MALI_DVFS_UP_TIME_INTERVAL) {
+		platform->time_tick++;
+		platform->time_busy += kbdev->pm.metrics.time_busy;
+		platform->time_idle += kbdev->pm.metrics.time_idle;
+	} else {
+		/* Window full: restart the accumulation from this sample. */
+		platform->time_busy = kbdev->pm.metrics.time_busy;
+		platform->time_idle = kbdev->pm.metrics.time_idle;
+		platform->time_tick = 0;
+	}
+
+	if ((platform->time_tick == MALI_DVFS_UP_TIME_INTERVAL) &&
+		(platform->time_idle + platform->time_busy > 0))
+		platform->utilisation = (100 * platform->time_busy) /
+								(platform->time_idle + platform->time_busy);
+
+	mali_dvfs_status_current.utilisation = utilisation;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+
+	queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
+	/*add error handle here */
+	return MALI_TRUE;
+}
+
+/* Return the most recent GPU utilisation sample (percent). */
+int kbase_platform_dvfs_get_utilisation(void)
+{
+	unsigned long irq_flags;
+	int sampled;
+
+	spin_lock_irqsave(&mali_dvfs_spinlock, irq_flags);
+	sampled = mali_dvfs_status_current.utilisation;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, irq_flags);
+
+	return sampled;
+}
+
+/* Return non-zero when the kbase PM metrics timer - and therefore DVFS
+ * sampling - is currently active. */
+int kbase_platform_dvfs_get_enable_status(void)
+{
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	int enable;
+
+	kbdev = mali_dvfs_status_current.kbdev;
+	spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
+	enable = kbdev->pm.metrics.timer_active;
+	spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
+
+	return enable;
+}
+
+/*
+ * Enable or disable DVFS and optionally force frequency @freq (kHz).
+ *
+ * Starts/stops the kbase PM metrics timer accordingly and, when @freq
+ * differs from the current frequency, resets the accumulated utilisation
+ * statistics and jumps to the matching table level.
+ * Serialized against the DVFS worker by mali_enable_clock_lock.
+ * Always returns MALI_TRUE.
+ */
+int kbase_platform_dvfs_enable(bool enable, int freq)
+{
+	mali_dvfs_status *dvfs_status;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	struct rk_context *platform;
+
+	dvfs_status = &mali_dvfs_status_current;
+	kbdev = mali_dvfs_status_current.kbdev;
+
+	BUG_ON(kbdev == NULL);
+	platform = (struct rk_context *)kbdev->platform_context;
+
+	mutex_lock(&mali_enable_clock_lock);
+
+	if (enable != kbdev->pm.metrics.timer_active) {
+		if (enable) {
+			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
+			kbdev->pm.metrics.timer_active = MALI_TRUE;
+			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
+			hrtimer_start(&kbdev->pm.metrics.timer,
+					HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY),
+					HRTIMER_MODE_REL);
+		} else {
+			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
+			kbdev->pm.metrics.timer_active = MALI_FALSE;
+			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
+			hrtimer_cancel(&kbdev->pm.metrics.timer);
+		}
+	}
+
+	/* Frequency change requested: reset stats and apply the new level. */
+	if (freq != MALI_DVFS_CURRENT_FREQ) {
+		spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+		platform->time_tick = 0;
+		platform->time_busy = 0;
+		platform->time_idle = 0;
+		platform->utilisation = 0;
+		dvfs_status->step = kbase_platform_dvfs_get_level(freq);
+		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+		kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
+	}
+
+	mutex_unlock(&mali_enable_clock_lock);
+
+	return MALI_TRUE;
+}
+/* fix_float(a): scale a by dividend/10 (i.e. 0.7), rounding up. */
+#define dividend 7
+#define fix_float(a) ((((a)*dividend)%10)?((((a)*dividend)/10)+1):(((a)*dividend)/10))
+/*
+ * Fill in the min/max utilisation thresholds for DVFS table entry @level.
+ *
+ * max_threshold grows by div_dvfs per level (the top level is levelf_max);
+ * min_threshold is the previous level's max scaled by the clock ratio,
+ * nudged up by fix_float() of the resulting window size.
+ * Assumes entries 0..level-1 were computed first. Always returns MALI_TRUE.
+ */
+static bool calculate_dvfs_max_min_threshold(u32 level)
+{
+	u32 pre_level;
+	u32 tmp ;
+	if (0 == level) {
+		/* Single-entry table: level 0 is also the top level. */
+		if ((MALI_DVFS_STEP-1) == level) {
+			mali_dvfs_infotbl[level].min_threshold = level0_min;
+			mali_dvfs_infotbl[level].max_threshold = levelf_max;
+		} else {
+			mali_dvfs_infotbl[level].min_threshold = level0_min;
+			mali_dvfs_infotbl[level].max_threshold = level0_max;
+		}
+	} else {
+		pre_level = level - 1;
+		if ((MALI_DVFS_STEP-1) == level) {
+			mali_dvfs_infotbl[level].max_threshold = levelf_max;
+		} else {
+			mali_dvfs_infotbl[level].max_threshold = mali_dvfs_infotbl[pre_level].max_threshold +
+													 div_dvfs;
+		}
+		mali_dvfs_infotbl[level].min_threshold = (mali_dvfs_infotbl[pre_level].max_threshold *
+												  (mali_dvfs_infotbl[pre_level].clock/1000)) /
+												 (mali_dvfs_infotbl[level].clock/1000);
+
+		tmp = mali_dvfs_infotbl[level].max_threshold - mali_dvfs_infotbl[level].min_threshold;
+
+		mali_dvfs_infotbl[level].min_threshold += fix_float(tmp);
+	}
+	pr_info("mali_dvfs_infotbl[%d].clock=%d,min_threshold=%d,max_threshold=%d\n",
+		level,mali_dvfs_infotbl[level].clock, mali_dvfs_infotbl[level].min_threshold,
+		mali_dvfs_infotbl[level].max_threshold);
+	return MALI_TRUE;
+}
+
+/*
+ * Initialize DVFS: pull the frequency/voltage table from the clock node
+ * (falling back to the built-in table), derive per-level utilisation
+ * thresholds, create the worker queue, register the touch-boost input
+ * handler and seed mali_dvfs_status_current. Returns MALI_TRUE.
+ *
+ * Fixes: the table copy is now bounded by the size of mali_dvfs_infotbl
+ * (an oversized DT table previously overflowed it), and the return value
+ * of input_register_handler() is no longer silently dropped.
+ */
+int kbase_platform_dvfs_init(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	struct rk_context *platform;
+	int i;
+	int rc;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (NULL == platform)
+		panic("oops");
+
+	mali_freq_table = dvfs_get_freq_volt_table(platform->mali_clk_node);
+
+	if (mali_freq_table == NULL) {
+		printk("mali freq table not assigned yet,use default\n");
+		goto not_assigned ;
+	} else {
+		/* Recalculate MALI_DVFS_STEP from the provided table, but never
+		 * write past the end of the static mali_dvfs_infotbl[]. */
+		MALI_DVFS_STEP = 0;
+		for (i = 0; mali_freq_table[i].frequency != CPUFREQ_TABLE_END &&
+			    i < ARRAY_SIZE(mali_dvfs_infotbl); i++) {
+			mali_dvfs_infotbl[i].clock = mali_freq_table[i].frequency;
+			MALI_DVFS_STEP++;
+		}
+		if(MALI_DVFS_STEP > 1)
+			div_dvfs = round_up(((levelf_max - level0_max)/(MALI_DVFS_STEP-1)),1);
+		printk("MALI_DVFS_STEP=%d,div_dvfs=%d\n",MALI_DVFS_STEP,div_dvfs);
+
+		for(i=0;i<MALI_DVFS_STEP;i++)
+			calculate_dvfs_max_min_threshold(i);
+		p_mali_dvfs_infotbl = mali_dvfs_infotbl;
+	}
+not_assigned :
+	if (!mali_dvfs_wq)
+		mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+
+	spin_lock_init(&mali_dvfs_spinlock);
+	mutex_init(&mali_set_clock_lock);
+	mutex_init(&mali_enable_clock_lock);
+
+	spin_lock_init(&platform->gpu_in_touch_lock);
+	/* Touch-boost is best effort: log and carry on if the input handler
+	 * cannot be registered. */
+	rc = input_register_handler(&gpufreq_input_handler);
+	if (rc)
+		pr_warn("%s: input handler registration failed (%d)\n", __func__, rc);
+
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	mali_dvfs_status_current.kbdev = kbdev;
+	mali_dvfs_status_current.utilisation = 0;
+	mali_dvfs_status_current.step = 0;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	mali_dvfs_status_current.upper_lock = -1;
+	mali_dvfs_status_current.under_lock = -1;
+#endif
+
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+
+	return MALI_TRUE;
+}
+
+/* Tear down DVFS: destroy the worker queue and drop the input handler. */
+void kbase_platform_dvfs_term(void)
+{
+	struct workqueue_struct *wq = mali_dvfs_wq;
+
+	mali_dvfs_wq = NULL;
+	if (wq)
+		destroy_workqueue(wq);
+
+	input_unregister_handler(&gpufreq_input_handler);
+}
+#endif /*CONFIG_MALI_MIDGARD_DVFS*/
+
+/*
+ * Return the clock (kHz) pinned as DVFS ceiling, or -1 when no upper lock
+ * is set (or CONFIG_MALI_MIDGARD_FREQ_LOCK is disabled).
+ */
+int mali_get_dvfs_upper_locked_freq(void)
+{
+	unsigned long flags;
+	int locked_level = -1;
+
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	if (mali_dvfs_status_current.upper_lock >= 0)
+		locked_level = mali_dvfs_infotbl[mali_dvfs_status_current.upper_lock].clock;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+#endif
+	return locked_level;
+}
+
+/*
+ * Return the clock (kHz) pinned as DVFS floor, or -1 when no under lock
+ * is set (or CONFIG_MALI_MIDGARD_FREQ_LOCK is disabled).
+ */
+int mali_get_dvfs_under_locked_freq(void)
+{
+	unsigned long flags;
+	int locked_level = -1;
+
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	if (mali_dvfs_status_current.under_lock >= 0)
+		locked_level = mali_dvfs_infotbl[mali_dvfs_status_current.under_lock].clock;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+#endif
+	return locked_level;
+}
+
+/*
+ * Return the current DVFS step index.
+ * NOTE(review): yields -1 whenever CONFIG_MALI_MIDGARD_FREQ_LOCK is off,
+ * even though mali_dvfs_status_current.step exists regardless - confirm
+ * that callers expect this.
+ */
+int mali_get_dvfs_current_level(void)
+{
+	unsigned long flags;
+	int current_level = -1;
+
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	current_level = mali_dvfs_status_current.step;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+#endif
+	return current_level;
+}
+
+/*
+ * Pin the DVFS ceiling to table index @level.  Rejects a ceiling below the
+ * current floor.  Returns 0 on success, -1 on conflict.
+ * NOTE(review): 'flags' is unused when CONFIG_MALI_MIDGARD_FREQ_LOCK is off
+ * (harmless unused-variable warning); same applies to the helpers below.
+ */
+int mali_dvfs_freq_lock(int level)
+{
+	unsigned long flags;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	if (mali_dvfs_status_current.under_lock >= 0 &&
+		mali_dvfs_status_current.under_lock > level) {
+		printk(KERN_ERR " Upper lock Error : Attempting to set upper lock to below under lock\n");
+		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+		return -1;
+	}
+	mali_dvfs_status_current.upper_lock = level;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+
+	printk(KERN_DEBUG " Upper Lock Set : %d\n", level);
+#endif
+	return 0;
+}
+
+/* Release the DVFS ceiling (upper lock). */
+void mali_dvfs_freq_unlock(void)
+{
+	unsigned long flags;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	mali_dvfs_status_current.upper_lock = -1;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+#endif
+	printk(KERN_DEBUG "mali Upper Lock Unset\n");
+}
+
+/*
+ * Pin the DVFS floor to table index @level.  Rejects a floor above the
+ * current ceiling.  Returns 0 on success, -1 on conflict.
+ */
+int mali_dvfs_freq_under_lock(int level)
+{
+	unsigned long flags;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	if (mali_dvfs_status_current.upper_lock >= 0 &&
+		mali_dvfs_status_current.upper_lock < level) {
+		printk(KERN_ERR "mali Under lock Error : Attempting to set under lock to above upper lock\n");
+		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+		return -1;
+	}
+	mali_dvfs_status_current.under_lock = level;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+
+	printk(KERN_DEBUG "mali Under Lock Set : %d\n", level);
+#endif
+	return 0;
+}
+
+/* Release the DVFS floor (under lock). */
+void mali_dvfs_freq_under_unlock(void)
+{
+	unsigned long flags;
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
+	mali_dvfs_status_current.under_lock = -1;
+	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
+#endif
+	printk(KERN_DEBUG " mali clock Under Lock Unset\n");
+}
+
+/* Program the GPU clock (kHz) through the rockchip dvfs node. */
+void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq)
+{
+	struct rk_context *ctx;
+
+	if (!kbdev)
+		panic("oops");
+
+	ctx = (struct rk_context *)kbdev->platform_context;
+	if (!ctx)
+		panic("oops");
+
+	if (!ctx->mali_clk_node) {
+		printk("mali_clk_node not init\n");
+		return;
+	}
+
+	mali_dvfs_clk_set(ctx->mali_clk_node, freq);
+}
+
+
+/*
+ * Map a clock frequency (kHz) back to its index in mali_dvfs_infotbl.
+ * Returns -1 when the frequency is not an entry of the table.
+ */
+int kbase_platform_dvfs_get_level(int freq)
+{
+	int idx = 0;
+
+	while (idx < MALI_DVFS_STEP) {
+		if (mali_dvfs_infotbl[idx].clock == freq)
+			return idx;
+		idx++;
+	}
+	return -1;
+}
+/*
+ * Switch the GPU to DVFS table index @level, clamped between the current
+ * under/upper locks, and account the time spent at the previous level.
+ *
+ * NOTE(review): the prev_level early-return happens *before* lock
+ * clamping, so a request equal to the cached level is skipped even if the
+ * locks have changed since - confirm this is intended.
+ * NOTE(review): 'static int prev_level' is unsynchronised shared state;
+ * callers appear to serialise via mali_set_clock_lock only when
+ * CONFIG_MALI_MIDGARD_DVFS is on.
+ */
+void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level)
+{
+	static int prev_level = -1;
+
+	if (level == prev_level)
+		return;
+
+	if (WARN_ON((level >= MALI_DVFS_STEP) || (level < 0))) {
+		printk("unkown mali dvfs level:level = %d,set clock not done \n",level);
+		return ;
+	}
+	/*panic("invalid level");*/
+#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
+	if (mali_dvfs_status_current.upper_lock >= 0 &&
+		level > mali_dvfs_status_current.upper_lock)
+		level = mali_dvfs_status_current.upper_lock;
+	if (mali_dvfs_status_current.under_lock >= 0 &&
+		level < mali_dvfs_status_current.under_lock)
+		level = mali_dvfs_status_current.under_lock;
+#endif
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	mutex_lock(&mali_set_clock_lock);
+#endif
+
+	kbase_platform_dvfs_set_clock(kbdev, mali_dvfs_infotbl[level].clock);
+#if defined(CONFIG_MALI_MIDGARD_DEBUG_SYS) && defined(CONFIG_MALI_MIDGARD_DVFS)
+	/* charge the elapsed time to the level we are leaving */
+	update_time_in_state(prev_level);
+#endif
+	prev_level = level;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	mutex_unlock(&mali_set_clock_lock);
+#endif
+}
+
+#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+/*
+ * Accumulate the jiffies elapsed since the last call into the residency
+ * counter of @level (the level the GPU has been running at).  No-op while
+ * DVFS is disabled or on the very first call (prev_time bootstrap).
+ */
+static void update_time_in_state(int level)
+{
+	u64 current_time;
+	static u64 prev_time=0;
+
+	if (level < 0)
+		return;
+
+	if (!kbase_platform_dvfs_get_enable_status())
+		return;
+
+	if (prev_time ==0)
+		prev_time=get_jiffies_64();
+
+	current_time = get_jiffies_64();
+	mali_dvfs_infotbl[level].time += current_time-prev_time;
+
+	prev_time = current_time;
+}
+#endif
+
+/*
+ * sysfs show handler for "time_in_state": one "<clock> <jiffies>" line per
+ * DVFS level, after folding the still-running interval into the current
+ * level's counter.
+ */
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+	int i;
+
+	kbdev = dev_get_drvdata(dev);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	update_time_in_state(mali_dvfs_status_current.step);
+#endif
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < MALI_DVFS_STEP; i++)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"%d %llu\n",
+				mali_dvfs_infotbl[i].clock, mali_dvfs_infotbl[i].time);
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/* sysfs store handler for "time_in_state": zero every residency counter. */
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int level;
+
+	for (level = 0; level < MALI_DVFS_STEP; level++)
+		mali_dvfs_infotbl[level].time = 0;
+
+	printk(KERN_DEBUG "time_in_state value is reset complete.\n");
+	return count;
+}
+#endif
--- /dev/null
+/* drivers/gpu/midgard/platform/rk/mali_kbase_dvfs.h
+ *
+ * Rockchip SoC Mali-T764 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file mali_kbase_dvfs.h
+ * DVFS
+ */
+
+#ifndef _KBASE_DVFS_H_
+#define _KBASE_DVFS_H_
+
+/* Frequency that DVFS clock frequency decisions should be made */
+#define KBASE_PM_DVFS_FREQUENCY 100
+
+#define MALI_DVFS_KEEP_STAY_CNT 10
+#define MALI_DVFS_UP_TIME_INTERVAL 1
+#define MALI_DVFS_DOWN_TIME_INTERVAL 2
+#define MALI_DVFS_CURRENT_FREQ 0
+#if 0
+#define MALI_DVFS_BL_CONFIG_FREQ 500
+#define MALI_DVFS_START_FREQ 400
+#endif
+/* One operating point of the GPU DVFS table. */
+typedef struct _mali_dvfs_info {
+	unsigned int voltage;	/* operating voltage - units per the SoC dvfs table; TODO confirm (uV?) */
+	unsigned int clock;	/* clock frequency in kHz (scaled by MALI_KHZ when programmed) */
+	int min_threshold;	/* utilisation below which the governor may step down - confirm units (%) */
+	int max_threshold;	/* utilisation above which the governor may step up - confirm units (%) */
+	unsigned long long time;	/* cumulative jiffies spent at this level (time_in_state) */
+} mali_dvfs_info;
+
+#define MALI_KHZ 1000
+extern mali_dvfs_info *p_mali_dvfs_infotbl;
+extern unsigned int MALI_DVFS_STEP;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+#define CONFIG_MALI_MIDGARD_FREQ_LOCK
+#endif
+
+void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq);
+void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level);
+int kbase_platform_dvfs_get_level(int freq);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+int kbase_platform_dvfs_init(struct kbase_device *dev);
+void kbase_platform_dvfs_term(void);
+/*int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);*/
+/*int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,u32 util_gl_share, u32 util_cl_share[2]);*/
+int kbase_platform_dvfs_get_enable_status(void);
+int kbase_platform_dvfs_enable(bool enable, int freq);
+int kbase_platform_dvfs_get_utilisation(void);
+#endif
+
+int mali_get_dvfs_current_level(void);
+int mali_get_dvfs_upper_locked_freq(void);
+int mali_get_dvfs_under_locked_freq(void);
+int mali_dvfs_freq_lock(int level);
+void mali_dvfs_freq_unlock(void);
+int mali_dvfs_freq_under_lock(int level);
+void mali_dvfs_freq_under_unlock(void);
+
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf);
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+
+#endif /* _KBASE_DVFS_H_ */
--- /dev/null
+/* drivers/gpu/t6xx/kbase/src/platform/rk/mali_kbase_platform.c
+ *
+ * Rockchip SoC Mali-T764 platform-dependent codes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file mali_kbase_platform.c
+ * Platform-dependent init.
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_uku.h>
+#include <mali_kbase_mem.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/fb.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <platform/rk/mali_kbase_platform.h>
+#include <platform/rk/mali_kbase_dvfs.h>
+
+#include <mali_kbase_gator.h>
+
+#include <linux/rockchip/dvfs.h>
+
+#define MALI_T7XX_DEFAULT_CLOCK 100000
+
+
+static int mali_clk_status = 0;
+static int mali_pd_status = 0;
+
+u32 kbase_group_error = 0;
+static struct kobject *rk_gpu;
+
+/*
+ * Request a new GPU clock rate through the rockchip dvfs node.
+ * @node: dvfs clock node ("clk_gpu").
+ * @rate: requested rate in kHz (converted to Hz via MALI_KHZ).
+ *
+ * Returns 0 on success, negative on error.
+ *
+ * Fix: the original logged the NULL-node error but then fell through and
+ * called dvfs_clk_set_rate() with the NULL node anyway; bail out early.
+ */
+int mali_dvfs_clk_set(struct dvfs_node *node,unsigned long rate)
+{
+	int ret;
+
+	if (!node) {
+		printk("clk_get_dvfs_node error \r\n");
+		return -1;
+	}
+
+	ret = dvfs_clk_set_rate(node, rate * MALI_KHZ);
+	if (ret)
+		printk("dvfs_clk_set_rate error \r\n");
+
+	return ret;
+}
+/*
+ * Acquire and enable the GPU power domain ("pd_gpu") and clock
+ * ("clk_gpu"), then program the default frequency.
+ *
+ * Returns 0 on success, -EPERM on failure.
+ *
+ * Fix: on the clk_gpu failure path the original only clk_put() the power
+ * domain while leaving it enabled and mali_pd_status stuck at 1; the
+ * error path now disables the pd and resets the bookkeeping.
+ */
+static int kbase_platform_power_clock_init(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (NULL == platform)
+		panic("oops");
+
+	/* enable mali t760 power domain */
+	platform->mali_pd = clk_get(NULL, "pd_gpu");
+	if (IS_ERR_OR_NULL(platform->mali_pd)) {
+		platform->mali_pd = NULL;
+		printk(KERN_ERR "%s, %s(): failed to get [platform->mali_pd]\n", __FILE__, __func__);
+		goto out;
+	}
+	clk_prepare_enable(platform->mali_pd);
+	printk("mali pd enabled\n");
+	mali_pd_status = 1;
+
+	/* enable mali t760 clock */
+	platform->mali_clk_node = clk_get_dvfs_node("clk_gpu");
+	if (IS_ERR_OR_NULL(platform->mali_clk_node)) {
+		platform->mali_clk_node = NULL;
+		printk(KERN_ERR "%s, %s(): failed to get [platform->mali_clk_node]\n", __FILE__, __func__);
+		goto out;
+	}
+	dvfs_clk_prepare_enable(platform->mali_clk_node);
+	printk("clk enabled\n");
+	mali_dvfs_clk_set(platform->mali_clk_node, MALI_T7XX_DEFAULT_CLOCK);
+	mali_clk_status = 1;
+
+	return 0;
+
+out:
+	if (platform->mali_pd) {
+		clk_disable_unprepare(platform->mali_pd);
+		clk_put(platform->mali_pd);
+		platform->mali_pd = NULL;
+	}
+	mali_pd_status = 0;
+
+	return -EPERM;
+}
+/* Gate the GPU clock; idempotent when the clock is already off. */
+int kbase_platform_clock_off(struct kbase_device *kbdev)
+{
+	struct rk_context *ctx;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	ctx = (struct rk_context *)kbdev->platform_context;
+	if (!ctx)
+		return -ENODEV;
+
+	if (!mali_clk_status)
+		return 0;
+
+	if (ctx->mali_clk_node)
+		dvfs_clk_disable_unprepare(ctx->mali_clk_node);
+	mali_clk_status = 0;
+
+	return 0;
+}
+
+/* Ungate the GPU clock; idempotent when the clock is already on. */
+int kbase_platform_clock_on(struct kbase_device *kbdev)
+{
+	struct rk_context *ctx;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	ctx = (struct rk_context *)kbdev->platform_context;
+	if (!ctx)
+		return -ENODEV;
+
+	if (mali_clk_status)
+		return 0;
+
+	if (ctx->mali_clk_node)
+		dvfs_clk_prepare_enable(ctx->mali_clk_node);
+	mali_clk_status = 1;
+
+	return 0;
+}
+/* Report whether the GPU power domain is currently enabled (1) or not (0). */
+int kbase_platform_is_power_on(void)
+{
+	return mali_pd_status ? 1 : 0;
+}
+
+/*
+ * Turn on the GPU power domain; idempotent when already on.  Also emits
+ * the power-on timeline event for tracing.
+ */
+int kbase_platform_power_on(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	if (mali_pd_status == 1)
+		return 0;
+#if 1
+	if(platform->mali_pd)
+		clk_prepare_enable(platform->mali_pd);
+#endif
+	mali_pd_status = 1;
+	KBASE_TIMELINE_GPU_POWER(kbdev, 1);
+
+	return 0;
+}
+
+/*
+ * Turn off the GPU power domain; idempotent when already off.  Also emits
+ * the power-off timeline event for tracing.
+ */
+int kbase_platform_power_off(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	if (mali_pd_status== 0)
+		return 0;
+#if 1
+	if(platform->mali_pd)
+		clk_disable_unprepare(platform->mali_pd);
+#endif
+	mali_pd_status = 0;
+	KBASE_TIMELINE_GPU_POWER(kbdev, 0);
+
+	return 0;
+}
+
+/*
+ * Atomically switch the GPU power domain + clock on (@control != 0) or
+ * off (@control == 0), tracked by platform->cmu_pmu_status; idempotent.
+ *
+ * NOTE(review): panics while holding cmu_pmu_lock with IRQs disabled if
+ * any sub-step fails - a deliberate hard-fail policy, but worth confirming.
+ */
+int kbase_platform_cmu_pmu_control(struct kbase_device *kbdev, int control)
+{
+	unsigned long flags;
+	struct rk_context *platform;
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	spin_lock_irqsave(&platform->cmu_pmu_lock, flags);
+
+	/* off */
+	if (control == 0)
+	{
+		if (platform->cmu_pmu_status == 0)
+		{
+			spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
+			return 0;
+		}
+
+		if (kbase_platform_power_off(kbdev))
+			panic("failed to turn off mali power domain\n");
+		if (kbase_platform_clock_off(kbdev))
+			panic("failed to turn off mali clock\n");
+
+		platform->cmu_pmu_status = 0;
+		printk("turn off mali power \n");
+	}
+	else
+	{
+		/* on */
+		if (platform->cmu_pmu_status == 1)
+		{
+			spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
+			return 0;
+		}
+
+		if (kbase_platform_power_on(kbdev))
+			panic("failed to turn on mali power domain\n");
+		if (kbase_platform_clock_on(kbdev))
+			panic("failed to turn on mali clock\n");
+
+		platform->cmu_pmu_status = 1;
+		printk(KERN_ERR "turn on mali power\n");
+	}
+
+	spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
+
+	return 0;
+}
+
+/* sysfs show handler: number of group errors recorded for this device. */
+static ssize_t error_count_show(struct device *dev,struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->kbase_group_error);
+}
+static DEVICE_ATTR(error_count, S_IRUGO, error_count_show, NULL);
+
+#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
+/*
+ * sysfs show handler for "clock": current GPU clock plus the list of
+ * selectable frequencies from the DVFS table.
+ * NOTE(review): clkrate (Hz) is printed with %d although it is unsigned,
+ * and there is no newline between the current value and "Possible
+ * settings" header lines - cosmetic only.
+ */
+static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	struct rk_context *platform;
+	ssize_t ret = 0;
+	unsigned int clkrate;
+	int i ;
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	if (!platform->mali_clk_node)
+	{
+		printk("mali_clk_node not init\n");
+		return -ENODEV;
+	}
+	clkrate = dvfs_clk_get_rate(platform->mali_clk_node);
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current clk mali = %dMhz", clkrate / 1000000);
+
+	/* To be revised */
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings:");
+	for(i=0;i<MALI_DVFS_STEP;i++)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ",p_mali_dvfs_infotbl[i].clock/1000);
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs store handler for "clock": parse a frequency (kHz) and jump the
+ * GPU to the matching DVFS level.
+ *
+ * Fixes vs. original: drop the unused 'tmp' local and the dead #if 0
+ * hard-coded frequency list, and reject frequencies that are not in the
+ * DVFS table instead of passing level -1 on to set_level (which only
+ * WARNs and silently ignores the write).
+ */
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	struct rk_context *platform;
+	unsigned int freq;
+	int level;
+
+	kbdev = dev_get_drvdata(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	if (!platform->mali_clk_node)
+		return -ENODEV;
+
+	freq = simple_strtoul(buf, NULL, 10);
+
+	level = kbase_platform_dvfs_get_level(freq);
+	if (level < 0) {
+		dev_err(dev, "set_clock: invalid value\n");
+		return -ENOENT;
+	}
+
+	kbase_platform_dvfs_set_level(kbdev, level);
+	return count;
+}
+
+/*
+ * sysfs show handler for "fbdev": dump resolution and physical framebuffer
+ * address of every registered framebuffer device.
+ */
+static ssize_t show_fbdev(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+	int i;
+
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < num_registered_fb; i++)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "fb[%d] xres=%d, yres=%d, addr=0x%lx\n", i, registered_fb[i]->var.xres, registered_fb[i]->var.yres, registered_fb[i]->fix.smem_start);
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * RAMINDEX selector values for the CPU's internal RAM/TLB debug arrays
+ * (used as bits [31:24] of the RAMINDEX register write below).
+ * NOTE(review): these encodings are CPU-specific (Cortex-A family) -
+ * confirm against the core's TRM before reuse.
+ */
+typedef enum {
+	L1_I_tag_RAM = 0x00,
+	L1_I_data_RAM = 0x01,
+	L1_I_BTB_RAM = 0x02,
+	L1_I_GHB_RAM = 0x03,
+	L1_I_TLB_RAM = 0x04,
+	L1_I_indirect_predictor_RAM = 0x05,
+	L1_D_tag_RAM = 0x08,
+	L1_D_data_RAM = 0x09,
+	L1_D_load_TLB_array = 0x0A,
+	L1_D_store_TLB_array = 0x0B,
+	L2_tag_RAM = 0x10,
+	L2_data_RAM = 0x11,
+	L2_snoop_tag_RAM = 0x12,
+	L2_data_ECC_RAM = 0x13,
+	L2_dirty_RAM = 0x14,
+	L2_TLB_RAM = 0x18
+} RAMID_type;
+
+/*
+ * Read the four data registers (cp15 c15 c1, opc2 0..3) populated by a
+ * previous RAMINDEX write.  Each output pointer may be NULL to skip that
+ * register.
+ */
+static inline void asm_ramindex_mrc(u32 *DL1Data0, u32 *DL1Data1, u32 *DL1Data2, u32 *DL1Data3)
+{
+	u32 val;
+
+	if (DL1Data0) {
+		asm volatile ("mrc p15, 0, %0, c15, c1, 0" : "=r" (val));
+		*DL1Data0 = val;
+	}
+	if (DL1Data1) {
+		asm volatile ("mrc p15, 0, %0, c15, c1, 1" : "=r" (val));
+		*DL1Data1 = val;
+	}
+	if (DL1Data2) {
+		asm volatile ("mrc p15, 0, %0, c15, c1, 2" : "=r" (val));
+		*DL1Data2 = val;
+	}
+	if (DL1Data3) {
+		asm volatile ("mrc p15, 0, %0, c15, c1, 3" : "=r" (val));
+		*DL1Data3 = val;
+	}
+}
+
+/*
+ * Write @val to the RAMINDEX register (cp15 c15 c4 0) to select which RAM
+ * entry the data registers expose, then barrier so the read is ordered.
+ */
+static inline void asm_ramindex_mcr(u32 val)
+{
+	asm volatile ("mcr p15, 0, %0, c15, c4, 0" : : "r" (val));
+	asm volatile ("dsb");
+	asm volatile ("isb");
+}
+
+/* Select RAM entry @val via RAMINDEX, then read back the four data regs. */
+static void get_tlb_array(u32 val, u32 *DL1Data0, u32 *DL1Data1, u32 *DL1Data2, u32 *DL1Data3)
+{
+	asm_ramindex_mcr(val);
+	asm_ramindex_mrc(DL1Data0, DL1Data1, DL1Data2, DL1Data3);
+}
+
+/* RAM array to dump on the next "dtlb" read; chosen via set_dtlb(). */
+static RAMID_type ramindex = L1_D_load_TLB_array;
+/*
+ * sysfs show handler for "dtlb": dump the selected CPU RAM/TLB debug array
+ * to the kernel log (only the TLB arrays are implemented; everything else
+ * prints "Not implemented yet").  The sysfs buffer itself only reports
+ * success.
+ * NOTE(review): runs the cp15 accesses on whichever CPU services the read;
+ * no affinity pinning - confirm this is acceptable for debug use.
+ */
+static ssize_t show_dtlb(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+	int entries, ways;
+	u32 DL1Data0 = 0, DL1Data1 = 0, DL1Data2 = 0, DL1Data3 = 0;
+
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	/* L1-I tag RAM */
+	if (ramindex == L1_I_tag_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-I data RAM */
+	else if (ramindex == L1_I_data_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-I BTB RAM */
+	else if (ramindex == L1_I_BTB_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-I GHB RAM */
+	else if (ramindex == L1_I_GHB_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-I TLB RAM */
+	else if (ramindex == L1_I_TLB_RAM) {
+		printk(KERN_DEBUG "L1-I TLB RAM\n");
+		for (entries = 0; entries < 32; entries++) {
+			get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, NULL);
+			printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x DL1Data2=%08x\n", entries, DL1Data0, DL1Data1 & 0xffff, 0x0);
+		}
+	}
+	/* L1-I indirect predictor RAM */
+	else if (ramindex == L1_I_indirect_predictor_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-D tag RAM */
+	else if (ramindex == L1_D_tag_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-D data RAM */
+	else if (ramindex == L1_D_data_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L1-D load TLB array */
+	else if (ramindex == L1_D_load_TLB_array) {
+		printk(KERN_DEBUG "L1-D load TLB array\n");
+		for (entries = 0; entries < 32; entries++) {
+			get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
+			printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3 & 0x3f);
+		}
+	}
+	/* L1-D store TLB array */
+	else if (ramindex == L1_D_store_TLB_array) {
+		printk(KERN_DEBUG "\nL1-D store TLB array\n");
+		for (entries = 0; entries < 32; entries++) {
+			get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
+			printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3 & 0x3f);
+		}
+	}
+	/* L2 tag RAM */
+	else if (ramindex == L2_tag_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L2 data RAM */
+	else if (ramindex == L2_data_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L2 snoop tag RAM */
+	else if (ramindex == L2_snoop_tag_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L2 data ECC RAM */
+	else if (ramindex == L2_data_ECC_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+	/* L2 dirty RAM */
+	else if (ramindex == L2_dirty_RAM)
+		printk(KERN_DEBUG "Not implemented yet\n");
+
+	/* L2 TLB array */
+	else if (ramindex == L2_TLB_RAM) {
+		printk(KERN_DEBUG "\nL2 TLB array\n");
+		for (ways = 0; ways < 4; ways++) {
+			for (entries = 0; entries < 512; entries++) {
+				get_tlb_array((ramindex << 24) + (ways << 18) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
+				printk(KERN_DEBUG "ways[%d]:entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", ways, entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3);
+			}
+		}
+	} else {
+	}
+
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "Succeeded...\n");
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+	return ret;
+}
+
+/*
+ * sysfs store handler for "dtlb": select which RAM/TLB debug array the
+ * next "dtlb" read dumps.  The accepted names map 1:1 onto RAMID_type;
+ * anything else prints the list of valid options.
+ */
+static ssize_t set_dtlb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	static const struct {
+		const char *name;
+		RAMID_type id;
+	} ram_table[] = {
+		{ "L1_I_tag_RAM", L1_I_tag_RAM },
+		{ "L1_I_data_RAM", L1_I_data_RAM },
+		{ "L1_I_BTB_RAM", L1_I_BTB_RAM },
+		{ "L1_I_GHB_RAM", L1_I_GHB_RAM },
+		{ "L1_I_TLB_RAM", L1_I_TLB_RAM },
+		{ "L1_I_indirect_predictor_RAM", L1_I_indirect_predictor_RAM },
+		{ "L1_D_tag_RAM", L1_D_tag_RAM },
+		{ "L1_D_data_RAM", L1_D_data_RAM },
+		{ "L1_D_load_TLB_array", L1_D_load_TLB_array },
+		{ "L1_D_store_TLB_array", L1_D_store_TLB_array },
+		{ "L2_tag_RAM", L2_tag_RAM },
+		{ "L2_data_RAM", L2_data_RAM },
+		{ "L2_snoop_tag_RAM", L2_snoop_tag_RAM },
+		{ "L2_data_ECC_RAM", L2_data_ECC_RAM },
+		{ "L2_dirty_RAM", L2_dirty_RAM },
+		{ "L2_TLB_RAM", L2_TLB_RAM },
+	};
+	struct kbase_device *kbdev;
+	size_t idx;
+
+	kbdev = dev_get_drvdata(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	for (idx = 0; idx < ARRAY_SIZE(ram_table); idx++) {
+		if (sysfs_streq(ram_table[idx].name, buf)) {
+			ramindex = ram_table[idx].id;
+			return count;
+		}
+	}
+
+	printk(KERN_DEBUG "Invalid value....\n\n");
+	printk(KERN_DEBUG "Available options are one of below\n");
+	printk(KERN_DEBUG "L1_I_tag_RAM, L1_I_data_RAM, L1_I_BTB_RAM\n");
+	printk(KERN_DEBUG "L1_I_GHB_RAM, L1_I_TLB_RAM, L1_I_indirect_predictor_RAM\n");
+	printk(KERN_DEBUG "L1_D_tag_RAM, L1_D_data_RAM, L1_D_load_TLB_array, L1_D_store_TLB_array\n");
+	printk(KERN_DEBUG "L2_tag_RAM, L2_data_RAM, L2_snoop_tag_RAM, L2_data_ECC_RAM\n");
+	printk(KERN_DEBUG "L2_dirty_RAM, L2_TLB_RAM\n");
+
+	return count;
+}
+
+/*
+ * sysfs show handler for "dvfs": report whether DVFS is enabled, the
+ * current utilisation and the current GPU clock.
+ */
+static ssize_t show_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	struct rk_context *platform;
+	ssize_t ret = 0;
+	unsigned int clkrate;
+
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	clkrate = dvfs_clk_get_rate(platform->mali_clk_node);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	if (kbase_platform_dvfs_get_enable_status())
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is on\nutilisation:%d\ncurrent clock:%dMhz", kbase_platform_dvfs_get_utilisation(),clkrate/1000000);
+	else
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is off,clock:%dMhz",clkrate/1000000);
+#else
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled");
+#endif
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs store handler for "dvfs": accepts "on"/"off" to toggle GPU DVFS.
+ * "off" parks the clock at the highest table entry; "on" restarts at the
+ * lowest.
+ *
+ * Fix: guard against a NULL platform_context before dereferencing it,
+ * matching the checks done by the sibling sysfs handlers.
+ */
+static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	struct rk_context *platform;
+#endif
+
+	if (!kbdev)
+		return -ENODEV;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	platform = (struct rk_context *)kbdev->platform_context;
+	if (!platform)
+		return -ENODEV;
+
+	if (sysfs_streq("off", buf)) {
+		kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[MALI_DVFS_STEP-1].clock);
+		platform->dvfs_enabled = false;
+	} else if (sysfs_streq("on", buf)) {
+		kbase_platform_dvfs_enable(true, p_mali_dvfs_infotbl[0].clock);
+		platform->dvfs_enabled = true;
+	} else {
+		printk(KERN_DEBUG "invalid val -only [on, off] is accepted\n");
+	}
+#else
+	printk(KERN_DEBUG "mali DVFS is disabled\n");
+#endif
+	return count;
+}
+
+/*
+ * sysfs show handler for "dvfs_upper_lock": report the current DVFS
+ * ceiling (if any) and the selectable frequencies.
+ */
+static ssize_t show_upper_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+	int i;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	int locked_level = -1;
+#endif
+
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	locked_level = mali_get_dvfs_upper_locked_freq();
+	if (locked_level > 0)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Upper Lock Level = %dMhz", locked_level);
+	else
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "Unset the Upper Lock Level");
+	/*ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings : 400, 350,266, 160, 100, If you want to unlock : 600 or off");*/
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings :");
+	for(i=0;i<MALI_DVFS_STEP;i++)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ",p_mali_dvfs_infotbl[i].clock/1000);
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, ", If you want to unlock : off");
+
+#else
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled. You can not set");
+#endif
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs store handler for "dvfs_upper_lock": pin the DVFS ceiling to the
+ * written frequency (kHz), or "off" to release it.
+ *
+ * Fix: the original checked 'i == MALI_DVFS_STEP' *inside* the loop body,
+ * where it can never be true, so invalid frequencies were silently
+ * accepted; the not-found check now runs after the loop.
+ */
+static ssize_t set_upper_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int i;
+	unsigned int freq;
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	freq = simple_strtoul(buf, NULL, 10);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	if (sysfs_streq("off", buf)) {
+		mali_dvfs_freq_unlock();
+	} else {
+		for (i = 0; i < MALI_DVFS_STEP; i++) {
+			if (p_mali_dvfs_infotbl[i].clock == freq) {
+				mali_dvfs_freq_lock(i);
+				break;
+			}
+		}
+		if (i == MALI_DVFS_STEP) {
+			dev_err(dev, "set_clock: invalid value\n");
+			return -ENOENT;
+		}
+	}
+#else /* CONFIG_MALI_MIDGARD_DVFS */
+	printk(KERN_DEBUG "mali DVFS is disabled. You can not set\n");
+#endif
+
+	return count;
+}
+
+/*
+ * sysfs show handler for "dvfs_under_lock": report the current DVFS floor
+ * (if any) and the selectable frequencies.
+ */
+static ssize_t show_under_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+	int i;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	int locked_level = -1;
+#endif
+
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	locked_level = mali_get_dvfs_under_locked_freq();
+	if (locked_level > 0)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Under Lock Level = %dMhz", locked_level);
+	else
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "Unset the Under Lock Level");
+	/*ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings : 600, 400, 350,266, 160, If you want to unlock : 100 or off");*/
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings :");
+	for(i=0;i<MALI_DVFS_STEP;i++)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ",p_mali_dvfs_infotbl[i].clock/1000);
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, ", If you want to unlock : off");
+
+#else
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled. You can not set");
+#endif
+
+	/* standard sysfs overflow guard: force a trailing newline + NUL */
+	if (ret < PAGE_SIZE - 1)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs store handler for "dvfs_under_lock": pin the DVFS floor to the
+ * written frequency (kHz), or "off" to release it.
+ *
+ * Fixes vs. original:
+ *  - the requested frequency was never parsed from @buf (freq stayed 0,
+ *    so every numeric write silently matched nothing);
+ *  - copy/paste bug: the *upper*-lock helpers (mali_dvfs_freq_unlock /
+ *    mali_dvfs_freq_lock) were called instead of the under-lock ones;
+ *  - the "invalid value" check compared i == MALI_DVFS_STEP inside the
+ *    loop body where it can never be true; moved after the loop.
+ */
+static ssize_t set_under_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int i;
+	unsigned int freq;
+	struct kbase_device *kbdev;
+	kbdev = dev_get_drvdata(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	if (sysfs_streq("off", buf)) {
+		mali_dvfs_freq_under_unlock();
+	} else {
+		freq = simple_strtoul(buf, NULL, 10);
+		for (i = 0; i < MALI_DVFS_STEP; i++) {
+			if (p_mali_dvfs_infotbl[i].clock == freq) {
+				mali_dvfs_freq_under_lock(i);
+				break;
+			}
+		}
+		if (i == MALI_DVFS_STEP) {
+			dev_err(dev, "set_under_lock_dvfs: invalid value\n");
+			return -ENOENT;
+		}
+	}
+#else /* CONFIG_MALI_MIDGARD_DVFS */
+	printk(KERN_DEBUG "mali DVFS is disabled. You can not set\n");
+#endif
+	return count;
+}
+
+/** Sysfs attribute declarations for the Mali platform device.
+ *
+ * clock           - read/write the GPU operating clock
+ * fbdev           - read framebuffer address information
+ * dtlb            - read/write TLB debug control
+ * dvfs            - read/write DVFS enable state
+ * dvfs_upper_lock - read/write the maximum-frequency lock
+ * dvfs_under_lock - read/write the minimum-frequency lock
+ * time_in_state   - read/reset per-frequency residency statistics
+ */
+DEVICE_ATTR(clock, S_IRUGO | S_IWUSR, show_clock, set_clock);
+DEVICE_ATTR(fbdev, S_IRUGO, show_fbdev, NULL);
+DEVICE_ATTR(dtlb, S_IRUGO | S_IWUSR, show_dtlb, set_dtlb);
+DEVICE_ATTR(dvfs, S_IRUGO | S_IWUSR, show_dvfs, set_dvfs);
+DEVICE_ATTR(dvfs_upper_lock, S_IRUGO | S_IWUSR, show_upper_lock_dvfs, set_upper_lock_dvfs);
+DEVICE_ATTR(dvfs_under_lock, S_IRUGO | S_IWUSR, show_under_lock_dvfs, set_under_lock_dvfs);
+DEVICE_ATTR(time_in_state, S_IRUGO | S_IWUSR, show_time_in_state, set_time_in_state);
+
+/* Create all Mali platform sysfs attributes on @dev.
+ *
+ * Returns 0 on success or -ENOENT on failure.  BUG FIX: on a partial
+ * failure the files created so far are now removed again (reverse-order
+ * goto unwind) instead of being leaked in sysfs.
+ */
+int kbase_platform_create_sysfs_file(struct device *dev)
+{
+	if (device_create_file(dev, &dev_attr_clock)) {
+		dev_err(dev, "Couldn't create sysfs file [clock]\n");
+		goto out;
+	}
+
+	if (device_create_file(dev, &dev_attr_fbdev)) {
+		dev_err(dev, "Couldn't create sysfs file [fbdev]\n");
+		goto out_clock;
+	}
+
+	if (device_create_file(dev, &dev_attr_dtlb)) {
+		dev_err(dev, "Couldn't create sysfs file [dtlb]\n");
+		goto out_fbdev;
+	}
+
+	if (device_create_file(dev, &dev_attr_dvfs)) {
+		dev_err(dev, "Couldn't create sysfs file [dvfs]\n");
+		goto out_dtlb;
+	}
+
+	if (device_create_file(dev, &dev_attr_dvfs_upper_lock)) {
+		dev_err(dev, "Couldn't create sysfs file [dvfs_upper_lock]\n");
+		goto out_dvfs;
+	}
+
+	if (device_create_file(dev, &dev_attr_dvfs_under_lock)) {
+		dev_err(dev, "Couldn't create sysfs file [dvfs_under_lock]\n");
+		goto out_dvfs_upper_lock;
+	}
+
+	if (device_create_file(dev, &dev_attr_time_in_state)) {
+		dev_err(dev, "Couldn't create sysfs file [time_in_state]\n");
+		goto out_dvfs_under_lock;
+	}
+	return 0;
+
+ out_dvfs_under_lock:
+	device_remove_file(dev, &dev_attr_dvfs_under_lock);
+ out_dvfs_upper_lock:
+	device_remove_file(dev, &dev_attr_dvfs_upper_lock);
+ out_dvfs:
+	device_remove_file(dev, &dev_attr_dvfs);
+ out_dtlb:
+	device_remove_file(dev, &dev_attr_dtlb);
+ out_fbdev:
+	device_remove_file(dev, &dev_attr_fbdev);
+ out_clock:
+	device_remove_file(dev, &dev_attr_clock);
+ out:
+	return -ENOENT;
+}
+
+/* Remove every Mali platform sysfs attribute previously created on @dev. */
+void kbase_platform_remove_sysfs_file(struct device *dev)
+{
+	/* Same seven attributes as kbase_platform_create_sysfs_file(),
+	 * removed via a table so additions stay in one place. */
+	struct device_attribute *const attrs[] = {
+		&dev_attr_clock,
+		&dev_attr_fbdev,
+		&dev_attr_dtlb,
+		&dev_attr_dvfs,
+		&dev_attr_dvfs_upper_lock,
+		&dev_attr_dvfs_under_lock,
+		&dev_attr_time_in_state,
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < sizeof(attrs) / sizeof(attrs[0]); idx++)
+		device_remove_file(dev, attrs[idx]);
+}
+#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
+
+/* Allocate and initialise the Rockchip platform context for @kbdev:
+ * DVFS bookkeeping, the /sys/rk_gpu kobject, clocks and initial power-on.
+ *
+ * Returns MALI_ERROR_NONE on success, MALI_ERROR_OUT_OF_MEMORY or
+ * MALI_ERROR_FUNCTION_FAILED on error.  BUG FIX: the error paths used to
+ * leak @platform (and leave kbdev->platform_context dangling) when the
+ * kobject or sysfs file creation failed, and leaked the kobject when
+ * clock init failed; all paths now unwind fully.
+ */
+mali_error kbase_platform_init(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+	int ret;
+
+	platform = kmalloc(sizeof(struct rk_context), GFP_KERNEL);
+
+	if (NULL == platform)
+		return MALI_ERROR_OUT_OF_MEMORY;
+
+	kbdev->platform_context = (void *)platform;
+
+	platform->cmu_pmu_status = 0;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	platform->utilisation = 0;
+	platform->time_busy = 0;
+	platform->time_idle = 0;
+	platform->time_tick = 0;
+	platform->dvfs_enabled = true;
+#endif
+
+	rk_gpu = kobject_create_and_add("rk_gpu", NULL);
+	if (!rk_gpu)
+		goto kobject_fail;
+
+	ret = sysfs_create_file(rk_gpu, &dev_attr_error_count.attr);
+	if (ret)
+		goto sysfs_fail;
+
+	spin_lock_init(&platform->cmu_pmu_lock);
+
+	if (kbase_platform_power_clock_init(kbdev))
+		goto clock_init_fail;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	kbase_platform_dvfs_init(kbdev);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+	/* Enable power */
+	kbase_platform_cmu_pmu_control(kbdev, 1);
+	return MALI_ERROR_NONE;
+
+ clock_init_fail:
+ sysfs_fail:
+	/* kobject_put() also removes any sysfs file created on rk_gpu. */
+	kobject_put(rk_gpu);
+	rk_gpu = NULL;
+ kobject_fail:
+	kfree(platform);
+	kbdev->platform_context = NULL;
+
+	return MALI_ERROR_FUNCTION_FAILED;
+}
+
+/* Tear down the Rockchip platform context: stop DVFS, power the GPU
+ * down, and release the context allocated by kbase_platform_init().
+ */
+void kbase_platform_term(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+
+	platform = (struct rk_context *)kbdev->platform_context;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	kbase_platform_dvfs_term();
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+	/* Disable power */
+	kbase_platform_cmu_pmu_control(kbdev, 0);
+	/* Free via the local pointer (previously assigned but unused,
+	 * triggering a set-but-unused warning) and clear the reference
+	 * with NULL rather than the integer 0. */
+	kfree(platform);
+	kbdev->platform_context = NULL;
+}
--- /dev/null
+/* drivers/gpu/t6xx/kbase/src/platform/rk/mali_kbase_platform.h
+ * Rockchip SoC Mali-T764 platform-dependent codes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file mali_kbase_platform.h
+ * Platform-dependent init
+ */
+
+#ifndef _KBASE_PLATFORM_H_
+#define _KBASE_PLATFORM_H_
+
+/* Per-device platform state for the Rockchip Mali integration,
+ * stored in kbase_device::platform_context. */
+struct rk_context {
+	/** Non-zero while clocks/power to the Mali GPU are active
+	 * (comment previously said "mail-t604"). */
+	int cmu_pmu_status;
+	/** cmu & pmu lock */
+	spinlock_t cmu_pmu_lock;
+	struct clk *mali_pd;
+	struct dvfs_node * mali_clk_node;
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	/* Accumulators used to compute utilisation over a sampling window. */
+	int time_tick;
+	int utilisation;
+	u32 time_busy;
+	u32 time_idle;
+	bool dvfs_enabled;
+	/* NOTE(review): presumably set while the touchscreen is active to
+	 * boost the GPU - confirm against the input-event hook. */
+	bool gpu_in_touch;
+	spinlock_t gpu_in_touch_lock;
+#endif
+};
+/* Set the GPU clock rate through the Rockchip DVFS framework. */
+int mali_dvfs_clk_set(struct dvfs_node * node,unsigned long rate);
+
+/* All things that are needed for the Linux port. */
+/* Gate clocks/power (control != 0 enables, 0 disables). */
+int kbase_platform_cmu_pmu_control(struct kbase_device *kbdev, int control);
+/* Create/remove the driver's sysfs attribute files on @dev. */
+int kbase_platform_create_sysfs_file(struct device *dev);
+void kbase_platform_remove_sysfs_file(struct device *dev);
+int kbase_platform_is_power_on(void);
+/* Allocate/free the rk_context and bring the GPU up/down. */
+mali_error kbase_platform_init(struct kbase_device *kbdev);
+void kbase_platform_term(struct kbase_device *kbdev);
+
+/* Clock and power-domain control helpers; return 0 on success. */
+int kbase_platform_clock_on(struct kbase_device *kbdev);
+int kbase_platform_clock_off(struct kbase_device *kbdev);
+int kbase_platform_power_off(struct kbase_device *kbdev);
+int kbase_platform_power_on(struct kbase_device *kbdev);
+
+#endif /* _KBASE_PLATFORM_H_ */