# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r4p1-02dev0"
+MALI_RELEASE_NAME ?= "r4p1-00rel0"
# Paths required for build
KBASE_PATH = $(src)
/* Write buffer can cause tile list corruption */
BASE_HW_ISSUE_11024,
+ /* Pause buffer can cause a fragment job hang */
+ BASE_HW_ISSUE_11035,
+
/* T76X hw issues */
/* Partial 16xMSAA support */
BASE_HW_ISSUE_10995,
BASE_HW_ISSUE_11012,
BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
BASE_HW_ISSUE_END
};
BASE_HW_ISSUE_10969,
BASE_HW_ISSUE_11012,
BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
BASE_HW_ISSUE_END
};
BASE_HW_ISSUE_10946,
BASE_HW_ISSUE_11012,
BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
BASE_HW_ISSUE_END
};
BASE_HW_ISSUE_11012,
BASE_HW_ISSUE_11020,
BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
BASE_HW_ISSUE_END
};
u64 length;
};
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field is inserted into the atom structure to specify whether a dependency is a data
+ * dependency or an ordering dependency (placing it next to 'core_req' in the structure makes it
+ * possible to add the field without changing the structure size).
+ * When the flag marks a dependency as ordering-only, errors are not propagated through it.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
/**
* @brief Job chain hardware requirements.
*
typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+struct base_dependency {
+ base_atom_id atom_id; /**< An atom number */
+ base_jd_dep_type dependency_type; /**< Dependency type */
+};
+
typedef struct base_jd_atom_v2 {
mali_addr64 jc; /**< job-chain GPU address */
base_jd_udata udata; /**< user data */
kbase_pointer extres_list; /**< list of external resources */
u16 nr_extres; /**< nr of external resources */
base_jd_core_req core_req; /**< core requirements */
- base_atom_id pre_dep[2]; /**< pre-dependencies */
+	const struct base_dependency pre_dep[2]; /**< pre-dependencies; use the setter function to assign this field,
+	which reduces the chance of an improper assignment of a dependency field */
base_atom_id atom_number; /**< unique number to identify the atom */
s8 prio; /**< priority - smaller is higher priority */
u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
- u8 padding[7];
+ u8 padding[5];
} base_jd_atom_v2;
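+
+/*
+ * Size note for the change above (illustrative arithmetic, assuming no padding
+ * inside struct base_dependency): pre_dep grows from 2 bytes (base_atom_id[2])
+ * to 4 bytes (struct base_dependency[2], each entry being two u8 fields), so
+ * the trailing padding shrinks from 7 to 5 bytes and the overall size of
+ * base_jd_atom_v2 is unchanged.
+ */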
#if BASE_LEGACY_JD_API
}
#endif /* BASE_LEGACY_JD_API */
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] const_dep The dependency to be initialized.
+ * @param id The atom_id to be assigned.
+ * @param dep_type The dep_type to be assigned.
+ *
+ */
+static INLINE void base_jd_atom_dep_set(const struct base_dependency* const_dep, base_atom_id id, base_jd_dep_type dep_type)
+{
+ struct base_dependency* dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+	/* make sure we don't set a disallowed combination of atom_id/dependency_type */
+	LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+			(id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+ dep = REINTERPRET_CAST(struct base_dependency*)const_dep;
+
+ dep->atom_id = id;
+ dep->dependency_type = dep_type;
+}
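+
+/*
+ * Usage sketch (illustrative only): a caller populating the pre-dependencies
+ * of a base_jd_atom_v2 with the setter above, assuming atom numbers 5 and 6
+ * refer to previously submitted atoms:
+ *
+ *	base_jd_atom_v2 atom;
+ *
+ *	base_jd_atom_dep_set(&atom.pre_dep[0], 5, BASE_JD_DEP_TYPE_DATA);
+ *	base_jd_atom_dep_set(&atom.pre_dep[1], 6, BASE_JD_DEP_TYPE_ORDER);
+ *
+ * An unused slot is left as (0, BASE_JD_DEP_TYPE_INVALID), which is the only
+ * combination the assertion above allows for atom_id 0.
+ */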
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] const_dep The dependency to be written.
+ * @param[in] from The dependency to make a copy from.
+ *
+ */
+static INLINE void base_jd_atom_dep_copy(const struct base_dependency* const_dep, const struct base_dependency* from)
+{
+	LOCAL_ASSERT(const_dep != NULL);
+	LOCAL_ASSERT(from != NULL);
+
+	base_jd_atom_dep_set(const_dep, from->atom_id, from->dependency_type);
+}
+
/**
* @brief Soft-atom fence trigger setup.
*
{
struct device *dev = katom->kctx->kbdev->dev;
u32 clamped = 0;
- dev_warn(dev,"Called TILE_RANGE_FAULT workaround clamping function. \n");
+ dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
if (katom->core_req & BASE_JD_REQ_FS){
- kbase_va_region * region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc );
+ kbase_va_region *region;
+
+ kbase_gpu_vm_lock(katom->kctx);
+ region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc);
if (region){
phys_addr_t * page_array = kbase_get_phy_pages(region);
kunmap_atomic(page_1);
}
}
+ kbase_gpu_vm_unlock(katom->kctx);
}
return clamped;
}
/*** Begin Power Manager defaults */
/* Milliseconds */
-#define DEFAULT_PM_DVFS_FREQ 50
+#define DEFAULT_PM_DVFS_FREQ 20
/**
* Default poweroff tick granuality, in nanoseconds
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#endif /* MALI_UNIT_TEST */
#define KBASE_DRV_NAME "mali"
-#define ROCKCHIP_VERSION 7
+#define ROCKCHIP_VERSION 9
static const char kbase_drv_name[] = KBASE_DRV_NAME;
static int kbase_dev_nr;
return MALI_ERROR_OUT_OF_MEMORY;
}
+ kbase_gpu_vm_lock(kctx);
for (res_id = 0; res_id < num_elems; res_id++, res++) {
int exclusive;
kbase_va_region *reg;
if (exclusive)
set_bit(res_id, resources_list->kds_access_bitmap);
}
+ kbase_gpu_vm_unlock(kctx);
/* did the loop run to completion? */
if (res_id == num_elems)
kbasep_kctx_list_element *element, *tmp;
mali_bool found_element = MALI_FALSE;
- msleep(500);
-
mutex_lock(&kbdev->kctx_list_lock);
list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
if (element->kctx == kctx) {
typedef struct kbase_jd_atom kbase_jd_atom;
+struct kbase_jd_atom_dependency
+{
+ struct kbase_jd_atom *atom;
+ u8 dep_type;
+};
+
+/**
+ * @brief Retrieve a read-only reference to the atom field of a
+ * kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return read-only reference to the dependent atom.
+ */
+static INLINE const struct kbase_jd_atom* const kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency* dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return (const struct kbase_jd_atom* const )(dep->atom);
+}
+
+/**
+ * @brief Retrieve the dependency type field of a kbase_jd_atom_dependency
+ * structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return The dependency type value.
+ */
+static INLINE const u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency* dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return dep->dep_type;
+}
+
+/**
+ * @brief Setter for a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] const_dep The kbase jd atom dependency to be set.
+ * @param[in] a The atom to be set as a dependency.
+ * @param type The dependency type to be set.
+ *
+ */
+static INLINE void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency* const_dep,
+ struct kbase_jd_atom * a,
+ u8 type)
+{
+ struct kbase_jd_atom_dependency* dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
+
+ dep->atom = a;
+ dep->dep_type = type;
+}
+
+/**
+ * @brief Clear a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] const_dep The kbase jd atom dependency to be cleared.
+ *
+ */
+static INLINE void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency* const_dep)
+{
+ struct kbase_jd_atom_dependency* dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
+
+ dep->atom = NULL;
+ dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
+}
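+
+/*
+ * Usage sketch (illustrative only): kernel-side code pairs the accessors with
+ * the set/clear helpers above, e.g. when recording that 'katom' waits on
+ * 'dep_atom' and later tearing that link down:
+ *
+ *	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
+ *	...
+ *	if (kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+ *	    kbase_jd_katom_dep_type(&katom->dep[0]) == BASE_JD_DEP_TYPE_DATA)
+ *		kbase_jd_katom_dep_clear(&katom->dep[0]);
+ */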
+
struct kbase_ext_res
{
mali_addr64 gpu_address;
struct list_head dep_head[2];
struct list_head dep_item[2];
- struct kbase_jd_atom *dep_atom[2];
+ const struct kbase_jd_atom_dependency dep[2];
u16 nr_extres;
struct kbase_ext_res * extres;
#include <linux/ump.h>
#endif /* CONFIG_UMP */
#include <linux/random.h>
-#include <linux/delay.h>
+
#define beenthere(kctx,f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
katom->kds_dep_satisfied = MALI_TRUE;
/* Check whether the atom's other dependencies were already met */
- if (!katom->dep_atom[0] && !katom->dep_atom[1]) {
+ if (!kbase_jd_katom_dep_atom(&katom->dep[0]) && !kbase_jd_katom_dep_atom(&katom->dep[1])) {
/* katom dep complete, attempt to run it */
mali_bool resched = MALI_FALSE;
resched = jd_run_atom(katom);
return err;
}
-static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_va_region *reg, int mmu_update)
+static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_mem_phy_alloc *alloc)
{
KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(reg);
- KBASE_DEBUG_ASSERT(reg->alloc->imported.umm.dma_attachment);
- KBASE_DEBUG_ASSERT(reg->alloc->imported.umm.sgt);
- if (mmu_update)
- kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
- dma_buf_unmap_attachment(reg->alloc->imported.umm.dma_attachment, reg->alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- reg->alloc->imported.umm.sgt = NULL;
- reg->alloc->nents = 0;
+ KBASE_DEBUG_ASSERT(alloc);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+ alloc->nents = 0;
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
katom->kds_dep_satisfied = MALI_TRUE;
#endif /* CONFIG_KDS */
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_lock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
/* only roll back if extres is non-NULL */
if (katom->extres) {
u32 res_no;
res_no = katom->nr_extres;
while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
#ifdef CONFIG_DMA_SHARED_BUFFER
- if (katom->extres[res_no].alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
- kbase_va_region *reg;
- int mmu_update = 0;
- reg = kbase_region_tracker_find_region_base_address(katom->kctx, katom->extres[res_no].gpu_address);
- if (reg && reg->alloc == katom->extres[res_no].alloc)
- mmu_update = 1;
- if (1 == katom->extres[res_no].alloc->imported.umm.current_mapping_usage_count--)
- kbase_jd_umm_unmap(katom->kctx, reg, mmu_update);
+ if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ alloc->imported.umm.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.umm.current_mapping_usage_count) {
+ struct kbase_va_region *reg;
+ reg = kbase_region_tracker_find_region_base_address(
+ katom->kctx, katom->extres[res_no].gpu_address);
+
+ if (reg && reg->alloc == alloc) {
+ kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
+ kbase_reg_current_backed_size(reg));
+ }
+
+ kbase_jd_umm_unmap(katom->kctx, alloc);
+ }
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
kbase_mem_phy_alloc_put(katom->extres[res_no].alloc);
kfree(katom->extres);
katom->extres = NULL;
}
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_unlock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
}
#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
}
#endif /* CONFIG_KDS */
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
/* need to keep the GPU VM locked while we set up UMM buffers */
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_lock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
-
for (res_no = 0; res_no < katom->nr_extres; res_no++) {
base_external_resource *res;
kbase_va_region *reg;
katom->extres[res_no].alloc = kbase_mem_phy_alloc_get(reg->alloc);
}
/* successfully parsed the extres array */
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
/* drop the vm lock before we call into kds */
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_unlock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
#ifdef CONFIG_KDS
if (kds_res_count) {
#ifdef CONFIG_KDS
failed_kds_setup:
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
/* lock before we unmap */
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_lock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
#endif /* CONFIG_KDS */
failed_loop:
/* undo the loop work */
while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
#ifdef CONFIG_DMA_SHARED_BUFFER
- if (katom->extres[res_no].alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
- struct kbase_va_region * reg;
- int mmu_update = 0;
- reg = kbase_region_tracker_find_region_base_address(katom->kctx, katom->extres[res_no].gpu_address);
- if (reg && reg->alloc == katom->extres[res_no].alloc && reg->alloc->type)
- mmu_update = 1;
- katom->extres[res_no].alloc->imported.umm.current_mapping_usage_count--;
- if (0 == reg->alloc->imported.umm.current_mapping_usage_count)
- kbase_jd_umm_unmap(katom->kctx, reg, mmu_update);
+ if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ alloc->imported.umm.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.umm.current_mapping_usage_count) {
+ struct kbase_va_region *reg;
+ reg = kbase_region_tracker_find_region_base_address(
+ katom->kctx, katom->extres[res_no].gpu_address);
+
+ if (reg && reg->alloc == alloc) {
+ kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
+ kbase_reg_current_backed_size(reg));
+ }
+
+ kbase_jd_umm_unmap(katom->kctx, alloc);
+ }
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
- kbase_mem_phy_alloc_put(katom->extres[res_no].alloc);
+ kbase_mem_phy_alloc_put(alloc);
}
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
- /* Lock also used in debug mode just for lock order checking */
kbase_gpu_vm_unlock(katom->kctx);
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
early_err_out:
kfree(katom->extres);
kbase_jd_atom *dep_atom = list_entry(katom->dep_head[d].next, kbase_jd_atom, dep_item[d]);
list_del(katom->dep_head[d].next);
- dep_atom->dep_atom[d] = NULL;
+ kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+
if (katom->event_code != BASE_JD_EVENT_DONE) {
/* Atom failed, so remove the other dependencies and immediately fail the atom */
- if (dep_atom->dep_atom[other_d]) {
+ if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
list_del(&dep_atom->dep_item[other_d]);
- dep_atom->dep_atom[other_d] = NULL;
+ kbase_jd_katom_dep_clear(&dep_atom->dep[other_d]);
}
#ifdef CONFIG_KDS
if (!dep_atom->kds_dep_satisfied) {
}
#endif
- dep_atom->event_code = katom->event_code;
- KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
- dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			/* At this point the dependency on the failed job has already been removed */
+			if (!(kbase_jd_katom_dep_type(&dep_atom->dep[d]) == BASE_JD_DEP_TYPE_ORDER &&
+					katom->event_code > BASE_JD_EVENT_ACTIVE)) {
+				dep_atom->event_code = katom->event_code;
+				KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
+				dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			}
list_add_tail(&dep_atom->dep_item[0], out_list);
- } else if (!dep_atom->dep_atom[other_d]) {
+ } else if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
#ifdef CONFIG_KDS
if (dep_atom->kds_dep_satisfied)
#endif
(katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
return;
for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
- if (kctx->jctx.atoms[i].dep_atom[0] == katom ||
- kctx->jctx.atoms[i].dep_atom[1] == katom) {
+ if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
+ kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
/* This is needed in case an atom is failed due to being invalid, this
* can happen *before* the jobs that the atom depends on have completed */
for (i = 0; i < 2; i++) {
- if (katom->dep_atom[i]) {
+		if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
list_del(&katom->dep_item[i]);
- katom->dep_atom[i] = NULL;
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
}
}
katom->nice_prio = user_atom->prio;
katom->atom_flags = 0;
katom->retry_count = 0;
+
+
#ifdef CONFIG_KDS
/* Start by assuming that the KDS dependencies are satisfied,
* kbase_jd_pre_external_resources will correct this if there are dependencies */
katom->kds_rset = NULL;
#endif /* CONFIG_KDS */
+
+	/* Don't do anything if the dependencies are mis-configured.
+	   This is checked in a separate pass so that both dependencies are validated up front;
+	   otherwise extra complexity would be needed to undo the 1st dependency (already added to
+	   the list) when only the 2nd one has an invalid configuration.
+	 */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+ if (dep_atom_number) {
+			if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+					dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
+ katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ }
+ }
+
/* Add dependencies */
for (i = 0; i < 2; i++) {
- int dep_atom_number = user_atom->pre_dep[i];
- katom->dep_atom[i] = NULL;
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+
if (dep_atom_number) {
kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
if (dep_atom->event_code != BASE_JD_EVENT_DONE) {
- if (i == 1 && katom->dep_atom[0]) {
+				/* don't stop this atom if it only has an order dependency on the failed one;
+				   try to submit it through the normal path */
+				if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+						dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+ continue;
+ }
+
+ if (i == 1 && kbase_jd_katom_dep_atom(&katom->dep[0])) {
/* Remove the previous dependency */
list_del(&katom->dep_item[0]);
- katom->dep_atom[0] = NULL;
+ kbase_jd_katom_dep_clear(&katom->dep[0]);
}
+
/* Atom has completed, propagate the error code if any */
katom->event_code = dep_atom->event_code;
katom->status = KBASE_JD_ATOM_STATE_QUEUED;
}
}
ret = jd_done_nolock(katom);
+
goto out;
}
} else {
/* Atom is in progress, add this atom to the list */
list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
- katom->dep_atom[i] = dep_atom;
+ kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
queued = 1;
}
}
void kbase_jd_zap_context(kbase_context *kctx)
{
kbase_jd_atom *katom;
+ #if 0
struct list_head *entry,*entry1;
+ #endif
kbase_device *kbdev;
zap_reset_data reset_data;
unsigned long flags;
KBASE_DEBUG_ASSERT(kctx);
kbdev = kctx->kbdev;
-
+
KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
kbase_job_zap_context(kctx);
mutex_lock(&kctx->jctx.lock);
+
/*
* While holding the kbase_jd_context lock clean up jobs which are known to kbase but are
* queued outside the job scheduler.
*/
- /*
- pr_info("%p,%p,%p\n",&kctx->waiting_soft_jobs,kctx->waiting_soft_jobs.next,kctx->waiting_soft_jobs.prev);
- */
+
+ pr_info("%p,%p,%p\n",
+ &kctx->waiting_soft_jobs,
+ kctx->waiting_soft_jobs.next,
+ kctx->waiting_soft_jobs.prev);
+
+ while (!list_empty(&kctx->waiting_soft_jobs)) {
+ katom = list_first_entry(&kctx->waiting_soft_jobs,
+ struct kbase_jd_atom,
+ dep_item[0]);
+ list_del(&katom->dep_item[0]);
+ kbase_cancel_soft_job(katom);
+ }
+ #if 0
list_for_each_safe(entry, entry1, &kctx->waiting_soft_jobs) {
if(entry == (struct list_head *)LIST_POISON1)
pr_err("@get to the end of a list, error happened in list somewhere@\n");
pr_info("katom = %p,&katom->dep_item[0] = %p\n",katom,&katom->dep_item[0]);
kbase_cancel_soft_job(katom);
}
+ #endif
/* kctx->waiting_soft_jobs is not valid after this point */
#ifdef CONFIG_KDS
KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->reg_lock);
+
rbnode = kctx->reg_rbtree.rb_node;
while (rbnode) {
u64 tmp_start_pfn, tmp_end_pfn;
KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->reg_lock);
+
rbnode = kctx->reg_rbtree.rb_node;
while (rbnode) {
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
KBASE_DEBUG_ASSERT(NULL != kctx);
KBASE_DEBUG_ASSERT(NULL != reg);
+ lockdep_assert_held(&kctx->reg_lock);
+
if (!align)
align = 1;
static INLINE struct kbase_mem_phy_alloc * kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
{
- struct kbase_mem_phy_alloc * alloc;
- const size_t extra_pages = (sizeof(*alloc) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ struct kbase_mem_phy_alloc *alloc;
/* Prevent nr_pages*sizeof + sizeof(*alloc) from wrapping around. */
- if (nr_pages > (((size_t) -1 / sizeof(*alloc->pages))) - extra_pages)
+ if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) / sizeof(*alloc->pages)))
return ERR_PTR(-ENOMEM);
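+	/* Worked bound for the check above (illustrative): the allocation below is
+	 * sizeof(*alloc) + nr_pages * sizeof(*alloc->pages), so it cannot wrap as
+	 * long as nr_pages <= ((size_t)-1 - sizeof(*alloc)) / sizeof(*alloc->pages),
+	 * which is exactly the condition tested. */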
alloc = vzalloc(sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages);
- if (!alloc)
+ if (!alloc)
return ERR_PTR(-ENOMEM);
kref_init(&alloc->kref);
(first_bad << PAGE_SHIFT),
mapping->vm_end);
WARN(zap_res,
- "Failed to zap VA range (0x%lx -0x%lx);\n",
+ "Failed to zap VA range (0x%lx - 0x%lx);\n",
mapping->vm_start +
(first_bad << PAGE_SHIFT),
mapping->vm_end
.fault = kbase_cpu_vm_fault
};
-static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, int free_on_close)
+static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
{
struct kbase_cpu_mapping *map;
u64 start_off = vma->vm_pgoff - reg->start_pfn;
map->page_off = start_off;
map->region = free_on_close ? reg : NULL;
map->kctx = reg->kctx;
- map->vm_start = vma->vm_start;
- map->vm_end = vma->vm_end;
+ map->vm_start = vma->vm_start + aligned_offset;
+ if (aligned_offset) {
+ KBASE_DEBUG_ASSERT(!start_off);
+ map->vm_end = map->vm_start + (reg->nr_pages << PAGE_SHIFT);
+ } else {
+ map->vm_end = vma->vm_end;
+ }
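+	/* Worked example for the aligned case (illustrative): with
+	 * vma->vm_start = 0x7f0000000000, aligned_offset = 0x1000 and
+	 * reg->nr_pages = 4, the recorded mapping covers
+	 * 0x7f0000001000..0x7f0000005000, i.e. only the aligned span that the
+	 * region actually backs rather than the whole VMA. */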
map->alloc = kbase_mem_phy_alloc_get(reg->alloc);
map->count = 1; /* start with one ref */
int err = 0;
int free_on_close = 0;
struct device *dev = kctx->kbdev->dev;
+ size_t aligned_offset = 0;
dev_dbg(dev, "kbase_mmap\n");
nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
reg = kctx->pending_regions[cookie];
if (NULL != reg) {
- size_t aligned_offset = 0;
if (reg->flags & KBASE_REG_ALIGNED) {
/* nr_pages must be able to hold alignment pages
} /* default */
} /* switch */
map:
- err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, free_on_close);
+ err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
/* MMU dump - userspace should now have a reference on
if (MALI_ERROR_NONE != err) {
/* failed to insert pages, handle as a normal PF */
mutex_unlock(&faulting_as->transaction_mutex);
- kbase_gpu_vm_unlock(kctx);
kbase_free_phy_pages_helper(region->alloc, new_pages);
+ kbase_gpu_vm_unlock(kctx);
/* The locked VA region will be unlocked and the cache invalidated in here */
kbase_mmu_report_fault_and_kill(kctx, faulting_as);
goto fault_done;
* reaches zero. */
wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
- /* Suspend PM Metric timer on system suspend.
- * It is ok if kbase_pm_context_idle() is still running, it is safe
- * to still complete the last active time period - the pm stats will
- * get reset on resume anyway.
- */
- kbasep_pm_metrics_term(kbdev);
-
/* NOTE: We synchronize with anything that was just finishing a
* kbase_pm_context_idle() call by locking the pm.lock below */
/* MUST happen before any pm_context_active calls occur */
mutex_lock(&kbdev->pm.lock);
kbdev->pm.suspending = MALI_FALSE;
-
kbase_pm_do_poweron(kbdev, MALI_TRUE);
-
mutex_unlock(&kbdev->pm.lock);
-
- /* Restart PM Metric timer on resume */
- kbasep_pm_metrics_init(kbdev);
- kbasep_pm_record_gpu_idle(kbdev);
/* Initial active call, to power on the GPU/cores if needed */
kbase_pm_context_active(kbdev);
if (is_resume && kbdev->pm.callback_power_resume) {
kbdev->pm.callback_power_resume(kbdev);
} else if (kbdev->pm.callback_power_on) {
- if (kbdev->pm.callback_power_on(kbdev))
- reset_required = MALI_TRUE;
+ kbdev->pm.callback_power_on(kbdev);
+ /* If your platform properly keeps the GPU state you may use the return
+ * value of the callback_power_on function to conditionally reset the
+ * GPU on power up. Currently we are conservative and always reset the
+ * GPU. */
+ reset_required = MALI_TRUE;
}
spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
u32 value = 0;
u32 config_value;
- /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443. */
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443))
+	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443.
+	 * Also needed due to MIDGLES-3539. See PRLAM-11035. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
value |= SC_LS_PAUSEBUFFER_DISABLE;
/* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327. */
/* Are any cores being powered on? */
if (~kbdev->pm.desired_shader_state & desired_bitmap ||
kbdev->pm.ca_in_transition != MALI_FALSE) {
+
+ /* Check if we are powering off any cores before updating shader state */
+ if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
+ /* Start timer to power off cores */
+ kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
+ kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
+ }
+
kbdev->pm.desired_shader_state = desired_bitmap;
/* If any cores are being powered on, transition immediately */
cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
-
- /* Ensure timer does not power off wanted cores */
- if (kbdev->pm.shader_poweroff_pending != 0) {
- kbdev->pm.shader_poweroff_pending &= ~kbdev->pm.desired_shader_state;
- if (kbdev->pm.shader_poweroff_pending == 0)
- kbdev->pm.shader_poweroff_pending_time = 0;
- }
} else if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
/* Start timer to power off cores */
kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
}
+	/* Ensure the timer does not power off cores that are still wanted, while cores that are no longer wanted stay queued for poweroff */
+ if (kbdev->pm.shader_poweroff_pending != 0) {
+ kbdev->pm.shader_poweroff_pending &= ~(kbdev->pm.desired_shader_state & desired_bitmap);
+ if (kbdev->pm.shader_poweroff_pending == 0)
+ kbdev->pm.shader_poweroff_pending_time = 0;
+ }
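+
+	/* Worked example (illustrative): if shader_poweroff_pending = 0b1100 and
+	 * the cores that are both currently desired and newly requested are
+	 * 0b0100, the mask above leaves pending = 0b1000, so the re-wanted core
+	 * is no longer powered off by the timer while the unwanted one still is. */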
+
/* Don't need 'cores_are_available', because we don't return anything */
CSTD_UNUSED(cores_are_available);
}
kbase_jd_atom *dep_atom)
{
katom->status = KBASE_JD_ATOM_STATE_QUEUED;
- katom->dep_atom[0] = dep_atom;
+	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
}
atom->prio = ((prio << 16) / ((20 << 16) / 128)) - 128;
atom->atom_number = atom_nr;
- atom->pre_dep[0] = 0;
- atom->pre_dep[1] = 0;
+	base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+	base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
atom->udata.blob[0] = 0;
atom->udata.blob[1] = 0;
kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
- f_atom->pre_dep[0] = t_atom_nr;
+	base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr, BASE_JD_DEP_TYPE_DATA);
return MALI_ERROR_NONE;
}
static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
{
kbase_va_region *reg;
- phys_addr_t addr;
+ phys_addr_t addr = 0;
u64 pfn;
u32 offset;
char *page;
return 0;
}
+ kbase_gpu_vm_lock(kctx);
reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
- if (!reg)
- return 0;
-
- if (!(reg->flags & KBASE_REG_GPU_WR)) {
- /* Region is not writable by GPU so we won't write to it either */
- return 0;
- }
-
- if (!reg->alloc->pages)
- return 0;
+ if (reg &&
+ (reg->flags & KBASE_REG_GPU_WR) &&
+ reg->alloc && reg->alloc->pages)
+ addr = reg->alloc->pages[pfn - reg->start_pfn];
- addr = reg->alloc->pages[pfn - reg->start_pfn];
+ kbase_gpu_vm_unlock(kctx);
if (!addr)
return 0;
#include "mali_kbase_gpuprops_types.h"
-#define BASE_UK_VERSION_MAJOR 6
+#define BASE_UK_VERSION_MAJOR 7
#define BASE_UK_VERSION_MINOR 0
typedef struct kbase_uk_mem_alloc {
KBASE_FUNC_MEM_FLAGS_CHANGE,
KBASE_FUNC_MEM_ALIAS,
- KBASE_FUNC_JOB_SUBMIT,
-
- KBASE_FUNC_SYNC,
+ KBASE_FUNC_SYNC = (UK_FUNC_ID + 8),
KBASE_FUNC_POST_TERM,
KBASE_FUNC_FENCE_VALIDATE,
KBASE_FUNC_STREAM_CREATE,
KBASE_FUNC_GET_PROFILING_CONTROLS,
- KBASE_FUNC_SET_PROFILING_CONTROLS /* to be used only for testing
+ KBASE_FUNC_SET_PROFILING_CONTROLS, /* to be used only for testing
* purposes, otherwise these controls
* are set through gator API */
+ KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 27)
+
} kbase_uk_function_id;
+
#endif /* _KBASE_UKU_H_ */
{
#ifdef CONFIG_MALI_MIDGARD_DVFS
struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+ unsigned long flags;
+ unsigned int clock;
#endif
kbase_platform_power_on(kbdev);
kbase_platform_clock_on(kbdev);
#ifdef CONFIG_MALI_MIDGARD_DVFS
if (platform->dvfs_enabled) {
- /*if (kbase_platform_dvfs_enable(true, MALI_DVFS_START_FREQ)!= MALI_TRUE)*/
- /*printk("%s\n",__func__);*/
- if (kbase_platform_dvfs_enable(true, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
+		if (platform->gpu_in_touch) {
+			clock = p_mali_dvfs_infotbl[MALI_DVFS_STEP - 1].clock;
+ spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+ platform->gpu_in_touch = false;
+ spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+ } else {
+ clock = MALI_DVFS_CURRENT_FREQ;
+ }
+ /*
+ pr_info("%s,clock = %d\n",__func__,clock);
+ */
+		if (kbase_platform_dvfs_enable(true, clock) != MALI_TRUE)
return -EPERM;
+
} else {
if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
return -EPERM;
{
#ifdef CONFIG_MALI_MIDGARD_DVFS
struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+ unsigned long flags;
#endif
kbase_platform_clock_off(kbdev);
/*printk("%s\n",__func__);*/
if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
printk("[err] disabling dvfs is faled\n");
+ spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+ platform->gpu_in_touch = false;
+ spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
}
#endif
}
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/rk_fb.h>
-
+#include <linux/input.h>
#include <linux/rockchip/common.h>
#include <platform/rk/mali_kbase_platform.h>
#define LIMIT_FPS 60
#define LIMIT_FPS_POWER_SAVE 50
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static void gpufreq_input_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ mali_dvfs_status *dvfs_status;
+ struct rk_context *platform;
+ unsigned long flags;
+
+ if (type != EV_ABS)
+ return;
+
+ dvfs_status = &mali_dvfs_status_current;
+ platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
+
+ spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+ platform->gpu_in_touch = true;
+ spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+}
+
+static int gpufreq_input_connect(struct input_handler *handler,
+ struct input_dev *dev, const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "gpufreq";
+
+ error = input_register_handle(handle);
+ if (error)
+ goto err2;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err1;
+	pr_info("%s\n", __func__);
+ return 0;
+err1:
+ input_unregister_handle(handle);
+err2:
+ kfree(handle);
+ return error;
+}
+
+static void gpufreq_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+	pr_info("%s\n", __func__);
+}
+
+static const struct input_device_id gpufreq_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ .absbit = { [BIT_WORD(ABS_X)] =
+ BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+ },
+ { },
+};
+
+static struct input_handler gpufreq_input_handler = {
+ .event = gpufreq_input_event,
+ .connect = gpufreq_input_connect,
+ .disconnect = gpufreq_input_disconnect,
+ .name = "gpufreq",
+ .id_table = gpufreq_ids,
+};
+#endif
+
static void mali_dvfs_event_proc(struct work_struct *w)
{
unsigned long flags;
*/
struct rk_context *platform;
int i;
+ int rc;
platform = (struct rk_context *)kbdev->platform_context;
if (NULL == platform)
mutex_init(&mali_set_clock_lock);
mutex_init(&mali_enable_clock_lock);
+ spin_lock_init(&platform->gpu_in_touch_lock);
+ rc = input_register_handler(&gpufreq_input_handler);
+
/*add a error handling here */
spin_lock_irqsave(&mali_dvfs_spinlock, flags);
mali_dvfs_status_current.kbdev = kbdev;
destroy_workqueue(mali_dvfs_wq);
mali_dvfs_wq = NULL;
+
+ input_unregister_handler(&gpufreq_input_handler);
}
#endif /*CONFIG_MALI_MIDGARD_DVFS*/
u32 time_busy;
u32 time_idle;
bool dvfs_enabled;
+ bool gpu_in_touch;
+ spinlock_t gpu_in_touch_lock;
#endif
};
int mali_dvfs_clk_set(struct dvfs_node * node,unsigned long rate);