/*
*
- * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
#endif
#include <mali_kbase.h>
#include <mali_kbase_uku.h>
-#ifdef CONFIG_UMP
-#include <linux/ump.h>
-#endif /* CONFIG_UMP */
#include <linux/random.h>
#include <linux/version.h>
#include <linux/ratelimit.h>
-#include <linux/pagemap.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_hwaccess_jm.h>
-
-#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
-#endif
+
+#include "mali_kbase_dma_fence.h"
#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
/* Return whether katom will run on the GPU or not. Currently only soft jobs and
* dependency-only atoms do not run on the GPU */
#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) || \
- ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == \
+ ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == \
BASE_JD_REQ_DEP)))
/*
* This is the kernel side of the API. Only entry points are:
KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
- if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+ if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
/* Dependency only atom */
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
return 0;
} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
/* Soft-job */
- if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
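+ /* An atom that has already been flagged to fail is completed
+ * immediately, without running the soft job.
+ */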
+ if (katom->will_fail_event_code) {
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ }
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
== BASE_JD_REQ_SOFT_REPLAY) {
if (!kbase_replay_process(katom))
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
} else if (kbase_process_soft_job(katom) == 0) {
kbase_finish_soft_job(katom);
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
- } else {
- /* The job has not completed */
- list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs);
}
return 0;
}
return kbasep_js_add_job(kctx, katom);
}
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
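+/* Called when an external (KDS or dma-fence) dependency of @katom has been
+ * met. Must be called with the job dispatch context lock held.
+ */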
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
+{
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(katom);
+ kbdev = katom->kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Check whether the atom's other dependencies were already met. If
+ * katom is a GPU atom then the job scheduler may be able to represent
+ * the dependencies, hence we may attempt to submit it before they are
+ * met. Other atoms must have had both dependencies resolved.
+ */
+ if (IS_GPU_ATOM(katom) ||
+ (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+ !kbase_jd_katom_dep_atom(&katom->dep[1]))) {
+ /* katom dep complete, attempt to run it */
+ bool resched = false;
+
+ resched = jd_run_atom(katom);
+
+ if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ /* The atom has already finished */
+ resched |= jd_done_nolock(katom, NULL);
+ }
+
+ if (resched)
+ kbase_js_sched_all(kbdev);
+ }
+}
+#endif
+
#ifdef CONFIG_KDS
/* Add the katom to the kds waiting list.
{
struct kbase_jd_atom *katom;
struct kbase_jd_context *ctx;
- struct kbase_device *kbdev;
katom = (struct kbase_jd_atom *)callback_parameter;
KBASE_DEBUG_ASSERT(katom);
+
ctx = &katom->kctx->jctx;
- kbdev = katom->kctx->kbdev;
- KBASE_DEBUG_ASSERT(kbdev);
+ /* If KDS resource has already been satisfied (e.g. due to zapping)
+ * do nothing.
+ */
mutex_lock(&ctx->lock);
-
- /* KDS resource has already been satisfied (e.g. due to zapping) */
- if (katom->kds_dep_satisfied)
- goto out;
-
- /* This atom's KDS dependency has now been met */
- katom->kds_dep_satisfied = true;
-
- /* Check whether the atom's other dependencies were already met. If
- * katom is a GPU atom then the job scheduler may be able to represent
- * the dependencies, hence we may attempt to submit it before they are
- * met. Other atoms must have had both dependencies resolved */
- if (IS_GPU_ATOM(katom) ||
- (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
- !kbase_jd_katom_dep_atom(&katom->dep[1]))) {
- /* katom dep complete, attempt to run it */
- bool resched = false;
-
- resched = jd_run_atom(katom);
-
- if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
- /* The atom has already finished */
- resched |= jd_done_nolock(katom, NULL);
- }
-
- if (resched)
- kbase_js_sched_all(kbdev);
+ if (!katom->kds_dep_satisfied) {
+ katom->kds_dep_satisfied = true;
+ kbase_jd_dep_clear_locked(katom);
}
- out:
mutex_unlock(&ctx->lock);
}
}
#endif /* CONFIG_KDS */
-static int kbase_jd_user_buf_map(struct kbase_context *kctx,
- struct kbase_va_region *reg)
-{
- long pinned_pages;
- struct kbase_mem_phy_alloc *alloc;
- struct page **pages;
- phys_addr_t *pa;
- long i;
- int err = -ENOMEM;
- unsigned long address;
- struct task_struct *owner;
- struct device *dev;
- unsigned long offset;
- unsigned long local_size;
-
- alloc = reg->gpu_alloc;
- pa = kbase_get_gpu_phy_pages(reg);
- address = alloc->imported.user_buf.address;
- owner = alloc->imported.user_buf.owner;
-
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
-
- pages = alloc->imported.user_buf.pages;
-
- down_read(&owner->mm->mmap_sem);
- pinned_pages = get_user_pages(owner, owner->mm,
- address,
- alloc->imported.user_buf.nr_pages,
- reg->flags & KBASE_REG_GPU_WR,
- 0, pages, NULL);
- up_read(&owner->mm->mmap_sem);
-
- if (pinned_pages <= 0)
- return pinned_pages;
-
- if (pinned_pages != alloc->imported.user_buf.nr_pages) {
- for (i = 0; i < pinned_pages; i++)
- put_page(pages[i]);
- return -ENOMEM;
- }
-
- dev = kctx->kbdev->dev;
- offset = address & ~PAGE_MASK;
- local_size = alloc->imported.user_buf.size;
-
- for (i = 0; i < pinned_pages; i++) {
- dma_addr_t dma_addr;
- unsigned long min;
-
- min = MIN(PAGE_SIZE - offset, local_size);
- dma_addr = dma_map_page(dev, pages[i],
- offset, min,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, dma_addr))
- goto unwind;
-
- alloc->imported.user_buf.dma_addrs[i] = dma_addr;
- pa[i] = page_to_phys(pages[i]);
-
- local_size -= min;
- offset = 0;
- }
-
- alloc->nents = pinned_pages;
-
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa,
- kbase_reg_current_backed_size(reg),
- reg->flags);
- if (err == 0)
- return 0;
-
- alloc->nents = 0;
- /* fall down */
-unwind:
- while (i--) {
- dma_unmap_page(kctx->kbdev->dev,
- alloc->imported.user_buf.dma_addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- put_page(pages[i]);
- pages[i] = NULL;
- }
-
- return err;
-}
-
-static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
- struct kbase_mem_phy_alloc *alloc, bool writeable)
-{
- long i;
- struct page **pages;
- unsigned long size = alloc->imported.user_buf.size;
-
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
- pages = alloc->imported.user_buf.pages;
- for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
- unsigned long local_size;
- dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
-
- local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
- dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
- DMA_BIDIRECTIONAL);
- if (writeable)
- set_page_dirty_lock(pages[i]);
- put_page(pages[i]);
- pages[i] = NULL;
-
- size -= local_size;
- }
- alloc->nents = 0;
-}
-
-/* not to use sg_dma_len. */
-#define MALI_SG_DMA_LEN(sg) ((sg)->length)
-
-#ifdef CONFIG_DMA_SHARED_BUFFER
-static int kbase_jd_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg)
-{
- struct sg_table *sgt; /* scatterlist_table */
- struct scatterlist *s;
- int i;
- phys_addr_t *pa;
- int err;
- size_t count = 0;
- struct kbase_mem_phy_alloc *alloc;
-
- alloc = reg->gpu_alloc;
-
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
- KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt);
- sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment, DMA_BIDIRECTIONAL);
-
- if (IS_ERR_OR_NULL(sgt))
- return -EINVAL;
-
- /* save for later */
- alloc->imported.umm.sgt = sgt;
-
- pa = kbase_get_gpu_phy_pages(reg);
- KBASE_DEBUG_ASSERT(pa);
-
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- int j;
- /* size_t pages = PFN_UP(sg_dma_len(s)); */
- size_t pages = PFN_UP(MALI_SG_DMA_LEN(s));
-
- WARN_ONCE(MALI_SG_DMA_LEN(s) & (PAGE_SIZE-1),
- "MALI_SG_DMA_LEN(s)=%u is not a multiple of PAGE_SIZE\n",
- MALI_SG_DMA_LEN(s));
- /*
- WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
- "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
- sg_dma_len(s));
- */
-
- WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
- "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
- (unsigned long long) sg_dma_address(s));
-
- for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++)
- *pa++ = sg_dma_address(s) + (j << PAGE_SHIFT);
-
- WARN_ONCE(j < pages,
- "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
- alloc->imported.umm.dma_buf->size);
- }
-
- if (WARN_ONCE(count < reg->nr_pages,
- "sg list from dma_buf_map_attachment < dma_buf->size=%zu, count : %zu, reg->nr_pages : %zu. \n",
- alloc->imported.umm.dma_buf->size,
- count,
- reg->nr_pages)) {
- err = -EINVAL;
- goto out;
- }
-
- /* Update nents as we now have pages to map */
- alloc->nents = count;
-
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn, kbase_get_gpu_phy_pages(reg), kbase_reg_current_backed_size(reg), reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
-
-out:
- if (err) {
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment, alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
- }
-
- return err;
-}
-
-static void kbase_jd_umm_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc)
-{
- KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(alloc);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
- alloc->nents = 0;
-}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
-
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
{
#ifdef CONFIG_KDS
kds_resource_set_release_sync(&katom->kds_rset);
}
#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+ * Any successfully completed atom would have had all its callbacks
+ * completed before the atom was run, so only flush for failed atoms.
+ */
+ if (katom->event_code != BASE_JD_EVENT_DONE)
+ flush_workqueue(katom->kctx->dma_fence.wq);
+#endif /* CONFIG_MALI_DMA_FENCE */
}
static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
katom->kds_dep_satisfied = true;
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
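+ /* Signal this atom's dma-fence (if one was created) so that anything
+ * waiting on it is released.
+ */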
+ kbase_dma_fence_signal(katom);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
kbase_gpu_vm_lock(katom->kctx);
/* only roll back if extres is non-NULL */
if (katom->extres) {
res_no = katom->nr_extres;
while (res_no-- > 0) {
struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+ struct kbase_va_region *reg;
- switch (alloc->type) {
-#ifdef CONFIG_DMA_SHARED_BUFFER
- case KBASE_MEM_TYPE_IMPORTED_UMM: {
- alloc->imported.umm.current_mapping_usage_count--;
-
- if (0 == alloc->imported.umm.current_mapping_usage_count) {
- struct kbase_va_region *reg;
-
- reg = kbase_region_tracker_find_region_base_address(
- katom->kctx,
- katom->extres[res_no].gpu_address);
-
- if (reg && reg->gpu_alloc == alloc)
- kbase_mmu_teardown_pages(
- katom->kctx,
- reg->start_pfn,
- kbase_reg_current_backed_size(reg));
-
- kbase_jd_umm_unmap(katom->kctx, alloc);
- }
- }
- break;
-#endif /* CONFIG_DMA_SHARED_BUFFER */
- case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- alloc->imported.user_buf.current_mapping_usage_count--;
-
- if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
- struct kbase_va_region *reg;
-
- reg = kbase_region_tracker_find_region_base_address(
- katom->kctx,
- katom->extres[res_no].gpu_address);
-
- if (reg && reg->gpu_alloc == alloc)
- kbase_mmu_teardown_pages(
- katom->kctx,
- reg->start_pfn,
- kbase_reg_current_backed_size(reg));
-
- kbase_jd_user_buf_unmap(katom->kctx,
- alloc,
- reg->flags & KBASE_REG_GPU_WR);
- }
- }
- break;
- default:
- break;
- }
- kbase_mem_phy_alloc_put(katom->extres[res_no].alloc);
+ reg = kbase_region_tracker_find_region_base_address(
+ katom->kctx,
+ katom->extres[res_no].gpu_address);
+ kbase_unmap_external_resource(katom->kctx, reg, alloc);
}
kfree(katom->extres);
katom->extres = NULL;
kbase_gpu_vm_unlock(katom->kctx);
}
-#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
-static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource **kds_resources, u32 *kds_res_count, unsigned long *kds_access_bitmap, bool exclusive)
-{
- u32 i;
-
- for (i = 0; i < *kds_res_count; i++) {
- /* Duplicate resource, ignore */
- if (kds_resources[i] == kds_res)
- return;
- }
-
- kds_resources[*kds_res_count] = kds_res;
- if (exclusive)
- set_bit(*kds_res_count, kds_access_bitmap);
- (*kds_res_count)++;
-}
-#endif
-
/*
* Set up external resources needed by this job.
*
struct kds_resource **kds_resources = NULL;
unsigned long *kds_access_bitmap = NULL;
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
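+ /* Collects the dma-buf reservation objects of imported resources so
+ * that dma-fence waits can be set up before the atom runs.
+ */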
+ struct kbase_dma_fence_resv_info info = {
+ .dma_fence_resv_count = 0,
+ };
+#endif
struct base_external_resource *input_extres;
KBASE_DEBUG_ASSERT(katom);
KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
- if (NULL == kds_resources) {
+ if (!kds_resources) {
err_ret_val = -ENOMEM;
goto early_err_out;
}
KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
- kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((katom->nr_extres + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);
-
- if (NULL == kds_access_bitmap) {
+ kds_access_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!kds_access_bitmap) {
err_ret_val = -ENOMEM;
goto early_err_out;
}
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ info.resv_objs = kmalloc_array(katom->nr_extres,
+ sizeof(struct reservation_object *),
+ GFP_KERNEL);
+ if (!info.resv_objs) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ info.dma_fence_excl_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!info.dma_fence_excl_bitmap) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ /* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
+
/* need to keep the GPU VM locked while we set up UMM buffers */
kbase_gpu_vm_lock(katom->kctx);
for (res_no = 0; res_no < katom->nr_extres; res_no++) {
struct base_external_resource *res;
struct kbase_va_region *reg;
+ struct kbase_mem_phy_alloc *alloc;
+ bool exclusive;
res = &input_extres[res_no];
+ exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
+ ? true : false;
reg = kbase_region_tracker_find_region_enclosing_address(
katom->kctx,
res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
(reg->flags & KBASE_REG_SECURE)) {
- katom->atom_flags |= KBASE_KATOM_FLAG_SECURE;
- if ((katom->core_req & BASE_JD_REQ_FS) == 0) {
- WARN_RATELIMIT(1, "Secure non-fragment jobs not supported");
- goto failed_loop;
- }
+ katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
}
- /* decide what needs to happen for this resource */
- switch (reg->gpu_alloc->type) {
- case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
- if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
- /* use a local variable to not pollute
- * err_ret_val with a potential success
- * value as some other gotos depend on
- * the default error code stored in
- * err_ret_val */
- int tmp;
-
- tmp = kbase_jd_user_buf_map(katom->kctx,
- reg);
- if (0 != tmp) {
- /* failed to map this buffer,
- * roll back */
- err_ret_val = tmp;
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
- goto failed_loop;
- }
- }
- }
- break;
- case BASE_MEM_IMPORT_TYPE_UMP: {
-#if defined(CONFIG_KDS) && defined(CONFIG_UMP)
- struct kds_resource *kds_res;
-
- kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources, &kds_res_count,
- kds_access_bitmap,
- res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
-#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
- break;
- }
-#ifdef CONFIG_DMA_SHARED_BUFFER
- case BASE_MEM_IMPORT_TYPE_UMM: {
-#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
- struct kds_resource *kds_res;
-
- kds_res = get_dma_buf_kds_resource(reg->gpu_alloc->imported.umm.dma_buf);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
-#endif
- reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
- if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
- /* use a local variable to not pollute err_ret_val
- * with a potential success value as some other gotos depend
- * on the default error code stored in err_ret_val */
- int tmp;
-
- tmp = kbase_jd_umm_map(katom->kctx, reg);
- if (tmp) {
- /* failed to map this buffer, roll back */
- err_ret_val = tmp;
- reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
- goto failed_loop;
- }
- }
- break;
- }
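+ /* Map the external resource and take a reference on its physical
+ * allocation.
+ */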
+ alloc = kbase_map_external_resource(katom->kctx, reg,
+ current->mm
+#ifdef CONFIG_KDS
+ , &kds_res_count, kds_resources,
+ kds_access_bitmap, exclusive
#endif
- default:
+ );
+ if (!alloc) {
+ err_ret_val = -EINVAL;
goto failed_loop;
}
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ struct reservation_object *resv;
+
+ resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
+ if (resv)
+ kbase_dma_fence_add_reservation(resv, &info,
+ exclusive);
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
/* finish with updating out array with the data we found */
/* NOTE: It is important that this is the last thing we do (or
* at least not before the first write) as we overwrite elements
* until the last read for an element.
* */
katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
- katom->extres[res_no].alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ katom->extres[res_no].alloc = alloc;
}
/* successfully parsed the extres array */
/* drop the vm lock before we call into kds */
kbase_gpu_vm_unlock(katom->kctx);
+ /* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
#ifdef CONFIG_KDS
if (kds_res_count) {
int wait_failed;
kfree(kds_access_bitmap);
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (info.dma_fence_resv_count) {
+ int ret;
+
+ ret = kbase_dma_fence_wait(katom, &info);
+ if (ret < 0)
+ goto failed_dma_fence_setup;
+ }
+
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
/* all done OK */
return 0;
/* error handling section */
+#ifdef CONFIG_MALI_DMA_FENCE
+failed_dma_fence_setup:
#ifdef CONFIG_KDS
- failed_kds_setup:
+ /* If we are here, dma-fence setup failed but KDS setup succeeded.
+ * Revert KDS setup if any.
+ */
+ if (kds_res_count) {
+ mutex_unlock(&katom->kctx->jctx.lock);
+ kds_resource_set_release_sync(&katom->kds_rset);
+ mutex_lock(&katom->kctx->jctx.lock);
+
+ kbase_jd_kds_waiters_remove(katom);
+ katom->kds_dep_satisfied = true;
+ }
+#endif /* CONFIG_KDS */
+#endif /* CONFIG_MALI_DMA_FENCE */
+#ifdef CONFIG_KDS
+failed_kds_setup:
+#endif
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+ /* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
/* lock before we unmap */
kbase_gpu_vm_lock(katom->kctx);
-#endif /* CONFIG_KDS */
+#endif
failed_loop:
/* undo the loop work */
while (res_no-- > 0) {
struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
-#ifdef CONFIG_DMA_SHARED_BUFFER
- if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
- alloc->imported.umm.current_mapping_usage_count--;
-
- if (0 == alloc->imported.umm.current_mapping_usage_count) {
- struct kbase_va_region *reg;
-
- reg = kbase_region_tracker_find_region_base_address(
- katom->kctx,
- katom->extres[res_no].gpu_address);
- if (reg && reg->gpu_alloc == alloc)
- kbase_mmu_teardown_pages(katom->kctx,
- reg->start_pfn,
- kbase_reg_current_backed_size(reg));
-
- kbase_jd_umm_unmap(katom->kctx, alloc);
- }
- }
-#endif /* CONFIG_DMA_SHARED_BUFFER */
- kbase_mem_phy_alloc_put(alloc);
+ kbase_unmap_external_resource(katom->kctx, NULL, alloc);
}
kbase_gpu_vm_unlock(katom->kctx);
+ /* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
early_err_out:
kfree(katom->extres);
katom->extres = NULL;
kfree(kds_resources);
kfree(kds_access_bitmap);
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+#endif
return err_ret_val;
}
static inline void jd_resolve_dep(struct list_head *out_list,
struct kbase_jd_atom *katom,
- u8 d,
- bool ctx_is_dying)
+ u8 d, bool ctx_is_dying)
{
u8 other_d = !d;
while (!list_empty(&katom->dep_head[d])) {
struct kbase_jd_atom *dep_atom;
+ struct kbase_jd_atom *other_dep_atom;
u8 dep_type;
dep_atom = list_entry(katom->dep_head[d].next,
struct kbase_jd_atom, dep_item[d]);
-
list_del(katom->dep_head[d].next);
dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
if (katom->event_code != BASE_JD_EVENT_DONE &&
- (dep_type != BASE_JD_DEP_TYPE_ORDER || ctx_is_dying)) {
- /* Atom failed, so remove the other dependencies and immediately fail the atom */
- if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
- list_del(&dep_atom->dep_item[other_d]);
- kbase_jd_katom_dep_clear(&dep_atom->dep[other_d]);
- }
+ (dep_type != BASE_JD_DEP_TYPE_ORDER)) {
#ifdef CONFIG_KDS
if (!dep_atom->kds_dep_satisfied) {
/* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
}
#endif
+#ifdef CONFIG_MALI_DMA_FENCE
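+ /* This dependent atom is going to fail, so stop waiting on any
+ * dma-fence callbacks it has registered.
+ */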
+ kbase_dma_fence_cancel_callbacks(dep_atom);
+#endif
+
dep_atom->event_code = katom->event_code;
KBASE_DEBUG_ASSERT(dep_atom->status !=
KBASE_JD_ATOM_STATE_UNUSED);
- dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
- list_add_tail(&dep_atom->dep_item[0], out_list);
- } else if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY)
+ != BASE_JD_REQ_SOFT_REPLAY) {
+ dep_atom->will_fail_event_code =
+ dep_atom->event_code;
+ } else {
+ dep_atom->status =
+ KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+ }
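+ /* Queue the dependent atom for submission only if it is not
+ * already queued and either has no remaining dependency, or is
+ * a GPU atom whose remaining dependency can be handled by the
+ * job scheduler (context not dying and neither atom flagged to
+ * fail).
+ */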
+ other_dep_atom = (struct kbase_jd_atom *)
+ kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
+
+ if (!dep_atom->in_jd_list && (!other_dep_atom ||
+ (IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
+ !dep_atom->will_fail_event_code &&
+ !other_dep_atom->will_fail_event_code))) {
+ bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+ int dep_count;
+
+ dep_count = atomic_read(&dep_atom->dma_fence.dep_count);
+ if (likely(dep_count == -1)) {
+ dep_satisfied = true;
+ } else if (dep_count == 0) {
+ /*
+ * All fences for this atom have signaled, but
+ * the worker that will queue the atom has not
+ * yet run.
+ *
+ * Mark the atom as handled by setting
+ * dep_count to -1 so that the worker doesn't
+ * queue the atom again.
+ */
+ atomic_set(&dep_atom->dma_fence.dep_count, -1);
+ /*
+ * Remove the atom from the list of dma-fence
+ * waiting atoms.
+ */
+ kbase_dma_fence_waiters_remove(dep_atom);
+ dep_satisfied = true;
+ } else {
+ dep_satisfied = false;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
#ifdef CONFIG_KDS
- if (dep_atom->kds_dep_satisfied)
+ dep_satisfied = dep_satisfied && dep_atom->kds_dep_satisfied;
#endif
- list_add_tail(&dep_atom->dep_item[0], out_list);
+
+ if (dep_satisfied) {
+ dep_atom->in_jd_list = true;
+ list_add_tail(&dep_atom->jd_item, out_list);
+ }
}
}
}
kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
- if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
BASE_JD_REQ_SOFT_REPLAY &&
(dep_atom->core_req & kbdev->force_replay_core_req)
== kbdev->force_replay_core_req) {
}
#endif
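+/* Walk the atoms that depend on @node and queue those GPU atoms whose
+ * dependencies can now be represented in the job scheduler, so they are
+ * submitted without waiting for @node to complete.
+ */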
+static void jd_try_submitting_deps(struct list_head *out_list,
+ struct kbase_jd_atom *node)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct list_head *pos;
+
+ list_for_each(pos, &node->dep_head[i]) {
+ struct kbase_jd_atom *dep_atom = list_entry(pos,
+ struct kbase_jd_atom, dep_item[i]);
+
+ if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
+ /* Check if atom deps look sane */
+ bool dep0_valid = !dep_atom->dep[0].atom ||
+ (dep_atom->dep[0].atom->status
+ >= KBASE_JD_ATOM_STATE_IN_JS);
+ bool dep1_valid = !dep_atom->dep[1].atom ||
+ (dep_atom->dep[1].atom->status
+ >= KBASE_JD_ATOM_STATE_IN_JS);
+
+ if (dep0_valid && dep1_valid) {
+ dep_atom->in_jd_list = true;
+ list_add(&dep_atom->jd_item, out_list);
+ }
+ }
+ }
+ }
+}
+
/*
* Perform the necessary handling of an atom that has finished running
* on the GPU.
{
struct kbase_context *kctx = katom->kctx;
struct kbase_device *kbdev = kctx->kbdev;
- struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
struct list_head completed_jobs;
struct list_head runnable_jobs;
bool need_to_try_schedule_context = false;
jd_check_force_failure(katom);
#endif
-
/* This is needed in case an atom is failed due to being invalid, this
* can happen *before* the jobs that the atom depends on have completed */
for (i = 0; i < 2; i++) {
}
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
- list_add_tail(&katom->dep_item[0], &completed_jobs);
+ list_add_tail(&katom->jd_item, &completed_jobs);
while (!list_empty(&completed_jobs)) {
- katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, dep_item[0]);
+ katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
list_del(completed_jobs.prev);
-
KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
for (i = 0; i < 2; i++)
jd_resolve_dep(&runnable_jobs, katom, i,
- js_kctx_info->ctx.is_dying);
+ kctx->jctx.sched_info.ctx.is_dying);
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
kbase_jd_post_external_resources(katom);
struct kbase_jd_atom *node;
node = list_entry(runnable_jobs.next,
- struct kbase_jd_atom, dep_item[0]);
-
+ struct kbase_jd_atom, jd_item);
list_del(runnable_jobs.next);
+ node->in_jd_list = false;
KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
- if (node->status != KBASE_JD_ATOM_STATE_COMPLETED) {
+ if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
+ !kctx->jctx.sched_info.ctx.is_dying) {
need_to_try_schedule_context |= jd_run_atom(node);
} else {
node->event_code = katom->event_code;
- if ((node->core_req & BASEP_JD_REQ_ATOM_TYPE)
- == BASE_JD_REQ_SOFT_REPLAY) {
+ if ((node->core_req &
+ BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_REPLAY) {
if (kbase_replay_process(node))
/* Don't complete this atom */
continue;
} else if (node->core_req &
BASE_JD_REQ_SOFT_JOB) {
- /* If this is a fence wait then remove it from the list of sync waiters. */
+ /* If this is a fence wait soft job
+ * then remove it from the list of sync
+ * waiters.
+ */
if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
- list_del(&node->dep_item[0]);
+ kbasep_remove_waiting_soft_job(node);
kbase_finish_soft_job(node);
}
node->status = KBASE_JD_ATOM_STATE_COMPLETED;
}
- if (node->status == KBASE_JD_ATOM_STATE_COMPLETED)
- list_add_tail(&node->dep_item[0], &completed_jobs);
+ if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ list_add_tail(&node->jd_item, &completed_jobs);
+ } else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
+ !node->will_fail_event_code) {
+ /* Node successfully submitted, try submitting
+ * dependencies as they may now be representable
+ * in JS */
+ jd_try_submitting_deps(&runnable_jobs, node);
+ }
}
/* Register a completed job as a disjoint event when the GPU
*/
kbase_disjoint_event_potential(kctx->kbdev);
if (completed_jobs_ctx)
- list_add_tail(&katom->dep_item[0], completed_jobs_ctx);
+ list_add_tail(&katom->jd_item, completed_jobs_ctx);
else
kbase_event_post(kctx, katom);
}
#endif
-bool jd_submit_atom(struct kbase_context *kctx,
- const struct base_jd_atom_v2 *user_atom,
- struct kbase_jd_atom *katom)
+bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
{
struct kbase_jd_context *jctx = &kctx->jctx;
- base_jd_core_req core_req;
int queued = 0;
int i;
int sched_prio;
bool ret;
+ bool will_fail = false;
/* Update the TOTAL number of jobs. This includes those not tracked by
* the scheduler: 'not ready to run' and 'dependency-only' jobs. */
jctx->job_nr++;
- core_req = user_atom->core_req;
-
katom->start_timestamp.tv64 = 0;
katom->time_spent_us = 0;
katom->udata = user_atom->udata;
katom->affinity = 0;
katom->jc = user_atom->jc;
katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->core_req = core_req;
+ katom->core_req = user_atom->core_req;
katom->atom_flags = 0;
katom->retry_count = 0;
katom->need_cache_flush_cores_retained = 0;
+ katom->pre_dep = NULL;
+ katom->post_dep = NULL;
katom->x_pre_dep = NULL;
katom->x_post_dep = NULL;
+ katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
+ katom->exit_protected_state = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+ katom->age = kctx->age_count++;
+
+ INIT_LIST_HEAD(&katom->jd_item);
#ifdef CONFIG_KDS
/* Start by assuming that the KDS dependencies are satisfied,
* kbase_jd_pre_external_resources will correct this if there are dependencies */
katom->kds_dep_satisfied = true;
katom->kds_rset = NULL;
#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
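+ /* A dep_count of -1 means the atom is not waiting on any dma-fences. */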
+ atomic_set(&katom->dma_fence.dep_count, -1);
+#endif
/* Don't do anything if there is a mess up with dependencies.
This is done in a separate cycle to check both the dependencies at once, otherwise
dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
-#if defined(CONFIG_MALI_MIPE_ENABLED)
+
/* Wrong dependency setup. Atom will be sent
* back to user space. Do not record any
* dependencies. */
kbase_jd_atom_id(kctx, katom));
kbase_tlstream_tl_ret_atom_ctx(
katom, kctx);
-#endif
+
ret = jd_done_nolock(katom, NULL);
goto out;
}
continue;
}
- if (i == 1 && kbase_jd_katom_dep_atom(&katom->dep[0])) {
- /* Remove the previous dependency */
- list_del(&katom->dep_item[0]);
- kbase_jd_katom_dep_clear(&katom->dep[0]);
- }
-
/* Atom has completed, propagate the error code if any */
katom->event_code = dep_atom->event_code;
katom->status = KBASE_JD_ATOM_STATE_QUEUED;
-#if defined(CONFIG_MALI_MIPE_ENABLED)
+
/* This atom is going through soft replay or
* will be sent back to user space. Do not record any
* dependencies. */
katom,
kbase_jd_atom_id(kctx, katom));
kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
-#endif
- if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
== BASE_JD_REQ_SOFT_REPLAY) {
if (kbase_replay_process(katom)) {
ret = false;
goto out;
}
}
- ret = jd_done_nolock(katom, NULL);
+ will_fail = true;
- goto out;
} else {
/* Atom is in progress, add this atom to the list */
list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
}
}
- /* These must occur after the above loop to ensure that an atom that
- * depends on a previous atom with the same number behaves as expected */
- katom->event_code = BASE_JD_EVENT_DONE;
- katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ if (will_fail) {
+ if (!queued) {
+ ret = jd_done_nolock(katom, NULL);
+
+ goto out;
+ } else {
+ katom->will_fail_event_code = katom->event_code;
+ ret = false;
+
+ goto out;
+ }
+ } else {
+ /* These must occur after the above loop to ensure that an atom
+ * that depends on a previous atom with the same number behaves
+ * as expected */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ }
-#if defined(CONFIG_MALI_MIPE_ENABLED)
/* Create a new atom recording all dependencies it was set up with. */
kbase_tlstream_tl_new_atom(
katom,
kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
for (i = 0; i < 2; i++)
if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
- &katom->dep[i]))
+ &katom->dep[i])) {
kbase_tlstream_tl_dep_atom_atom(
(void *)kbase_jd_katom_dep_atom(
&katom->dep[i]),
(void *)katom);
-#endif
+ } else if (BASE_JD_DEP_TYPE_INVALID !=
+ user_atom->pre_dep[i].dependency_type) {
+ /* Resolved dependency. */
+ int dep_atom_number =
+ user_atom->pre_dep[i].atom_id;
+ struct kbase_jd_atom *dep_atom =
+ &jctx->atoms[dep_atom_number];
+
+ kbase_tlstream_tl_rdep_atom_atom(
+ (void *)dep_atom,
+ (void *)katom);
+ }
/* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
- if (!katom->jc && (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
ret = jd_done_nolock(katom, NULL);
goto out;
}
+ /* Reject atoms with invalid core requirements */
+ if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+ (katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
+ dev_warn(kctx->kbdev->dev,
+ "Rejecting atom with invalid core requirements");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
/* For invalid priority, be most lenient and choose the default */
sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
}
#endif /* CONFIG_KDS */
- if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
+
+#ifdef CONFIG_MALI_DMA_FENCE
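+ /* The atom is still waiting on dma-fences; it will be queued by the
+ * fence worker once they have all signaled.
+ */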
+ if (atomic_read(&katom->dma_fence.dep_count) != -1) {
+ ret = false;
+ goto out;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
== BASE_JD_REQ_SOFT_REPLAY) {
if (kbase_replay_process(katom))
ret = false;
ret = jd_done_nolock(katom, NULL);
goto out;
}
- /* The job has not yet completed */
- list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs);
+
ret = false;
- } else if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ } else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
katom->status = KBASE_JD_ATOM_STATE_IN_JS;
ret = kbasep_js_add_job(kctx, katom);
/* If job was cancelled then resolve immediately */
user_atom.udata = user_atom_v6.udata;
user_atom.extres_list = user_atom_v6.extres_list;
user_atom.nr_extres = user_atom_v6.nr_extres;
- user_atom.core_req = user_atom_v6.core_req;
+ user_atom.core_req = (u32)(user_atom_v6.core_req & 0x7fff);
/* atom number 0 is used for no dependency atoms */
if (!user_atom_v6.pre_dep[0])
}
#endif /* BASE_LEGACY_UK6_SUPPORT */
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
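+ /* Clients using the pre-10.3 ABI supply the core requirements in
+ * compat_core_req; keep only the bits that ABI defined.
+ */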
+ if (KBASE_API_VERSION(10, 3) > kctx->api_version)
+ user_atom.core_req = (u32)(user_atom.compat_core_req
+ & 0x7fff);
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride);
mutex_lock(&jctx->lock);
struct kbase_jd_context *jctx;
struct kbase_context *kctx;
struct kbasep_js_kctx_info *js_kctx_info;
- union kbasep_js_policy *js_policy;
struct kbase_device *kbdev;
struct kbasep_js_device_data *js_devdata;
u64 cache_jc = katom->jc;
kbdev = kctx->kbdev;
js_kctx_info = &kctx->jctx.sched_info;
js_devdata = &kbdev->js_data;
- js_policy = &kbdev->js_data.policy;
KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
mutex_unlock(&js_devdata->queue_mutex);
- mutex_unlock(&jctx->lock);
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
kbase_js_unpull(kctx, katom);
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&jctx->lock);
return;
}
while (!list_empty(&kctx->completed_jobs)) {
struct kbase_jd_atom *atom = list_entry(
kctx->completed_jobs.next,
- struct kbase_jd_atom, dep_item[0]);
+ struct kbase_jd_atom, jd_item);
list_del(kctx->completed_jobs.next);
kbase_event_post(kctx, atom);
kbase_js_sched_all(kbdev);
}
-/**
- * jd_evict_worker - Work queue job evict function
- * @data: a &struct work_struct
- *
- * Only called as part of evicting failed jobs. This is only called on jobs that
- * were never submitted to HW Access. Jobs that were submitted are handled
- * through kbase_jd_done_worker().
- * Operates serially with the kbase_jd_done_worker() on the work queue.
- *
- * We don't need to release most of the resources that would occur on
- * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be
- * running (by virtue of having not been submitted to HW Access).
- */
-static void jd_evict_worker(struct work_struct *data)
-{
- struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
- work);
- struct kbase_jd_context *jctx;
- struct kbase_context *kctx;
- struct kbasep_js_kctx_info *js_kctx_info;
- struct kbase_device *kbdev;
-
- /* Soft jobs should never reach this function */
- KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
-
- kctx = katom->kctx;
- kbdev = kctx->kbdev;
- jctx = &kctx->jctx;
- js_kctx_info = &kctx->jctx.sched_info;
-
- KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
-
- /* Scheduler: Remove the job from the system */
- mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
- kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
- mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-
- mutex_lock(&jctx->lock);
- jd_done_nolock(katom, NULL);
- /* katom may have been freed now, do not use! */
- mutex_unlock(&jctx->lock);
-
- kbase_js_sched_all(kbdev);
-}
-
/**
* kbase_jd_done - Complete a job that has been removed from the Hardware
* @katom: atom which has been completed
#ifdef CONFIG_DEBUG_FS
/* a failed job happened and is waiting for dumping*/
- if (kbase_debug_job_fault_process(katom, katom->event_code))
+ if (!katom->will_fail_event_code &&
+ kbase_debug_job_fault_process(katom, katom->event_code))
return;
#endif
queue_work(kctx->jctx.job_done_wq, &katom->work);
}
-void kbase_jd_evict(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
-{
- struct kbase_context *kctx;
- struct kbasep_js_kctx_info *js_kctx_info;
-
- KBASE_DEBUG_ASSERT(NULL != kbdev);
- KBASE_DEBUG_ASSERT(NULL != katom);
- kctx = katom->kctx;
- KBASE_DEBUG_ASSERT(NULL != kctx);
-
- js_kctx_info = &kctx->jctx.sched_info;
-
- KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
-
- /* This should only be done from a context that is currently scheduled
- */
- KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled);
-
- WARN_ON(work_pending(&katom->work));
-
- KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
- INIT_WORK(&katom->work, jd_evict_worker);
- queue_work(kctx->jctx.job_done_wq, &katom->work);
-}
void kbase_jd_zap_context(struct kbase_context *kctx)
{
* queued outside the job scheduler.
*/
+ del_timer_sync(&kctx->soft_job_timeout);
list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
- katom = list_entry(entry, struct kbase_jd_atom, dep_item[0]);
+ katom = list_entry(entry, struct kbase_jd_atom, queue);
kbase_cancel_soft_job(katom);
}
}
#endif
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_cancel_all_atoms(kctx);
+#endif
+
mutex_unlock(&kctx->jctx.lock);
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+ */
+ flush_workqueue(kctx->dma_fence.wq);
+#endif
+
kbase_jm_wait_for_zero_jobs(kctx);
}
KBASE_DEBUG_ASSERT(kctx);
- kctx->jctx.job_done_wq = alloc_workqueue("mali_jd", 0, 1);
+ kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
if (NULL == kctx->jctx.job_done_wq) {
mali_err = -ENOMEM;
goto out1;
/* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+
+#ifdef CONFIG_MALI_DMA_FENCE
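+ /* Each atom gets its own fence context so its fences carry
+ * independent sequence numbers.
+ */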
+ kctx->jctx.atoms[i].dma_fence.context = fence_context_alloc(1);
+ atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
+#endif
}
mutex_init(&kctx->jctx.lock);