Revert "MALI: rockchip: upgrade midgard DDK to r14p0-01rel0"
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_jd.c
index 81952e2d146fe1e5966e996a5389bc8c28c276cb..3e0a5892cc7a46084aa4fa23e0d5e8d51cb907ae 100644
@@ -61,7 +61,7 @@ static void __user *
 get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p)
 {
 #ifdef CONFIG_COMPAT
-       if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+       if (kctx->is_compat)
                return compat_ptr(p->compat_value);
 #endif
        return p->value;
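
For reference, here is a self-contained sketch of the compat-pointer pattern this hunk reverts to. All names, and the simplified compat_ptr() stand-in, are illustrative, not the driver's or the kernel's actual definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for kernel types so the sketch compiles in userspace. */
    typedef uint32_t u32;
    #define __user
    static void __user *compat_ptr(u32 v)
    {
            /* Simplified: the real kernel helper may do more on some
             * architectures; here we just zero-extend to a pointer. */
            return (void __user *)(uintptr_t)v;
    }

    union my_pointer {
            void __user *value; /* full pointer from a native task */
            u32 compat_value;   /* 32-bit pointer from a compat task */
    };

    static void __user *unpack_user_ptr(bool is_compat,
                                        const union my_pointer *p)
    {
            return is_compat ? compat_ptr(p->compat_value) : p->value;
    }
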
@@ -581,20 +581,24 @@ static inline void jd_resolve_dep(struct list_head *out_list,
                        dep_count = atomic_read(&dep_atom->dma_fence.dep_count);
                        if (likely(dep_count == -1)) {
                                dep_satisfied = true;
-                       } else {
+                       } else if (dep_count == 0) {
                                /*
-                                * There are either still active callbacks, or
-                                * all fences for this @dep_atom has signaled,
-                                * but the worker that will queue the atom has
-                                * not yet run.
+                                * All fences for this atom have signaled, but
+                                * the worker that will queue the atom has not
+                                * yet run.
                                 *
-                                * Wait for the fences to signal and the fence
-                                * worker to run and handle @dep_atom. If
-                                * @dep_atom was completed due to error on
-                                * @katom, then the fence worker will pick up
-                                * the complete status and error code set on
-                                * @dep_atom above.
+                                * Mark the atom as handled by setting
+                                * dep_count to -1 so that the worker doesn't
+                                * queue the atom again.
+                                */
+                               atomic_set(&dep_atom->dma_fence.dep_count, -1);
+                               /*
+                                * Remove the atom from the list of dma-fence
+                                * waiting atoms.
                                 */
+                               kbase_dma_fence_waiters_remove(dep_atom);
+                               dep_satisfied = true;
+                       } else {
                                dep_satisfied = false;
                        }
 #endif /* CONFIG_MALI_DMA_FENCE */
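
The dep_count field above encodes a small state machine; this hunk reverts to the older behaviour of claiming an atom whose fences have all signaled instead of waiting for the fence worker. A minimal sketch of the convention, with hypothetical standalone names:

    /*
     * dep_count convention (as documented in the comments above):
     *   -1  no dma-fence dependency, or the atom was already handled
     *    0  every fence signaled, but the fence worker has not yet run
     *   >0  that many fence callbacks are still outstanding
     */
    enum dep_state { DEP_SATISFIED, DEP_CLAIMED_HERE, DEP_PENDING };

    static enum dep_state classify_dep(int dep_count)
    {
            if (dep_count == -1)
                    return DEP_SATISFIED;    /* nothing left to wait for */
            if (dep_count == 0)
                    return DEP_CLAIMED_HERE; /* caller sets it to -1, removes
                                              * the atom from the waiter list
                                              * and queues it itself */
            return DEP_PENDING;              /* callbacks still active */
    }
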
@@ -661,40 +665,6 @@ static void jd_check_force_failure(struct kbase_jd_atom *katom)
 }
 #endif
 
-/**
- * is_dep_valid - Validate that a dependency is valid for early dependency
- *                submission
- * @katom: Dependency atom to validate
- *
- * A dependency is valid if any of the following are true :
- * - It does not exist (a non-existent dependency does not block submission)
- * - It is in the job scheduler
- * - It has completed, does not have a failure event code, and has not been
- *   marked to fail in the future
- *
- * Return: true if valid, false otherwise
- */
-static bool is_dep_valid(struct kbase_jd_atom *katom)
-{
-       /* If there's no dependency then this is 'valid' from the perspective of
-        * early dependency submission */
-       if (!katom)
-               return true;
-
-       /* Dependency must have reached the job scheduler */
-       if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
-               return false;
-
-       /* If dependency has completed and has failed or will fail then it is
-        * not valid */
-       if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
-                       (katom->event_code != BASE_JD_EVENT_DONE ||
-                       katom->will_fail_event_code))
-               return false;
-
-       return true;
-}
-
 static void jd_try_submitting_deps(struct list_head *out_list,
                struct kbase_jd_atom *node)
 {
@@ -709,41 +679,14 @@ static void jd_try_submitting_deps(struct list_head *out_list,
 
                        if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
                                /* Check if atom deps look sane */
-                               bool dep0_valid = is_dep_valid(
-                                               dep_atom->dep[0].atom);
-                               bool dep1_valid = is_dep_valid(
-                                               dep_atom->dep[1].atom);
-                               bool dep_satisfied = true;
-#ifdef CONFIG_MALI_DMA_FENCE
-                               int dep_count;
-
-                               dep_count = atomic_read(
-                                               &dep_atom->dma_fence.dep_count);
-                               if (likely(dep_count == -1)) {
-                                       dep_satisfied = true;
-                               } else {
-                               /*
-                                * There are either still active callbacks, or
-                                * all fences for this @dep_atom has signaled,
-                                * but the worker that will queue the atom has
-                                * not yet run.
-                                *
-                                * Wait for the fences to signal and the fence
-                                * worker to run and handle @dep_atom. If
-                                * @dep_atom was completed due to error on
-                                * @katom, then the fence worker will pick up
-                                * the complete status and error code set on
-                                * @dep_atom above.
-                                */
-                                       dep_satisfied = false;
-                               }
-#endif /* CONFIG_MALI_DMA_FENCE */
-#ifdef CONFIG_KDS
-                               dep_satisfied = dep_satisfied &&
-                                               dep_atom->kds_dep_satisfied;
-#endif
-
-                               if (dep0_valid && dep1_valid && dep_satisfied) {
+                               bool dep0_valid = !dep_atom->dep[0].atom ||
+                                               (dep_atom->dep[0].atom->status
+                                               >= KBASE_JD_ATOM_STATE_IN_JS);
+                               bool dep1_valid = !dep_atom->dep[1].atom ||
+                                               (dep_atom->dep[1].atom->status
+                                               >= KBASE_JD_ATOM_STATE_IN_JS);
+
+                               if (dep0_valid && dep1_valid) {
                                        dep_atom->in_jd_list = true;
                                        list_add(&dep_atom->jd_item, out_list);
                                }
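
After the revert, a dependency no longer needs to have completed successfully; it only needs to be absent or to have reached the job scheduler. A standalone sketch of that older rule (hypothetical types; the removed is_dep_valid() additionally rejected atoms that completed with an error or were marked to fail):

    #include <stdbool.h>

    enum atom_state {
            STATE_UNUSED, STATE_QUEUED, STATE_IN_JS,
            STATE_HW_COMPLETED, STATE_COMPLETED
    };

    struct atom { enum atom_state status; };

    static bool dep_ok_for_early_submit(const struct atom *dep)
    {
            /* No dependency, or it already reached the scheduler. */
            return !dep || dep->status >= STATE_IN_JS;
    }
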
@@ -815,7 +758,7 @@ bool jd_done_nolock(struct kbase_jd_atom *katom,
 
                for (i = 0; i < 2; i++)
                        jd_resolve_dep(&runnable_jobs, katom, i,
-                                       kbase_ctx_flag(kctx, KCTX_DYING));
+                                       kctx->jctx.sched_info.ctx.is_dying);
 
                if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
                        kbase_jd_post_external_resources(katom);
@@ -831,7 +774,7 @@ bool jd_done_nolock(struct kbase_jd_atom *katom,
                        KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
 
                        if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
-                                       !kbase_ctx_flag(kctx, KCTX_DYING)) {
+                                       !kctx->jctx.sched_info.ctx.is_dying) {
                                need_to_try_schedule_context |= jd_run_atom(node);
                        } else {
                                node->event_code = katom->event_code;
@@ -976,10 +919,7 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
        katom->x_pre_dep = NULL;
        katom->x_post_dep = NULL;
        katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
-
-       /* Implicitly sets katom->protected_state.enter as well. */
-       katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
-
+       katom->exit_protected_state = KBASE_ATOM_EXIT_PROTECTED_CHECK;
        katom->age = kctx->age_count++;
 
        INIT_LIST_HEAD(&katom->jd_item);
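
The removed comment notes that writing protected_state.exit "implicitly sets" protected_state.enter, which is only possible if the two fields share storage. A hypothetical sketch of such a layout, assuming both CHECK values are the first (zero) enumerator; these names are illustrative, not the driver's definitions:

    enum enter_state { ENTER_PROTECTED_CHECK, ENTER_PROTECTED_VINSTR };
    enum exit_state  { EXIT_PROTECTED_CHECK,  EXIT_PROTECTED_RESET };

    union protected_state {
            enum enter_state enter;
            enum exit_state exit;
    };

    static enum enter_state reset_protected(union protected_state *ps)
    {
            ps->exit = EXIT_PROTECTED_CHECK; /* writes the shared storage */
            return ps->enter;                /* reads back as
                                              * ENTER_PROTECTED_CHECK (0) */
    }
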
@@ -993,8 +933,6 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
        atomic_set(&katom->dma_fence.dep_count, -1);
 #endif
 
-       kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_IDLE);
-
        /* Don't do anything if there is a mess-up with the dependencies.
           This is done in a separate cycle to check both dependencies at once; otherwise
           it would add extra complexity to deal with the 1st dependency (just added to the list)
@@ -1099,17 +1037,10 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
                katom->status = KBASE_JD_ATOM_STATE_QUEUED;
        }
 
-       /* For invalid priority, be most lenient and choose the default */
-       sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
-       if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
-               sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
-       katom->sched_priority = sched_prio;
-
        /* Create a new atom recording all dependencies it was set up with. */
        kbase_tlstream_tl_new_atom(
                        katom,
                        kbase_jd_atom_id(kctx, katom));
-       kbase_tlstream_tl_attrib_atom_priority(katom, katom->sched_priority);
        kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
        for (i = 0; i < 2; i++)
                if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
@@ -1161,6 +1092,12 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
                goto out;
        }
 
+       /* For invalid priority, be most lenient and choose the default */
+       sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
+       if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+               sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
+       katom->sched_priority = sched_prio;
+
        if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
                /* handle what we need to do to access the external resources */
                if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
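
The relocated block above maps the user-supplied priority to a scheduler priority, silently falling back to the default when the value is out of range. A minimal sketch of that lenient mapping with assumed values (the real tables live elsewhere in the driver):

    #define SCHED_PRIO_INVALID (-1)
    #define SCHED_PRIO_DEFAULT 1 /* assumed "medium" default */

    static int map_user_prio(int user_prio)
    {
            /* Stand-in for kbasep_js_atom_prio_to_sched_prio(): accept an
             * assumed range of 0..2, reject everything else. */
            int p = (user_prio >= 0 && user_prio <= 2) ? user_prio
                                                       : SCHED_PRIO_INVALID;

            /* For invalid priority, be most lenient and choose the default. */
            return (p == SCHED_PRIO_INVALID) ? SCHED_PRIO_DEFAULT : p;
    }
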
@@ -1275,7 +1212,7 @@ int kbase_jd_submit(struct kbase_context *kctx,
 
        beenthere(kctx, "%s", "Enter");
 
-       if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+       if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != 0) {
                dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
                return -EINVAL;
        }
@@ -1437,6 +1374,7 @@ void kbase_jd_done_worker(struct work_struct *data)
        struct kbasep_js_device_data *js_devdata;
        u64 cache_jc = katom->jc;
        struct kbasep_js_atom_retained_state katom_retained_state;
+       bool schedule = false;
        bool context_idle;
        base_jd_core_req core_req = katom->core_req;
        u64 affinity = katom->affinity;
@@ -1459,7 +1397,6 @@ void kbase_jd_done_worker(struct work_struct *data)
         * Begin transaction on JD context and JS context
         */
        mutex_lock(&jctx->lock);
-       kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_DONE);
        mutex_lock(&js_devdata->queue_mutex);
        mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 
@@ -1467,7 +1404,7 @@ void kbase_jd_done_worker(struct work_struct *data)
         * because it only happens in response to an IRQ from a job that was
         * running.
         */
-       KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+       KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled);
 
        if (katom->event_code == BASE_JD_EVENT_STOPPED) {
                /* Atom has been promoted to stopped */
@@ -1476,12 +1413,12 @@ void kbase_jd_done_worker(struct work_struct *data)
                mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
                mutex_unlock(&js_devdata->queue_mutex);
 
-               spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+               spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
 
                katom->status = KBASE_JD_ATOM_STATE_IN_JS;
                kbase_js_unpull(kctx, katom);
 
-               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+               spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
                mutex_unlock(&jctx->lock);
 
                return;
@@ -1499,6 +1436,19 @@ void kbase_jd_done_worker(struct work_struct *data)
        /* Retain state before the katom disappears */
        kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
 
+       if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
+               mutex_lock(&js_devdata->runpool_mutex);
+               kbasep_js_clear_job_retry_submit(katom);
+               /* An atom that has been hard-stopped might have previously
+                * been soft-stopped and has just finished before the hard-stop
+                * occurred. For this reason, clear the hard-stopped flag */
+               katom->atom_flags &= ~(KBASE_KATOM_FLAG_BEEN_HARD_STOPPED);
+               mutex_unlock(&js_devdata->runpool_mutex);
+       }
+
+       if (kbasep_js_has_atom_finished(&katom_retained_state))
+               schedule = true;
+
        context_idle = kbase_js_complete_atom_wq(kctx, katom);
 
        KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
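
The restored schedule flag defers the decision to kick the scheduler until all completion work is done. A condensed sketch of that control flow; finished and queued_more stand in for kbasep_js_has_atom_finished() and the return value of jd_done_nolock(), and locking is elided:

    #include <stdbool.h>

    #define FLAG_BEEN_HARD_STOPPED (1u << 0)

    struct atom { unsigned int flags; };

    /* Returns true when the caller should run kbase_js_sched_all(). */
    static bool decide_schedule(struct atom *a, bool finished,
                                bool queued_more)
    {
            bool schedule = false;

            if (!finished) {
                    /* A hard-stopped atom may have been soft-stopped and
                     * completed just before the hard stop; drop the stale
                     * flag so it is not retried as hard-stopped. */
                    a->flags &= ~FLAG_BEEN_HARD_STOPPED;
            } else {
                    schedule = true;
            }

            /* jd_done_nolock() reports whether dependents became runnable. */
            return schedule || queued_more;
    }
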
@@ -1508,53 +1458,54 @@ void kbase_jd_done_worker(struct work_struct *data)
        mutex_unlock(&js_devdata->queue_mutex);
        katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
        /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
-       jd_done_nolock(katom, &kctx->completed_jobs);
+       schedule |= jd_done_nolock(katom, &kctx->completed_jobs);
 
        /* katom may have been freed now, do not use! */
 
        if (context_idle) {
                unsigned long flags;
 
-               context_idle = false;
                mutex_lock(&js_devdata->queue_mutex);
-               spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+               spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
 
                /* If kbase_sched() has scheduled this context back in then
-                * KCTX_ACTIVE will have been set after we marked it as
-                * inactive, and another pm reference will have been taken, so
-                * drop our reference. But do not call kbase_jm_idle_ctx(), as
-                * the context is active and fast-starting is allowed.
+                * ctx_active will have been set after we marked it as inactive,
+                * and another pm reference will have been taken, so drop our
+                * reference. But do not call kbase_jm_idle_ctx(), as the
+                * context is active and fast-starting is allowed.
                 *
                 * If an atom has been fast-started then kctx->atoms_pulled will
-                * be non-zero but KCTX_ACTIVE will still be false (as the
+                * be non-zero but ctx_active will still be false (as the
                 * previous pm reference has been inherited). Do NOT drop our
                 * reference, as it has been re-used, and leave the context as
                 * active.
                 *
-                * If no new atoms have been started then KCTX_ACTIVE will still
+                * If no new atoms have been started then ctx_active will still
                 * be false and atoms_pulled will be zero, so drop the reference
                 * and call kbase_jm_idle_ctx().
                 *
                 * As the checks are done under both the queue_mutex and
-                * hwaccess_lock is should be impossible for this to race
+                * runpool_irq.lock, it should be impossible for this to race
                 * with the scheduler code.
                 */
-               if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
-                   !atomic_read(&kctx->atoms_pulled)) {
+               if (kctx->ctx_active || !atomic_read(&kctx->atoms_pulled)) {
                        /* Calling kbase_jm_idle_ctx() here will ensure that
                         * atoms are not fast-started when we drop the
-                        * hwaccess_lock. This is not performed if
-                        * KCTX_ACTIVE is set as in that case another pm
-                        * reference has been taken and a fast-start would be
-                        * valid.
+                        * runpool_irq.lock. This is not performed if ctx_active
+                        * is set as in that case another pm reference has been
+                        * taken and a fast-start would be valid.
                         */
-                       if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
+                       if (!kctx->ctx_active)
                                kbase_jm_idle_ctx(kbdev, kctx);
-                       context_idle = true;
+                       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
+                                       flags);
+
+                       kbase_pm_context_idle(kbdev);
                } else {
-                       kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+                       kctx->ctx_active = true;
+                       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
+                                       flags);
                }
-               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
                mutex_unlock(&js_devdata->queue_mutex);
        }
 
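
The context_idle block above decides who owns the power-management reference once the atom completes. A simplified sketch of that hand-off, using plain fields in place of the driver's flags and atomics, with all locking elided:

    #include <stdbool.h>

    struct ctx { bool active; int atoms_pulled; };

    /* Returns true when the caller should drop its pm reference
     * (i.e. call kbase_pm_context_idle()). */
    static bool should_drop_pm_ref(struct ctx *c)
    {
            if (c->active || c->atoms_pulled == 0) {
                    /* Either the scheduler re-activated the context and
                     * took its own reference, or nothing was fast-started:
                     * our reference is redundant either way. */
                    return true;
            }
            /* An atom was fast-started and inherited our reference: keep
             * it, and mark the context active so it is not dropped twice. */
            c->active = true;
            return false;
    }
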
@@ -1568,7 +1519,8 @@ void kbase_jd_done_worker(struct work_struct *data)
 
        kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
 
-       kbase_js_sched_all(kbdev);
+       if (schedule)
+               kbase_js_sched_all(kbdev);
 
        if (!atomic_dec_return(&kctx->work_count)) {
                /* If worker now idle then post all events that jd_done_nolock()
@@ -1588,9 +1540,6 @@ void kbase_jd_done_worker(struct work_struct *data)
        kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
                        coreref_state);
 
-       if (context_idle)
-               kbase_pm_context_idle(kbdev);
-
        KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
 }
 
@@ -1633,7 +1582,7 @@ static void jd_cancel_worker(struct work_struct *data)
         * any), nor must we try to schedule out the context (it's already
         * scheduled out).
         */
-       KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+       KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
 
        /* Scheduler: Remove the job from the system */
        mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
@@ -1671,7 +1620,7 @@ static void jd_cancel_worker(struct work_struct *data)
  *
  * Context:
  *   This can be called safely from atomic context.
- *   The caller must hold kbdev->hwaccess_lock
+ *   The caller must hold kbasep_js_device_data.runpool_irq.lock
  */
 void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
                ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
@@ -1726,7 +1675,7 @@ void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
        KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
 
        /* This should only be done from a context that is not scheduled */
-       KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+       KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
 
        WARN_ON(work_pending(&katom->work));