return result;
}
-/* Helper macros to access and modify jsctx_queue.indicies */
-#define JSCTX_GET(offset, var, mask) \
- ((var >> offset) & mask)
-
-/* This wraps around to correct integer size automatically. */
-#define JSCTX_SET(var, offset, value, mask) \
- (var = ((var & ~(mask << offset)) /*Clear old bits */ \
- | (((value) & mask) << offset))) /* Set (after masking) new bits */
-
-#define JSCTX_GET_WR_IDX(var) \
- JSCTX_GET(JSCTX_WR_OFFSET, var, JSCTX_RB_MASK_STORE)
-#define JSCTX_GET_RN_IDX(var) \
- JSCTX_GET(JSCTX_RN_OFFSET, var, JSCTX_RB_MASK_STORE)
-#define JSCTX_GET_RD_IDX(var) \
- JSCTX_GET(JSCTX_RD_OFFSET, var, JSCTX_RB_MASK_STORE)
-
-#define JSCTX_GET_IDX_DIFF(lower, upper) \
- ((upper >= lower) ? (upper - lower) : (upper+JSCTX_RB_SIZE_STORE-lower))
-
-#define JSCTX_SET_WR_IDX(var, value) \
- JSCTX_SET(var, JSCTX_WR_OFFSET, value, JSCTX_RB_MASK_STORE)
-#define JSCTX_SET_RN_IDX(var, value) \
- JSCTX_SET(var, JSCTX_RN_OFFSET, value, JSCTX_RB_MASK_STORE)
-#define JSCTX_SET_RD_IDX(var, value) \
- JSCTX_SET(var, JSCTX_RD_OFFSET, value, JSCTX_RB_MASK_STORE)
-
/**
* jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
* @kctx: Pointer to kbase context with ring buffer.
jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
{
struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
- unsigned int var = atomic_read(&rb->indicies);
- return JSCTX_GET_RD_IDX(var) == JSCTX_GET_WR_IDX(var);
+ lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
+
+ return RB_EMPTY_ROOT(&rb->runnable_tree);
}
/**
return true;
}
-/**
- * jsctx_rb_is_full(): - Check if the given ringbuffer is full.
- * @queue: Pointer to the queue containing the ringbuffer.
- *
- * No locks explicitly required, result will always be consistent.
- * But depending on usage, the caller should consider jctx.lock,
- * for the result to remain correct.
- *
- * Return: true if the ringbuffer is full, false otherwise.
- */
-static inline bool
-jsctx_rb_is_full(struct jsctx_queue *queue)
-{
- unsigned int var = atomic_read(&queue->indicies);
- u16 rn_idx = JSCTX_GET_RN_IDX(var);
- u16 wr_idx = JSCTX_GET_WR_IDX(var);
-
- return JSCTX_GET_IDX_DIFF(rn_idx, wr_idx) >= JSCTX_RB_SIZE;
-}
-
-
/**
* jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
* @kctx: Pointer to kbase context with the queue.
* @prio: Priority id to iterate.
* @callback: Function pointer to callback.
*
- * Iterate over a ring buffer and invoke @callback for each entry in buffer, and
- * remove the entry from the buffer.
+ * Iterate over a queue and invoke @callback for each entry in the queue, and
+ * remove the entry from the queue.
*
- * If entries are added to the ring buffer while this is running those entries
- * may, or may not be covered. To ensure that all entries in the buffer have
- * been enumerated when this function returns jsctx->lock must be held when
- * calling this function.
+ * If entries are added to the queue while this is running, those entries may
+ * or may not be covered. To ensure that all entries in the queue have been
+ * enumerated when this function returns, jsctx->lock must be held when calling
+ * this function.
*
* The HW access lock, js_data.runpool_irq.lock, must always be held when
* calling this function.
kbasep_js_policy_ctx_job_cb callback)
{
struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
- struct kbase_jd_atom *katom;
-
- struct list_head *pos, *q;
-
- unsigned int var = atomic_read(&queue->indicies);
- u16 running_idx = JSCTX_GET_RN_IDX(var);
- u16 read_idx = JSCTX_GET_RD_IDX(var);
- u16 wr_idx = JSCTX_GET_WR_IDX(var);
- u16 i;
- const u16 count = JSCTX_GET_IDX_DIFF(running_idx, wr_idx);
lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- /* There must be no jobs currently in HW access */
- WARN_ON(read_idx != JSCTX_GET_RN_IDX(var));
-
- /* Invoke callback on all kbase_jd_atoms in the ring buffer, and
- * removes them from the buffer */
- for (i = 0; i < count; i++) {
- int id = queue->entries[read_idx & JSCTX_RB_MASK].atom_id;
+ while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
+ struct rb_node *node = rb_first(&queue->runnable_tree);
+ struct kbase_jd_atom *entry = rb_entry(node,
+ struct kbase_jd_atom, runnable_tree_node);
- katom = kbase_jd_atom_from_id(kctx, id);
- read_idx++;
- callback(kctx->kbdev, katom);
+ rb_erase(node, &queue->runnable_tree);
+ callback(kctx->kbdev, entry);
}
- atomic_set(&queue->indicies, 0);
- list_for_each_safe(pos, q, &queue->queue_head) {
- struct kbase_jd_atom *entry;
+ while (!list_empty(&queue->x_dep_head)) {
+ struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
+ struct kbase_jd_atom, queue);
+
+ list_del(queue->x_dep_head.next);
- entry = list_entry(pos, struct kbase_jd_atom, queue);
- list_del(pos);
callback(kctx->kbdev, entry);
}
}
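Both the runnable tree and the x_dep list are drained through the same callback type. A minimal sketch of a conforming callback, assuming the kbasep_js_policy_ctx_job_cb signature shown above (the example_count_cb name and counter are hypothetical; real callers pass the scheduler-policy job handlers):

	static atomic_t example_evicted = ATOMIC_INIT(0);

	/* Matches kbasep_js_policy_ctx_job_cb: called once per atom removed
	 * from the runnable tree or the x_dep list. */
	static void example_count_cb(struct kbase_device *kbdev,
			struct kbase_jd_atom *katom)
	{
		atomic_inc(&example_evicted);
	}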
jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
{
struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
- int id;
- unsigned int var = atomic_read(&rb->indicies);
+ struct rb_node *node;
lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- if (JSCTX_GET_RD_IDX(var) == JSCTX_GET_WR_IDX(var))
+ node = rb_first(&rb->runnable_tree);
+ if (!node)
return NULL;
- id = rb->entries[JSCTX_GET_RD_IDX(var) & JSCTX_RB_MASK].atom_id;
- return kbase_jd_atom_from_id(kctx, id);
+ return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
}
/**
return NULL;
}
-/**
- * jsctx_rb_peek_last(): - Check a ring buffer and get the last atom
- * @kctx: Pointer to kbase context with ring buffer.
- * @js: Job slot id to check.
- * @prio: Priority id to check.
- *
- * Check the ring buffer for the specified @js and @prio and return a
- * pointer to the last atom, unless all the priority's ring buffers are empty.
- *
- * The last atom is the atom that was added using jsctx_rb_add() most recently.
- *
- * Return: Pointer to last atom in buffer, or NULL if there is no atom.
- */
-static inline struct kbase_jd_atom *
-jsctx_rb_peek_last(struct kbase_context *kctx, int js, int prio)
-{
- struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
- unsigned int var = atomic_read(&rb->indicies);
- int id;
-
- lockdep_assert_held(&kctx->jctx.lock);
-
- if (!list_empty(&rb->queue_head)) {
- return list_entry(rb->queue_head.prev,
- struct kbase_jd_atom, queue);
- }
-
- if (JSCTX_GET_RN_IDX(var) == JSCTX_GET_WR_IDX(var))
- return NULL;
-
- id = rb->entries[(JSCTX_GET_WR_IDX(var) - 1) & JSCTX_RB_MASK].atom_id;
- return kbase_jd_atom_from_id(kctx, id);
-}
-
/**
* jsctx_rb_pull(): - Mark atom in list as running
* @kctx: Pointer to kbase context with ring buffer.
{
int prio = katom->sched_priority;
int js = katom->slot_nr;
- unsigned int oldvar, var;
struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
/* Atoms must be pulled in the correct order. */
WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
- do {
- u16 rd_idx;
-
- oldvar = atomic_read(&rb->indicies);
- var = oldvar;
- rd_idx = JSCTX_GET_RD_IDX(var);
-
- JSCTX_SET_RD_IDX(var, rd_idx+1);
- } while (atomic_cmpxchg(&rb->indicies, oldvar, var) != oldvar);
+ rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
}
-/**
- * jsctx_rb_unpull(): - Undo marking of atom in list as running
- * @kctx: Pointer to kbase context with ring buffer.
- * @katom: Pointer to katom to unpull.
- *
- * Undo jsctx_rb_pull() and put @katom back in the queue.
- *
- * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
- * pulled.
- */
-static inline void
-jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
-{
- int prio = katom->sched_priority;
- int js = katom->slot_nr;
- unsigned int oldvar, var;
- struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
-
-
- lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
-
- do {
- u16 rd_idx;
-
- oldvar = atomic_read(&rb->indicies);
- var = oldvar;
-
-
- rd_idx = JSCTX_GET_RD_IDX(var)-1;
-
- /* Atoms must be unpulled in correct order. */
- WARN_ON(rb->entries[rd_idx & JSCTX_RB_MASK].atom_id !=
- kbase_jd_atom_id(kctx, katom));
-
- JSCTX_SET_RD_IDX(var, rd_idx);
- } while (atomic_cmpxchg(&rb->indicies, oldvar, var) != oldvar);
-}
+#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0)
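LESS_THAN_WRAP() orders two unsigned ages by their signed difference, so the ordering survives counter wrap-around. A worked illustration, assuming katom->age is a u32 sequence counter and that two live atoms are never more than 2^31 apart:

	/* Hypothetical ages straddling the u32 wrap point. */
	u32 older = 0xfffffffeu;	/* assigned just before wrap */
	u32 newer = 0x00000001u;	/* assigned just after wrap */

	/* (s32)(older - newer) == (s32)0xfffffffd == -3, which is < 0 */
	WARN_ON(!LESS_THAN_WRAP(older, newer));
	/* (s32)(newer - older) == 3, which is not < 0 */
	WARN_ON(LESS_THAN_WRAP(newer, older));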
-/**
- * jsctx_rb_add(): - Add atom to ring buffer
- * @kctx: Pointer to kbase context with ring buffer.
- * @katom: Pointer to katom to add.
- *
- * Add @katom to the ring buffer determined by the atom's priority and job slot
- * number.
- *
- * If the ring buffer is full -EBUSY will be returned.
- *
- * Return: On success 0 is returned, on failure a negative error code.
- */
-static int
-jsctx_rb_add_atom(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+static void
+jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
int prio = katom->sched_priority;
int js = katom->slot_nr;
- struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
- unsigned int oldvar, var;
- u16 wr_idx, running_idx, count;
-
- lockdep_assert_held(&kctx->jctx.lock);
-
- oldvar = atomic_read(&rb->indicies);
- var = oldvar;
-
- running_idx = JSCTX_GET_RN_IDX(var);
- wr_idx = JSCTX_GET_WR_IDX(var);
- count = JSCTX_GET_IDX_DIFF(running_idx, wr_idx);
-
- /* Check if the ring buffer is full */
- if (count >= JSCTX_RB_SIZE)
- return -EBUSY;
-
- rb->entries[wr_idx & JSCTX_RB_MASK].atom_id =
- kbase_jd_atom_id(kctx, katom);
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+ struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
- wr_idx++;
- JSCTX_SET_WR_IDX(var, wr_idx);
+ lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- while (atomic_cmpxchg(&rb->indicies, oldvar, var) != oldvar) {
- oldvar = atomic_read(&rb->indicies);
- var = oldvar;
- wr_idx = JSCTX_GET_WR_IDX(var)+1;
+ while (*new) {
+ struct kbase_jd_atom *entry = container_of(*new,
+ struct kbase_jd_atom, runnable_tree_node);
- JSCTX_SET_WR_IDX(var, wr_idx);
+ parent = *new;
+ if (LESS_THAN_WRAP(katom->age, entry->age))
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
}
- return 0;
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&katom->runnable_tree_node, parent, new);
+ rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
}
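Because every insertion is keyed on age with the wrap-safe comparison, an in-order walk of the tree visits atoms oldest-first, so rb_first() recovers the FIFO head that the old ring buffer provided implicitly. A self-contained sketch of the same pattern (struct item and both helpers are hypothetical; only <linux/rbtree.h> is assumed):

	#include <linux/rbtree.h>

	struct item {
		u32 age;
		struct rb_node node;
	};

	static void item_tree_add(struct rb_root *root, struct item *it)
	{
		struct rb_node **new = &root->rb_node, *parent = NULL;

		while (*new) {
			struct item *entry = rb_entry(*new, struct item,
					node);

			parent = *new;
			if ((s32)(it->age - entry->age) < 0) /* LESS_THAN_WRAP */
				new = &(*new)->rb_left;
			else
				new = &(*new)->rb_right;
		}
		/* Link at the leaf position found above, then rebalance. */
		rb_link_node(&it->node, parent, new);
		rb_insert_color(&it->node, root);
	}

	/* Oldest entry is the leftmost node, or NULL if the tree is empty. */
	static struct item *item_tree_oldest(struct rb_root *root)
	{
		struct rb_node *node = rb_first(root);

		return node ? rb_entry(node, struct item, node) : NULL;
	}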
/**
- * jsctx_rb_remove(): - Remove atom from ring buffer
+ * jsctx_rb_unpull(): - Undo marking of atom in list as running
* @kctx: Pointer to kbase context with ring buffer.
- * @katom: Pointer to katom to remove.
+ * @katom: Pointer to katom to unpull.
*
- * Remove @katom from the ring buffer.
+ * Undo jsctx_rb_pull() and put @katom back in the queue.
*
- * @katom must have been pulled from the buffer earlier by jsctx_rb_pull(), and
- * atoms must be removed in the same order they were pulled from the ring
- * buffer.
+ * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
+ * pulled.
*/
static inline void
-jsctx_rb_remove(struct kbase_context *kctx, struct kbase_jd_atom *katom)
-{
- int prio = katom->sched_priority;
- int js = katom->slot_nr;
- unsigned int oldvar, var;
- struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
-
- lockdep_assert_held(&kctx->jctx.lock);
-
-
- do {
- unsigned int rn_idx;
-
- oldvar = atomic_read(&rb->indicies);
- var = oldvar;
-
- rn_idx = JSCTX_GET_RN_IDX(var);
-
- JSCTX_SET_RN_IDX(var, rn_idx+1);
- } while (atomic_cmpxchg(&rb->indicies, oldvar, var) != oldvar);
-}
-
-
-static void
-jsctx_ll_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
- int prio = katom->sched_priority;
- int js = katom->slot_nr;
- struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
-
- lockdep_assert_held(&kctx->jctx.lock);
+ lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- list_add_tail(&katom->queue, &queue->queue_head);
+ jsctx_tree_add(kctx, katom);
}
static bool kbase_js_ctx_pullable(struct kbase_context *kctx,
int js,
bool is_scheduled);
-static bool kbase_js_ctx_list_add_pullable(struct kbase_device *kbdev,
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js);
-static bool kbase_js_ctx_list_add_unpullable(struct kbase_device *kbdev,
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js);
-void
-jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js)
-{
- unsigned long flags;
- struct list_head *pos, *q;
- struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
- bool flushed_any = false;
- struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
- bool enqueue_required = false;
-
- lockdep_assert_held(&kctx->jctx.lock);
-
-
- /* Early out for common case */
- if (list_empty(&queue->queue_head) || jsctx_rb_is_full(queue))
- return;
-
-
- mutex_lock(&js_devdata->queue_mutex);
- mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
- mutex_lock(&js_devdata->runpool_mutex);
-
-
- spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, flags);
- /* If slot will transition from unpullable to pullable then add to
- * pullable list */
- if (jsctx_rb_none_to_pull(kctx, js))
- enqueue_required = true;
- else
- enqueue_required = false;
-
- list_for_each_safe(pos, q, &queue->queue_head) {
- struct kbase_jd_atom *katom;
-
- katom = list_entry(pos, struct kbase_jd_atom, queue);
-
- KBASE_DEBUG_ASSERT(katom);
-
- if (jsctx_rb_add_atom(kctx, katom))
- break;
-
- katom->atom_flags &= ~KBASE_KATOM_FLAG_JSCTX_IN_LL;
- katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;
- flushed_any = true;
-
- list_del(pos);
- }
-
-
- if (flushed_any) {
- bool timer_sync = false;
-
- if (enqueue_required) {
- if (kbase_js_ctx_pullable(kctx, js, false))
- timer_sync = kbase_js_ctx_list_add_pullable(
- kctx->kbdev, kctx, js);
- else
- timer_sync = kbase_js_ctx_list_add_unpullable(
- kctx->kbdev, kctx, js);
- /* If this context is active and the atom is the first
- * on its slot, kick the job manager to attempt to
- * fast-start the atom */
- if (kctx == kctx->kbdev->hwaccess.active_kctx)
- kbase_jm_try_kick(kctx->kbdev, 1 << js);
-
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
- flags);
-
- if (timer_sync)
- kbase_backend_ctx_count_changed(kctx->kbdev);
-
- } else {
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
- flags);
- }
- } else {
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
- }
- mutex_unlock(&js_devdata->runpool_mutex);
- mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
- mutex_unlock(&js_devdata->queue_mutex);
-
-}
-
/*
* Functions private to KBase ('Protected' functions)
*/
DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
jsdd->cfs_ctx_runtime_min_slices =
DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
- atomic_set(&jsdd->soft_event_timeout_ms, DEFAULT_JS_SOFT_EVENT_TIMEOUT);
+ atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
dev_dbg(kbdev->dev, "JS Config Attribs: ");
dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
jsdd->cfs_ctx_runtime_init_slices);
dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u",
jsdd->cfs_ctx_runtime_min_slices);
- dev_dbg(kbdev->dev, "\tsoft_event_timeout:%i",
- atomic_read(&jsdd->soft_event_timeout_ms));
+ dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
+ atomic_read(&jsdd->soft_job_timeout_ms));
if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
- INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].queue_head);
- atomic_set(&kctx->jsctx_queue[i][j].indicies, 0);
+ INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
+ kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
}
}
}
/**
- * kbase_js_ctx_list_add_pullable - Add context to the tail of the per-slot
- * pullable context queue
+ * kbase_js_ctx_list_add_pullable_nolock - Variant of
+ * kbase_js_ctx_list_add_pullable()
+ * where the caller must hold
+ * runpool_irq.lock
* @kbdev: Device pointer
* @kctx: Context to add to queue
* @js: Job slot to use
*
- * If the context is on either the pullable or unpullable queues, then it is
- * removed before being added to the tail.
- *
- * This function should be used when queueing a context for the first time, or
- * re-queueing a context that has been pulled from.
- *
- * Caller must hold kbasep_jd_device_data.queue_mutex
+ * Caller must hold runpool_irq.lock
*
* Return: true if caller should call kbase_backend_ctx_count_changed()
*/
-static bool kbase_js_ctx_list_add_pullable(struct kbase_device *kbdev,
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js)
{
bool ret = false;
- lockdep_assert_held(&kbdev->js_data.queue_mutex);
- lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
}
/**
- * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
- * per-slot pullable context queue
+ * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
+ * kbase_js_ctx_list_add_pullable_head()
+ * where the caller must hold
+ * runpool_irq.lock
* @kbdev: Device pointer
* @kctx: Context to add to queue
* @js: Job slot to use
*
- * If the context is on either the pullable or unpullable queues, then it is
- * removed before being added to the head.
- *
- * This function should be used when a context has been scheduled, but no jobs
- * can currently be pulled from it.
- *
- * Caller must hold kbasep_jd_device_data.queue_mutex
+ * Caller must hold runpool_irq.lock
*
* Return: true if caller should call kbase_backend_ctx_count_changed()
*/
-static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
- struct kbase_context *kctx,
- int js)
+static bool kbase_js_ctx_list_add_pullable_head_nolock(
+ struct kbase_device *kbdev, struct kbase_context *kctx, int js)
{
bool ret = false;
- lockdep_assert_held(&kbdev->js_data.queue_mutex);
- lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
}
/**
- * kbase_js_ctx_list_add_unpullable - Add context to the tail of the per-slot
- * unpullable context queue
+ * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
+ * per-slot pullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * If the context is on either the pullable or unpullable queues, then it is
+ * removed before being added to the head.
+ *
+ * This function should be used when a context has been scheduled, but no jobs
+ * can currently be pulled from it.
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
+ ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
+ * per-slot unpullable context queue
* @kbdev: Device pointer
* @kctx: Context to add to queue
* @js: Job slot to use
* This function should be used when a context has been pulled from, and there
* are no jobs remaining on the specified slot.
*
- * Caller must hold kbasep_jd_device_data.queue_mutex
+ * Caller must hold runpool_irq.lock
*
* Return: true if caller should call kbase_backend_ctx_count_changed()
*/
-static bool kbase_js_ctx_list_add_unpullable(struct kbase_device *kbdev,
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js)
{
bool ret = false;
- lockdep_assert_held(&kbdev->js_data.queue_mutex);
- lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
&kbdev->js_data.ctx_list_unpullable[js]);
}
/**
- * kbase_js_ctx_list_remove - Remove context from the per-slot pullable or
- * unpullable context queues
+ * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
+ * or unpullable context queues
* @kbdev: Device pointer
* @kctx: Context to remove from queue
* @js: Job slot to use
* This function should be used when a context has no jobs on the GPU, and no
* jobs remaining for the specified slot.
*
- * Caller must hold kbasep_jd_device_data.queue_mutex
+ * Caller must hold runpool_irq.lock
*
* Return: true if caller should call kbase_backend_ctx_count_changed()
*/
-static bool kbase_js_ctx_list_remove(struct kbase_device *kbdev,
+static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js)
{
bool ret = false;
- lockdep_assert_held(&kbdev->js_data.queue_mutex);
- lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
}
/**
- * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
- * queue.
+ * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
+ * where the caller must hold
+ * runpool_irq.lock
* @kbdev: Device pointer
* @js: Job slot to use
*
- * Caller must hold kbasep_jd_device_data::queue_mutex
+ * Caller must hold runpool_irq.lock
*
* Return: Context to use for specified slot.
* NULL if no contexts present for specified slot
*/
-static struct kbase_context *kbase_js_ctx_list_pop_head(
+static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
struct kbase_device *kbdev,
int js)
{
struct kbase_context *kctx;
- lockdep_assert_held(&kbdev->js_data.queue_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
return NULL;
return kctx;
}
+/**
+ * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
+ * queue.
+ * @kbdev: Device pointer
+ * @js: Job slot to use
+ *
+ * Return: Context to use for specified slot.
+ * NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head(
+ struct kbase_device *kbdev, int js)
+{
+ struct kbase_context *kctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
+ kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
+
+ return kctx;
+}
+
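The *_nolock renames above all follow one convention: the _nolock variant asserts runpool_irq.lock through lockdep and does the work, while a thin wrapper takes the lock for callers that do not already hold it. As a generic sketch (foo is hypothetical):

	static bool foo_nolock(struct kbase_device *kbdev)
	{
		lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
		/* ... touch state guarded by the HW access lock ... */
		return true;
	}

	static bool foo(struct kbase_device *kbdev)
	{
		unsigned long flags;
		bool ret;

		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		ret = foo_nolock(kbdev);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
				flags);

		return ret;
	}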
/**
* kbase_js_ctx_pullable - Return if a context can be pulled from on the
* specified slot
/* Dependent atom must already have been submitted */
if (!(dep_atom->atom_flags &
- (KBASE_KATOM_FLAG_JSCTX_IN_LL |
- KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED))){
+ KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
ret = false;
break;
}
ret = false;
break;
}
+ /* Each dependee atom can only have one
+ * same-slot dependency */
+ if (dep_atom->post_dep) {
+ ret = false;
+ break;
+ }
has_dep = true;
} else {
/* Only one cross-slot dependency can be
has_x_dep = true;
}
- if (kbase_jd_katom_dep_type(&katom->dep[i]) ==
- BASE_JD_DEP_TYPE_DATA &&
- js == dep_js) {
- struct kbase_jd_atom *last_atom =
- jsctx_rb_peek_last(kctx, js,
- prio);
-
- /* Last atom on slot must be pre-dep for this
- * atom */
- if (last_atom != dep_atom) {
- ret = false;
- break;
- }
- }
-
/* Dependency can be represented in ringbuffers */
}
}
}
if ((kbase_jd_katom_dep_type(&katom->dep[i])
== BASE_JD_DEP_TYPE_DATA) &&
- (js == dep_js))
- katom->atom_flags |=
- KBASE_KATOM_FLAG_FAIL_PREV;
+ (js == dep_js)) {
+ katom->pre_dep = dep_atom;
+ dep_atom->post_dep = katom;
+ }
list_del(&katom->dep_item[i]);
kbase_jd_katom_dep_clear(&katom->dep[i]);
if (enqueue_required) {
if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
- timer_sync = kbase_js_ctx_list_add_pullable(kbdev, kctx,
- atom->slot_nr);
+ timer_sync = kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, atom->slot_nr);
else
- timer_sync = kbase_js_ctx_list_add_unpullable(kbdev,
- kctx, atom->slot_nr);
+ timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
+ kbdev, kctx, atom->slot_nr);
}
/* If this context is active and the atom is the first on its slot,
* kick the job manager to attempt to fast-start the atom */
wake_up(&js_kctx_info->ctx.is_scheduled_wait);
/* Queue an action to occur after we've dropped the lock */
- release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED;
+ release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
+ KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
} else {
kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
katom_retained_state, runpool_ctx_attr_change);
{
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
- base_jd_event_code event_code;
kbasep_js_release_result release_result;
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(kctx != NULL);
js_kctx_info = &kctx->jctx.sched_info;
js_devdata = &kbdev->js_data;
- event_code = katom_retained_state->event_code;
mutex_lock(&js_devdata->queue_mutex);
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
*/
}
-/**
- * kbase_js_set_timeouts - update all JS timeouts with user specified data
- * @kbdev: Device pointer
- *
- * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
- * set to a positive number then that becomes the new value used, if a timeout
- * is negative then the default is set.
- */
-static void kbase_js_set_timeouts(struct kbase_device *kbdev)
+void kbase_js_set_timeouts(struct kbase_device *kbdev)
{
- struct kbasep_js_device_data *js_data = &kbdev->js_data;
-
- if (kbdev->js_scheduling_period_ns < 0)
- js_data->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
- else if (kbdev->js_scheduling_period_ns > 0)
- js_data->scheduling_period_ns = kbdev->js_scheduling_period_ns;
-
- if (kbdev->js_soft_stop_ticks < 0)
- js_data->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
- else if (kbdev->js_soft_stop_ticks > 0)
- js_data->soft_stop_ticks = kbdev->js_soft_stop_ticks;
-
- if (kbdev->js_soft_stop_ticks_cl < 0)
- js_data->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
- else if (kbdev->js_soft_stop_ticks_cl > 0)
- js_data->soft_stop_ticks_cl = kbdev->js_soft_stop_ticks_cl;
-
- if (kbdev->js_hard_stop_ticks_ss < 0) {
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
- js_data->hard_stop_ticks_ss =
- DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
- else
- js_data->hard_stop_ticks_ss =
- DEFAULT_JS_HARD_STOP_TICKS_SS;
- } else if (kbdev->js_hard_stop_ticks_ss > 0) {
- js_data->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss;
- }
-
- if (kbdev->js_hard_stop_ticks_cl < 0)
- js_data->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
- else if (kbdev->js_hard_stop_ticks_cl > 0)
- js_data->hard_stop_ticks_cl = kbdev->js_hard_stop_ticks_cl;
-
- if (kbdev->js_hard_stop_ticks_dumping < 0)
- js_data->hard_stop_ticks_dumping =
- DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
- else if (kbdev->js_hard_stop_ticks_dumping > 0)
- js_data->hard_stop_ticks_dumping =
- kbdev->js_hard_stop_ticks_dumping;
-
- if (kbdev->js_reset_ticks_ss < 0) {
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
- js_data->gpu_reset_ticks_ss =
- DEFAULT_JS_RESET_TICKS_SS_8408;
- else
- js_data->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
- } else if (kbdev->js_reset_ticks_ss > 0) {
- js_data->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss;
- }
-
- if (kbdev->js_reset_ticks_cl < 0)
- js_data->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
- else if (kbdev->js_reset_ticks_cl > 0)
- js_data->gpu_reset_ticks_cl = kbdev->js_reset_ticks_cl;
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
- if (kbdev->js_reset_ticks_dumping < 0)
- js_data->gpu_reset_ticks_dumping =
- DEFAULT_JS_RESET_TICKS_DUMPING;
- else if (kbdev->js_reset_ticks_dumping > 0)
- js_data->gpu_reset_ticks_dumping =
- kbdev->js_reset_ticks_dumping;
+ kbase_backend_timeouts_changed(kbdev);
}
static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
0u,
kbasep_js_trace_get_refcnt(kbdev, kctx));
- if (js_devdata->nr_user_contexts_running == 0 &&
- kbdev->js_timeouts_updated) {
- /* Only when there are no other contexts submitting jobs:
- * Latch in run-time job scheduler timeouts that were set
- * through js_timeouts sysfs file */
- kbase_js_set_timeouts(kbdev);
-
- kbdev->js_timeouts_updated = false;
- }
-
js_kctx_info->ctx.is_scheduled = true;
mutex_lock(&new_address_space->transaction_mutex);
if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
mutex_unlock(&new_address_space->transaction_mutex);
- /* If address space is not pending, then kbase_backend_use_ctx()
- * failed. Roll back the transaction so far and return */
- if (!kctx->as_pending) {
- js_kctx_info->ctx.is_scheduled = false;
+ /* Roll back the transaction so far and return */
+ js_kctx_info->ctx.is_scheduled = false;
- kbase_backend_release_free_address_space(kbdev, as_nr);
- }
+ kbase_backend_release_free_address_space(kbdev, as_nr);
mutex_unlock(&js_devdata->runpool_mutex);
unsigned long flags;
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
- if (kctx->as_pending) {
- /* Context waiting for AS to be assigned */
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
- return false;
- }
if (kbase_backend_use_ctx_sched(kbdev, kctx)) {
/* Context already has ASID - mark as active */
kbdev->hwaccess.active_kctx = kctx;
is_scheduled = js_kctx_info->ctx.is_scheduled;
if (!is_scheduled) {
/* Add the context to the pullable list */
- if (kbase_js_ctx_list_add_pullable(kbdev, kctx, 0))
+ if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
kbase_js_sync_timers(kbdev);
/* Fast-starting requires the jsctx_mutex to be dropped,
struct kbase_context *kctx)
{
struct kbasep_js_kctx_info *js_kctx_info;
- bool pending;
KBASE_DEBUG_ASSERT(kctx != NULL);
js_kctx_info = &kctx->jctx.sched_info;
/* We don't need to use the address space anymore */
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED);
- pending = kctx->as_pending;
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
- /* Release the context - it will be scheduled out if there is no
- * pending job */
- if (!pending)
- kbasep_js_runpool_release_ctx(kbdev, kctx);
+ /* Release the context - it will be scheduled out */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
kbase_js_sched_all(kbdev);
}
* the instrumented context. It'll be suspended by
* disabling instrumentation */
if (kctx->jctx.sched_info.ctx.flags &
- KBASE_CTX_FLAG_PRIVILEGED)
- KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1);
+ KBASE_CTX_FLAG_PRIVILEGED) {
+ ++nr_privileged_ctx;
+ WARN_ON(nr_privileged_ctx != 1);
+ }
}
}
CSTD_UNUSED(nr_privileged_ctx);
if (!js_kctx_info->ctx.is_scheduled &&
kbase_js_ctx_pullable(kctx, js, false))
- timer_sync = kbase_js_ctx_list_add_pullable(
+ timer_sync =
+ kbase_js_ctx_list_add_pullable_nolock(
kbdev, kctx, js);
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
/* Check if there are lower priority jobs to soft stop */
kbase_job_slot_ctx_priority_check_locked(kctx, katom);
- /* Add atom to ring buffer. */
- if (jsctx_rb_add_atom(kctx, katom)) {
- jsctx_ll_add(kctx, katom);
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
+ (katom->pre_dep && (katom->pre_dep->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+ list_add_tail(&katom->queue, &queue->x_dep_head);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
enqueue_required = false;
- katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_LL;
} else {
- katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;
+ /* Add atom to the runnable tree. */
+ jsctx_tree_add(kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
}
+
return enqueue_required;
}
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ * runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->kbdev->js_data.runpool_irq.lock);
+
+ while (katom) {
+ WARN_ON(!(katom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+ if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ list_del(&katom->queue);
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ jsctx_tree_add(katom->kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ } else {
+ break;
+ }
+
+ katom = katom->post_dep;
+ }
+}
+
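The pre_dep/post_dep pointers set up during dependency validation form a singly-linked chain of same-slot dependents, so one call can unblock a whole run of atoms. A sketch of the walk, with hypothetical atoms A, B and C submitted in that order:

	/*
	 * x_dep list:  A -> B -> C   (A->post_dep == B, B->post_dep == C)
	 *
	 * A's X_DEP_BLOCKED flag has just been cleared, then
	 * kbase_js_move_to_tree(A) runs:
	 *   A: not blocked -> moved to runnable_tree
	 *   B: not blocked -> moved to runnable_tree
	 *   C: still X_DEP_BLOCKED -> loop breaks; C stays on x_dep_head
	 */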
+
/**
* kbase_js_evict_deps - Evict dependencies of a failed atom.
* @kctx: Context pointer
struct kbase_jd_atom *katom, int js, int prio)
{
struct kbase_jd_atom *x_dep = katom->x_post_dep;
- struct kbase_jd_atom *next_katom = jsctx_rb_peek_prio(kctx, js, prio);
+ struct kbase_jd_atom *next_katom = katom->post_dep;
lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- if (next_katom &&
- (next_katom->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV)) {
+ if (next_katom) {
KBASE_DEBUG_ASSERT(next_katom->status !=
KBASE_JD_ATOM_STATE_HW_COMPLETED);
-
next_katom->will_fail_event_code = katom->event_code;
}
 /* Has cross-slot dependency. */
- if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_LL |
- KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED))) {
+ if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
/* Remove dependency.*/
x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) {
x_dep->will_fail_event_code = katom->event_code;
}
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
+ kbase_js_move_to_tree(x_dep);
}
}
/* Due to ordering restrictions when unpulling atoms on failure, we do
* not allow multiple runs of fail-dep atoms from the same context to be
* present on the same slot */
- if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) &&
- atomic_read(&kctx->atoms_pulled_slot[js])) {
+ if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) {
struct kbase_jd_atom *prev_atom =
kbase_backend_inspect_tail(kctx->kbdev, js);
if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
jsctx_rb_none_to_pull(kctx, js))
- timer_sync |= kbase_js_ctx_list_remove(kbdev, kctx, js);
+ timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
if (!atomic_read(&kctx->atoms_pulled)) {
if (!kctx->slots_pullable) {
for (slot = 0; slot < num_slots; slot++) {
if (kbase_js_ctx_pullable(kctx, slot, true))
timer_sync |=
- kbase_js_ctx_list_add_pullable(
+ kbase_js_ctx_list_add_pullable_nolock(
kbdev, kctx, slot);
}
}
mutex_lock(&js_devdata->runpool_mutex);
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
- if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) {
- jsctx_rb_remove(kctx, katom);
-
+ if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
context_idle = !atomic_dec_return(&kctx->atoms_pulled);
atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);
timer_sync = true;
}
}
- WARN_ON(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_LL);
+ WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
- jsctx_rb_none_to_pull(kctx, atom_slot))
- timer_sync |= kbase_js_ctx_list_remove(kctx->kbdev, kctx,
- atom_slot);
+ jsctx_rb_none_to_pull(kctx, atom_slot)) {
+ if (!list_empty(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
+ timer_sync |= kbase_js_ctx_list_remove_nolock(
+ kctx->kbdev, kctx, atom_slot);
+ }
/*
* If submission is disabled on this context (most likely due to an
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
if (kbase_js_ctx_pullable(kctx, js, true))
- timer_sync |= kbase_js_ctx_list_add_pullable(
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
kbdev, kctx, js);
}
} else if (katom->x_post_dep &&
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
if (kbase_js_ctx_pullable(kctx, js, true))
- timer_sync |= kbase_js_ctx_list_add_pullable(
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
kbdev, kctx, js);
}
}
struct kbase_device *kbdev;
struct kbase_context *kctx = katom->kctx;
union kbasep_js_policy *js_policy;
- struct kbasep_js_device_data *js_devdata;
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
kbdev = kctx->kbdev;
js_policy = &kbdev->js_data.policy;
- js_devdata = &kbdev->js_data;
lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
katom->slot_nr), NULL, 0);
#endif
- kbase_tlstream_tl_nret_atom_lpu(
- katom,
- &kbdev->gpu_props.props.raw_props.js_features[
- katom->slot_nr]);
- kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
- kbase_tlstream_tl_nret_ctx_lpu(
- kctx,
- &kbdev->gpu_props.props.raw_props.js_features[
- katom->slot_nr]);
-
/* Calculate the job's time used */
if (end_timestamp != NULL) {
/* Only calculating it for jobs that really run on the HW (e.g.
kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
/* Unblock cross dependency if present */
- if (katom->x_post_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
- !(katom->x_post_dep->atom_flags &
- KBASE_KATOM_FLAG_FAIL_BLOCKER)))
- katom->x_post_dep->atom_flags &=
- ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
+ !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
+ (x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false);
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ kbase_js_move_to_tree(x_dep);
+ if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false))
+ kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
+ x_dep->slot_nr);
+ }
}
void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
{
struct kbasep_js_device_data *js_devdata;
- union kbasep_js_policy *js_policy;
bool timer_sync = false;
js_devdata = &kbdev->js_data;
- js_policy = &js_devdata->policy;
down(&js_devdata->schedule_sem);
mutex_lock(&js_devdata->queue_mutex);
/* Context can not be used at this time */
spin_lock_irqsave(&js_devdata->runpool_irq.lock,
flags);
- if (kctx->as_pending ||
- kbase_js_ctx_pullable(kctx, js, false)
+ if (kbase_js_ctx_pullable(kctx, js, false)
|| (kctx->jctx.sched_info.ctx.flags &
KBASE_CTX_FLAG_PRIVILEGED))
timer_sync |=
- kbase_js_ctx_list_add_pullable_head(
+ kbase_js_ctx_list_add_pullable_head_nolock(
kctx->kbdev, kctx, js);
else
timer_sync |=
- kbase_js_ctx_list_add_unpullable(
+ kbase_js_ctx_list_add_unpullable_nolock(
kctx->kbdev, kctx, js);
spin_unlock_irqrestore(
&js_devdata->runpool_irq.lock, flags);
/* Failed to pull jobs - push to head of list */
if (kbase_js_ctx_pullable(kctx, js, true))
timer_sync |=
- kbase_js_ctx_list_add_pullable_head(
+ kbase_js_ctx_list_add_pullable_head_nolock(
kctx->kbdev,
kctx, js);
else
timer_sync |=
- kbase_js_ctx_list_add_unpullable(
+ kbase_js_ctx_list_add_unpullable_nolock(
kctx->kbdev,
kctx, js);
/* Push to back of list */
if (kbase_js_ctx_pullable(kctx, js, true))
- timer_sync |= kbase_js_ctx_list_add_pullable(
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
kctx->kbdev, kctx, js);
else
- timer_sync |= kbase_js_ctx_list_add_unpullable(
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
kctx->kbdev, kctx, js);
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
flags);
/* First, atomically do the following:
* - mark the context as dying
* - try to evict it from the policy queue */
+ mutex_lock(&kctx->jctx.lock);
mutex_lock(&js_devdata->queue_mutex);
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
js_kctx_info->ctx.is_dying = true;
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
} else {
unsigned long flags;
bool was_retained;
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
kctx);