/*
 * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/*
 * Job Scheduler Implementation
 */

#include <mali_kbase.h>
#include <mali_kbase_js.h>
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include <mali_kbase_gator.h>
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
#endif
#include <mali_kbase_hw.h>

#include <mali_kbase_defs.h>
#include <mali_kbase_config_defaults.h>

#include "mali_kbase_jm.h"
#include "mali_kbase_hwaccess_jm.h"

43 /* Bitpattern indicating the result of releasing a context */
45 /* The context was descheduled - caller should try scheduling in a new
46 * one to keep the runpool full */
47 KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
48 /* Ctx attributes were changed - caller should try scheduling all
50 KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
53 typedef u32 kbasep_js_release_result;
const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
	KBASE_JS_ATOM_SCHED_PRIO_MED,  /* BASE_JD_PRIO_MEDIUM */
	KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
	KBASE_JS_ATOM_SCHED_PRIO_LOW   /* BASE_JD_PRIO_LOW */
};

const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
	BASE_JD_PRIO_HIGH,   /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
	BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
	BASE_JD_PRIO_LOW     /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
};

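/*
 * Illustrative note (not part of the original driver): the two tables
 * above are inverses of each other, so a priority survives a round trip
 * between the base and scheduler encodings. For example:
 *
 *   int sched_prio = kbasep_js_atom_priority_to_relative[BASE_JD_PRIO_HIGH];
 *   // sched_prio == KBASE_JS_ATOM_SCHED_PRIO_HIGH
 *   base_jd_prio prio = kbasep_js_relative_priority_to_atom[sched_prio];
 *   // prio == BASE_JD_PRIO_HIGH
 */
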
/*
 * Private function prototypes
 */
static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
		struct kbase_device *kbdev, struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state);

static int kbase_js_get_slot(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);

static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
		kbasep_js_policy_ctx_job_cb callback);

static bool kbase_js_evict_atom(struct kbase_context *kctx,
				struct kbase_jd_atom *katom_evict,
				struct kbase_jd_atom *start_katom,
				struct kbase_jd_atom *head_katom,
				struct list_head *evict_list,
				struct jsctx_rb *rb, int idx);

/* Helper for trace subcodes */
#if KBASE_TRACE_ENABLE
static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return refcnt;
}

static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}

	return refcnt;
}
#else /* KBASE_TRACE_ENABLE */
static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(kctx);
	return 0;
}

static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(kctx);
	return 0;
}
#endif /* KBASE_TRACE_ENABLE */

enum {
	JS_DEVDATA_INIT_NONE = 0,
	JS_DEVDATA_INIT_CONSTANTS = (1 << 0),
	JS_DEVDATA_INIT_POLICY = (1 << 1),
	JS_DEVDATA_INIT_ALL = ((1 << 2) - 1)
};

enum {
	JS_KCTX_INIT_NONE = 0,
	JS_KCTX_INIT_CONSTANTS = (1 << 0),
	JS_KCTX_INIT_POLICY = (1 << 1),
	JS_KCTX_INIT_ALL = ((1 << 2) - 1)
};

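/*
 * Illustrative note (not part of the original driver): the *_INIT_ALL
 * values are the OR of every stage bit, so init code can verify that all
 * stages completed with a single compare:
 *
 *   jsdd->init_status |= JS_DEVDATA_INIT_CONSTANTS;   // (1 << 0)
 *   jsdd->init_status |= JS_DEVDATA_INIT_POLICY;      // (1 << 1)
 *   // init_status == 0x3 == JS_DEVDATA_INIT_ALL, i.e. ((1 << 2) - 1)
 */
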
/**
 * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
 * @features: JSn_FEATURE register value
 *
 * Given a JSn_FEATURE register value, returns the core requirements that
 * match.
 *
 * Return: Core requirement bit mask
 */
static base_jd_core_req core_reqs_from_jsn_features(u16 features)
{
	base_jd_core_req core_req = 0u;

	if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
		core_req |= BASE_JD_REQ_V;

	if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
		core_req |= BASE_JD_REQ_CF;

	if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
		core_req |= BASE_JD_REQ_CS;

	if ((features & JS_FEATURE_TILER_JOB) != 0)
		core_req |= BASE_JD_REQ_T;

	if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
		core_req |= BASE_JD_REQ_FS;

	return core_req;
}

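/*
 * Illustrative note (not part of the original driver): a slot whose
 * JSn_FEATURES register advertises compute and tiler jobs decodes as:
 *
 *   u16 features = JS_FEATURE_COMPUTE_JOB | JS_FEATURE_TILER_JOB;
 *   base_jd_core_req reqs = core_reqs_from_jsn_features(features);
 *   // reqs == (BASE_JD_REQ_CS | BASE_JD_REQ_T)
 */
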
static void kbase_js_sync_timers(struct kbase_device *kbdev)
{
	mutex_lock(&kbdev->js_data.runpool_mutex);
	kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&kbdev->js_data.runpool_mutex);
}

/* Hold the kbasep_js_device_data::runpool_irq::lock for this */
bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_per_as_data *js_per_as_data;
	bool result = false;
	int as_nr;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		int new_refcnt;

		KBASE_DEBUG_ASSERT(as_nr >= 0);
		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL);

		new_refcnt = ++(js_per_as_data->as_busy_refcount);

		KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
				NULL, 0u, new_refcnt);
		result = true;
	}

	return result;
}

/**
 * jsctx_rb_is_empty_prio(): - Check if ring buffer is empty
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 * @prio: Priority to check.
 *
 * Caller must hold runpool_irq.lock.
 *
 * Return: true if the ring buffer is empty, false otherwise.
 */
static inline bool
jsctx_rb_is_empty_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	return rb->running_idx == rb->write_idx;
}

/**
 * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 * @prio: Priority to check.
 *
 * Return true if there are no atoms to pull. There may be running atoms in the
 * ring buffer even if there are no atoms to pull. It is also possible for the
 * ring buffer to be full (with running atoms) when this function returns
 * true.
 *
 * Caller must hold runpool_irq.lock.
 *
 * Return: true if there are no atoms to pull, false otherwise.
 */
static inline bool
jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	return rb->read_idx == rb->write_idx;
}

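/*
 * Illustrative note (not part of the original driver): each ring buffer
 * keeps three free-running u16 indices, with running_idx <= read_idx <=
 * write_idx (modulo 2^16). Atoms between running_idx and read_idx have
 * been pulled and are running; atoms between read_idx and write_idx are
 * queued and still pullable. Hence "empty" compares running_idx against
 * write_idx, while "none to pull" compares read_idx against write_idx:
 *
 *   running_idx = 4, read_idx = 6, write_idx = 6
 *   // jsctx_rb_none_to_pull_prio() == true  (nothing left to pull)
 *   // jsctx_rb_is_empty_prio()     == false (two atoms still running)
 */
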
/**
 * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
 * pullable atoms
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 *
 * Caller must hold runpool_irq.lock.
 *
 * Return: true if the ring buffers for all priorities have no pullable atoms,
 * false otherwise.
 */
static inline bool
jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
{
	int prio;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
		if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
			return false;
	}

	return true;
}

/**
 * jsctx_rb_compact_prio(): - Compact a ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to compact.
 * @prio: Priority id to compact.
 */
static inline void
jsctx_rb_compact_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	u16 compact_idx = rb->write_idx - 1;
	u16 end_idx = rb->running_idx - 1;
	u16 i;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	for (i = compact_idx; i != end_idx; i--) {
		if (rb->entries[i & JSCTX_RB_MASK].atom_id !=
				KBASEP_ATOM_ID_INVALID) {
			WARN_ON(compact_idx < rb->running_idx);
			rb->entries[compact_idx & JSCTX_RB_MASK].atom_id =
					rb->entries[i & JSCTX_RB_MASK].atom_id;

			compact_idx--;
		}
		if (rb->read_idx == i)
			rb->read_idx = compact_idx + 1;
	}

	rb->running_idx = compact_idx + 1;
}

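/*
 * Illustrative note (not part of the original driver): compaction walks
 * backwards from the newest entry and packs valid atom ids towards the
 * write end, so the holes left by evicted atoms (KBASEP_ATOM_ID_INVALID)
 * all end up below the new running_idx. For example, with entries
 * [A, INVALID, B] the loop rewrites the buffer so A and B occupy the two
 * slots nearest write_idx, and running_idx advances past the single hole.
 */
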
/**
 * jsctx_rb_compact(): - Compact all priority ring buffers
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to compact.
 */
static inline void
jsctx_rb_compact(struct kbase_context *kctx, int js)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
		jsctx_rb_compact_prio(kctx, js, prio);
}

/**
 * jsctx_rb_foreach_prio(): - Execute callback for each entry in ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to iterate.
 * @prio: Priority id to iterate.
 * @callback: Function pointer to callback.
 *
 * Iterate over a ring buffer and invoke @callback for each entry in the
 * buffer, removing each entry from the buffer as it is visited.
 *
 * If entries are added to the ring buffer while this is running those entries
 * may or may not be covered. To ensure that all entries in the buffer have
 * been enumerated when this function returns, jsctx->lock must be held when
 * calling this function.
 *
 * The HW access lock, js_data.runpool_irq.lock, must always be held when
 * calling this function.
 */
static void
jsctx_rb_foreach_prio(struct kbase_context *kctx, int js, int prio,
		kbasep_js_policy_ctx_job_cb callback)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	struct kbase_jd_atom *katom;
	u16 write_idx = ACCESS_ONCE(rb->write_idx);

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* There must be no jobs currently in HW access */
	WARN_ON(rb->read_idx != rb->running_idx);

	/* Invoke callback on all kbase_jd_atoms in the ring buffer, and
	 * remove them from the buffer */
	while (rb->read_idx != write_idx) {
		int id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id;

		katom = kbase_jd_atom_from_id(kctx, id);

		rb->read_idx++;
		rb->running_idx++;

		callback(kctx->kbdev, katom);
	}
}

/**
 * jsctx_rb_foreach(): - Execute callback for each entry in all priority rb
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to iterate.
 * @callback: Function pointer to callback.
 *
 * Iterate over all the different priorities, and for each call
 * jsctx_rb_foreach_prio() to iterate over the ring buffer and invoke @callback
 * for each entry in the buffer, and remove the entry from the buffer.
 */
static inline void
jsctx_rb_foreach(struct kbase_context *kctx, int js,
		kbasep_js_policy_ctx_job_cb callback)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
		jsctx_rb_foreach_prio(kctx, js, prio, callback);
}

/**
 * jsctx_rb_peek_prio(): - Check buffer and get next atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 * @prio: Priority id to check.
 *
 * Check the ring buffer for the specified @js and @prio and return a pointer to
 * the next atom, unless the ring buffer is empty.
 *
 * Return: Pointer to next atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	int id;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	if (jsctx_rb_none_to_pull_prio(kctx, js, prio))
		return NULL;

	id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id;
	return kbase_jd_atom_from_id(kctx, id);
}

/**
 * jsctx_rb_peek(): - Check all priority buffers and get next atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 *
 * Check the ring buffers for all priorities, starting from
 * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a
 * pointer to the next atom, unless all the priority's ring buffers are empty.
 *
 * Return: Pointer to next atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek(struct kbase_context *kctx, int js)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
		struct kbase_jd_atom *katom;

		katom = jsctx_rb_peek_prio(kctx, js, prio);
		if (katom)
			return katom;
	}

	return NULL;
}

/**
 * jsctx_rb_peek_last(): - Check a ring buffer and get the last atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js: Job slot id to check.
 * @prio: Priority id to check.
 *
 * Check the ring buffer for the specified @js and @prio and return a
 * pointer to the last atom, unless all the priority's ring buffers are empty.
 *
 * The last atom is the atom that was added using jsctx_rb_add_atom() most
 * recently.
 *
 * Return: Pointer to last atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek_last(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	int id;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	if (jsctx_rb_is_empty_prio(kctx, js, prio))
		return NULL;

	id = rb->entries[(rb->write_idx - 1) & JSCTX_RB_MASK].atom_id;
	return kbase_jd_atom_from_id(kctx, id);
}

/**
 * jsctx_rb_pull(): - Mark atom in list as running
 * @kctx: Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to pull.
 *
 * Mark an atom previously obtained from jsctx_rb_peek() as running.
 *
 * @katom must currently be at the head of the ring buffer.
 */
static inline void
jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be pulled in the correct order. */
	WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));

	rb->read_idx++;
}

/**
 * jsctx_rb_unpull(): - Undo marking of atom in list as running
 * @kctx: Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to unpull.
 *
 * Undo jsctx_rb_pull() and put @katom back in the queue.
 *
 * jsctx_rb_unpull() must be called on atoms in the reverse of the order in
 * which they were pulled (most recently pulled atom first), as enforced by
 * the WARN_ON() below.
 */
static inline void
jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be unpulled in correct order. */
	WARN_ON(rb->entries[(rb->read_idx - 1) & JSCTX_RB_MASK].atom_id !=
			kbase_jd_atom_id(kctx, katom));

	rb->read_idx--;
}

/**
 * jsctx_rb_add_atom(): - Add atom to ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to add.
 *
 * Add @katom to the ring buffer determined by the atom's priority and job slot
 * number.
 *
 * If the ring buffer is full -EBUSY will be returned.
 *
 * Return: On success 0 is returned, on failure a negative error code.
 */
static inline int
jsctx_rb_add_atom(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->jctx.lock);

	/* Check if the ring buffer is full */
	if ((rb->write_idx - rb->running_idx) >= JSCTX_RB_SIZE)
		return -EBUSY;

	rb->entries[rb->write_idx & JSCTX_RB_MASK].atom_id =
			kbase_jd_atom_id(kctx, katom);
	rb->write_idx++;

	return 0;
}

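/*
 * Illustrative note (not part of the original driver): because the
 * indices are free-running u16 values, the fullness test works across
 * wraparound without any modular bookkeeping. For example, with
 * running_idx == 0xFFFE and write_idx == 0x0002, the unsigned
 * subtraction (0x0002 - 0xFFFE) yields 4 occupied entries, and the
 * buffer is full once that difference reaches JSCTX_RB_SIZE.
 */
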
/**
 * jsctx_rb_remove(): - Remove atom from ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to remove.
 *
 * Remove @katom from the ring buffer.
 *
 * @katom must have been pulled from the buffer earlier by jsctx_rb_pull(), and
 * atoms must be removed in the same order they were pulled from the ring
 * buffer.
 */
static inline void
jsctx_rb_remove(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be completed in order. */
	WARN_ON(rb->entries[rb->running_idx & JSCTX_RB_MASK].atom_id !=
			kbase_jd_atom_id(kctx, katom));

	rb->running_idx++;
}

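/*
 * Illustrative note (not part of the original driver): the normal life
 * cycle of an atom in these ring buffers is
 *
 *   jsctx_rb_add_atom()  ->  write_idx++    (queued)
 *   jsctx_rb_pull()      ->  read_idx++     (running on the GPU)
 *   jsctx_rb_remove()    ->  running_idx++  (completed, slot reclaimed)
 *
 * with jsctx_rb_unpull() stepping read_idx back if a pulled atom has to
 * be returned to the queue before reaching the hardware.
 */
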
/**
 * jsctx_rb_evict(): - Evict atom, and dependents, from ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @start_katom: Pointer to the first katom to evict.
 * @head_katom: Pointer to head katom.
 * @evict_list: Pointer to head of list where evicted atoms are added.
 *
 * Iterate over the ring buffer starting at @start_katom and evict @start_katom
 * and dependent atoms in the ring buffer.
 *
 * @evict_list and @head_katom are passed on to kbase_js_evict_atom() which will
 * examine the atom dependencies.
 *
 * jsctx_rb_evict() is only called by kbase_js_evict_deps().
 */
static void
jsctx_rb_evict(struct kbase_context *kctx,
		struct kbase_jd_atom *start_katom,
		struct kbase_jd_atom *head_katom,
		struct list_head *evict_list)
{
	int prio = start_katom->sched_priority;
	int js = start_katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	bool atom_in_rb = false;
	u16 i, start_idx = 0;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	for (i = rb->running_idx; i != rb->write_idx; i++) {
		if (rb->entries[i & JSCTX_RB_MASK].atom_id ==
				kbase_jd_atom_id(kctx, start_katom)) {
			start_idx = i;
			atom_in_rb = true;
			break;
		}
	}

	/* start_katom must still be in ring buffer. */
	if (i == rb->write_idx || !atom_in_rb)
		return;

	/* Evict all dependencies on same slot. */
	for (i = start_idx; i != rb->write_idx; i++) {
		u16 katom_evict;

		katom_evict = rb->entries[i & JSCTX_RB_MASK].atom_id;
		if (katom_evict != KBASEP_ATOM_ID_INVALID) {
			if (!kbase_js_evict_atom(kctx,
						&kctx->jctx.atoms[katom_evict],
						start_katom, head_katom,
						evict_list, rb, i))
				return;
		}
	}
}

/*
 * Functions private to KBase ('Protected' functions)
 */
int kbasep_js_devdata_init(struct kbase_device * const kbdev)
{
	struct kbasep_js_device_data *jsdd;
	int err;
	int i;
	u16 as_present;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	jsdd = &kbdev->js_data;

	KBASE_DEBUG_ASSERT(jsdd->init_status == JS_DEVDATA_INIT_NONE);

	/* These two must be recalculated if nr_hw_address_spaces changes
	 * (e.g. for HW workarounds) */
	as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
		bool use_workaround;

		use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
		if (use_workaround) {
			dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
			kbdev->nr_user_address_spaces = 1;
		}
	}
#ifdef CONFIG_MALI_DEBUG
	/* Soft-stop will be disabled on a single context by default unless
	 * softstop_always is set */
	jsdd->softstop_always = false;
#endif /* CONFIG_MALI_DEBUG */
	jsdd->nr_all_contexts_running = 0;
	jsdd->nr_user_contexts_running = 0;
	jsdd->nr_contexts_pullable = 0;
	atomic_set(&jsdd->nr_contexts_runnable, 0);
	/* All ASs initially free */
	jsdd->as_free = as_present;
	/* No ctx allowed to submit */
	jsdd->runpool_irq.submit_allowed = 0u;
	memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
			sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
	memset(jsdd->runpool_irq.slot_affinities, 0,
			sizeof(jsdd->runpool_irq.slot_affinities));
	memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
			sizeof(jsdd->runpool_irq.slot_affinity_refcount));
	INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);

	/* Config attributes */
	jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
	jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
	jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
	else
		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
	jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
	jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
	else
		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
	jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
	jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
	jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
	jsdd->cfs_ctx_runtime_init_slices =
		DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
	jsdd->cfs_ctx_runtime_min_slices =
		DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;

	dev_dbg(kbdev->dev, "JS Config Attribs: ");
	dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
			jsdd->scheduling_period_ns);
	dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
			jsdd->soft_stop_ticks);
	dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
			jsdd->soft_stop_ticks_cl);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
			jsdd->hard_stop_ticks_ss);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
			jsdd->hard_stop_ticks_cl);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
			jsdd->hard_stop_ticks_dumping);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
			jsdd->gpu_reset_ticks_ss);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
			jsdd->gpu_reset_ticks_cl);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
			jsdd->gpu_reset_ticks_dumping);
	dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
			jsdd->ctx_timeslice_ns);
	dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u",
			jsdd->cfs_ctx_runtime_init_slices);
	dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u",
			jsdd->cfs_ctx_runtime_min_slices);

	if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
			jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
			jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
			jsdd->hard_stop_ticks_dumping <
			jsdd->gpu_reset_ticks_dumping)) {
		dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
		return -EINVAL;
	}

#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
	dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
			jsdd->soft_stop_ticks,
			jsdd->scheduling_period_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
	dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
			jsdd->hard_stop_ticks_ss,
			jsdd->hard_stop_ticks_dumping,
			jsdd->scheduling_period_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
	dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
#endif

	/* setup the number of irq throttle cycles based on given time */
	{
		int time_us = kbdev->gpu_props.irq_throttle_time_us;
		int cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev,
				time_us);

		atomic_set(&kbdev->irq_throttle_cycles, cycles);
	}

	/* Clear the AS data, including setting NULL pointers */
	memset(&jsdd->runpool_irq.per_as_data[0], 0,
			sizeof(jsdd->runpool_irq.per_as_data));

	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
			kbdev->gpu_props.props.raw_props.js_features[i]);

	jsdd->init_status |= JS_DEVDATA_INIT_CONSTANTS;

	/* On error, we could continue on: providing none of the below resources
	 * rely on the ones above */

	mutex_init(&jsdd->runpool_mutex);
	mutex_init(&jsdd->queue_mutex);
	spin_lock_init(&jsdd->runpool_irq.lock);
	sema_init(&jsdd->schedule_sem, 1);

	err = kbasep_js_policy_init(kbdev);
	if (!err)
		jsdd->init_status |= JS_DEVDATA_INIT_POLICY;

	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
		INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]);
		INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]);
	}

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if (jsdd->init_status != JS_DEVDATA_INIT_ALL)
		return -EINVAL;

	return 0;
}

void kbasep_js_devdata_halt(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

void kbasep_js_devdata_term(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	js_devdata = &kbdev->js_data;

	if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) {
		s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
		/* The caller must de-register all contexts before calling this
		 * function */
		KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
		KBASE_DEBUG_ASSERT(memcmp(
				js_devdata->runpool_irq.ctx_attr_ref_count,
				zero_ctx_attr_ref_count,
				sizeof(zero_ctx_attr_ref_count)) == 0);
		CSTD_UNUSED(zero_ctx_attr_ref_count);
	}

	if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY))
		kbasep_js_policy_term(&js_devdata->policy);

	js_devdata->init_status = JS_DEVDATA_INIT_NONE;
}

int kbasep_js_kctx_init(struct kbase_context * const kctx)
{
	struct kbase_device *kbdev;
	struct kbasep_js_kctx_info *js_kctx_info;
	int err;
	int i;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
		INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);

	js_kctx_info = &kctx->jctx.sched_info;
	KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE);

	js_kctx_info->ctx.nr_jobs = 0;
	js_kctx_info->ctx.is_scheduled = false;
	js_kctx_info->ctx.is_dying = false;
	memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
			sizeof(js_kctx_info->ctx.ctx_attr_ref_count));

	/* Initially, the context is disabled from submission until the create
	 * flags are set */
	js_kctx_info->ctx.flags = KBASE_CTX_FLAG_SUBMIT_DISABLED;

	js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;

	/* On error, we could continue on: providing none of the below resources
	 * rely on the ones above */
	mutex_init(&js_kctx_info->ctx.jsctx_mutex);

	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);

	err = kbasep_js_policy_init_ctx(kbdev, kctx);
	if (!err)
		js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if (js_kctx_info->init_status != JS_KCTX_INIT_ALL)
		return -EINVAL;

	return 0;
}

void kbasep_js_kctx_term(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	int js;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) {
		/* The caller must de-register all jobs before calling this */
		KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
		KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
	}

	mutex_lock(&kbdev->js_data.queue_mutex);
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
	mutex_unlock(&kbdev->js_data.queue_mutex);

	if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY))
		kbasep_js_policy_term_ctx(js_policy, kctx);

	js_kctx_info->init_status = JS_KCTX_INIT_NONE;
}

/**
 * kbase_js_ctx_list_add_pullable - Add context to the tail of the per-slot
 *                                  pullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * If the context is on either the pullable or unpullable queues, then it is
 * removed before being added to the tail.
 *
 * This function should be used when queueing a context for the first time, or
 * re-queueing a context that has been pulled from.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_pullable(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
					&kbdev->js_data.ctx_list_pullable[js]);

	if (!kctx->slots_pullable) {
		kbdev->js_data.nr_contexts_pullable++;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled))
			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
	}
	kctx->slots_pullable |= (1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
 *                                       per-slot pullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * If the context is on either the pullable or unpullable queues, then it is
 * removed before being added to the head.
 *
 * This function should be used when a context has been scheduled, but no jobs
 * can currently be pulled from it.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
					&kbdev->js_data.ctx_list_pullable[js]);

	if (!kctx->slots_pullable) {
		kbdev->js_data.nr_contexts_pullable++;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled))
			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
	}
	kctx->slots_pullable |= (1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_add_unpullable - Add context to the tail of the per-slot
 *                                    unpullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * The context must already be on the per-slot pullable queue. It will be
 * removed from the pullable queue before being added to the unpullable queue.
 *
 * This function should be used when a context has been pulled from, and there
 * are no jobs remaining on the specified slot.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_unpullable(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
				&kbdev->js_data.ctx_list_unpullable[js]);

	if (kctx->slots_pullable == (1 << js)) {
		kbdev->js_data.nr_contexts_pullable--;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled))
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
	}
	kctx->slots_pullable &= ~(1 << js);

	return ret;
}

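/*
 * Illustrative note (not part of the original driver): kctx->slots_pullable
 * is a per-context bitmask with one bit per job slot, and the global
 * nr_contexts_pullable counter only moves on transitions between "no
 * pullable slots" and "at least one pullable slot". For example:
 *
 *   slots_pullable: 0x0 -> add_pullable(js=1)   -> 0x2  (counter++)
 *   slots_pullable: 0x2 -> add_pullable(js=0)   -> 0x3  (no change)
 *   slots_pullable: 0x3 -> add_unpullable(js=0) -> 0x2  (no change)
 *   slots_pullable: 0x2 -> add_unpullable(js=1) -> 0x0  (counter--)
 */
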
/**
 * kbase_js_ctx_list_remove - Remove context from the per-slot pullable or
 *                            unpullable context queues
 * @kbdev: Device pointer
 * @kctx:  Context to remove from queue
 * @js:    Job slot to use
 *
 * The context must already be on one of the queues.
 *
 * This function should be used when a context has no jobs on the GPU, and no
 * jobs remaining for the specified slot.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_remove(struct kbase_device *kbdev,
					struct kbase_context *kctx,
					int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));

	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	if (kctx->slots_pullable == (1 << js)) {
		kbdev->js_data.nr_contexts_pullable--;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled))
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
	}
	kctx->slots_pullable &= ~(1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
 *                              queue
 * @kbdev: Device pointer
 * @js:    Job slot to use
 *
 * Caller must hold kbasep_jd_device_data::queue_mutex
 *
 * Return: Context to use for specified slot.
 *         NULL if no contexts present for specified slot
 */
static struct kbase_context *kbase_js_ctx_list_pop_head(
						struct kbase_device *kbdev,
						int js)
{
	struct kbase_context *kctx;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);

	if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
		return NULL;

	kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next,
					struct kbase_context,
					jctx.sched_info.ctx.ctx_list_entry[js]);

	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	return kctx;
}

/**
 * kbase_js_ctx_pullable - Return if a context can be pulled from on the
 *                         specified slot
 * @kctx:         Context pointer
 * @js:           Job slot to use
 * @is_scheduled: true if the context is currently scheduled
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if context can be pulled from on specified slot,
 *         false otherwise
 */
static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
					bool is_scheduled)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbase_jd_atom *katom;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	js_devdata = &kctx->kbdev->js_data;

	if (is_scheduled) {
		if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
			return false;
	}
	katom = jsctx_rb_peek(kctx, js);
	if (!katom)
		return false; /* No pullable atoms */
	if (atomic_read(&katom->blocked))
		return false; /* next atom blocked */
	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
		if (katom->x_pre_dep->gpu_rb_state ==
					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)
			return false;
		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
			return false;
	}

	return true;
}

static bool kbase_js_dep_validate(struct kbase_context *kctx,
				struct kbase_jd_atom *katom)
{
	struct kbase_device *kbdev = kctx->kbdev;
	bool ret = true;
	bool has_dep = false, has_x_dep = false;
	int js = kbase_js_get_slot(kbdev, katom);
	int prio = katom->sched_priority;
	int i;

	for (i = 0; i < 2; i++) {
		struct kbase_jd_atom *dep_atom = katom->dep[i].atom;

		if (dep_atom) {
			int dep_js = kbase_js_get_slot(kbdev, dep_atom);
			int dep_prio = dep_atom->sched_priority;

			/* Dependent atom must already have been submitted */
			if (!(dep_atom->atom_flags &
					KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)) {
				ret = false;
				break;
			}

			/* Dependencies with different priorities can't
			 * be represented in the ringbuffer */
			if (prio != dep_prio) {
				ret = false;
				break;
			}

			if (js == dep_js) {
				/* Only one same-slot dependency can be
				 * represented in the ringbuffer */
				if (has_dep) {
					ret = false;
					break;
				}
				has_dep = true;
			} else {
				/* Only one cross-slot dependency can be
				 * represented in the ringbuffer */
				if (has_x_dep) {
					ret = false;
					break;
				}
				/* Each dependee atom can only have one
				 * cross-slot dependency */
				if (dep_atom->x_post_dep) {
					ret = false;
					break;
				}
				/* The dependee atom can not already be in the
				 * HW access ringbuffer */
				if (dep_atom->gpu_rb_state !=
					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
					ret = false;
					break;
				}
				/* The dependee atom can not already have
				 * completed */
				if (dep_atom->status !=
						KBASE_JD_ATOM_STATE_IN_JS) {
					ret = false;
					break;
				}
				/* Cross-slot dependencies must not violate
				 * PRLAM-8987 affinity restrictions */
				if (kbase_hw_has_issue(kbdev,
							BASE_HW_ISSUE_8987) &&
						(js == 2 || dep_js == 2)) {
					ret = false;
					break;
				}
				has_x_dep = true;
			}

			if (kbase_jd_katom_dep_type(&katom->dep[i]) ==
						BASE_JD_DEP_TYPE_DATA &&
					js == dep_js) {
				struct kbase_jd_atom *last_atom =
						jsctx_rb_peek_last(kctx, js,
								prio);

				/* Last atom on slot must be pre-dep for this
				 * atom */
				if (last_atom != dep_atom) {
					ret = false;
					break;
				}
			}

			/* Dependency can be represented in ringbuffers */
		}
	}

	/* If dependencies can be represented by ringbuffer then clear them from
	 * the atom structure */
	if (ret) {
		for (i = 0; i < 2; i++) {
			struct kbase_jd_atom *dep_atom = katom->dep[i].atom;

			if (dep_atom) {
				int dep_js = kbase_js_get_slot(kbdev, dep_atom);

				if ((js != dep_js) &&
					(dep_atom->status !=
						KBASE_JD_ATOM_STATE_COMPLETED)
					&& (dep_atom->status !=
					KBASE_JD_ATOM_STATE_HW_COMPLETED)
					&& (dep_atom->status !=
						KBASE_JD_ATOM_STATE_UNUSED)) {

					katom->atom_flags |=
						KBASE_KATOM_FLAG_X_DEP_BLOCKED;
					katom->x_pre_dep = dep_atom;
					dep_atom->x_post_dep = katom;
					if (kbase_jd_katom_dep_type(
							&katom->dep[i]) ==
							BASE_JD_DEP_TYPE_DATA)
						katom->atom_flags |=
						KBASE_KATOM_FLAG_FAIL_BLOCKER;
				}
				if ((kbase_jd_katom_dep_type(&katom->dep[i])
						== BASE_JD_DEP_TYPE_DATA) &&
						(js == dep_js))
					katom->atom_flags |=
						KBASE_KATOM_FLAG_FAIL_PREV;

				list_del(&katom->dep_item[i]);
				kbase_jd_katom_dep_clear(&katom->dep[i]);
			}
		}
	}

	return ret;
}

bool kbasep_js_add_job(struct kbase_context *kctx,
		struct kbase_jd_atom *atom)
{
	unsigned long flags;
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;

	bool enqueue_required = false;
	bool timer_sync = false;

	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(atom != NULL);
	lockdep_assert_held(&kctx->jctx.lock);

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	/*
	 * Begin Runpool transaction
	 */
	mutex_lock(&js_devdata->runpool_mutex);

	/* Refcount ctx.nr_jobs */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
	++(js_kctx_info->ctx.nr_jobs);

	/* Setup any scheduling information */
	kbasep_js_clear_job_retry_submit(atom);

	/* Lock for state available during IRQ */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (!kbase_js_dep_validate(kctx, atom)) {
		/* Dependencies could not be represented */
		--(js_kctx_info->ctx.nr_jobs);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_devdata->runpool_mutex);

		goto out_unlock;
	}

	KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));

	if (kbase_js_dep_resolved_submit(kctx, atom, &enqueue_required) != 0) {
		/* Ringbuffer was full (should be impossible) - fail the job */
		--(js_kctx_info->ctx.nr_jobs);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_devdata->runpool_mutex);

		atom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

		goto out_unlock;
	}

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
			kbasep_js_trace_get_refcnt_nolock(kbdev, kctx));

	/* Context Attribute Refcounting */
	kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);

	if (enqueue_required) {
		if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
			timer_sync = kbase_js_ctx_list_add_pullable(kbdev, kctx,
					atom->slot_nr);
		else
			timer_sync = kbase_js_ctx_list_add_unpullable(kbdev,
					kctx, atom->slot_nr);
	}
	/* If this context is active and the atom is the first on its slot,
	 * kick the job manager to attempt to fast-start the atom */
	if (enqueue_required && kctx == kbdev->hwaccess.active_kctx)
		kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	if (timer_sync)
		kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&js_devdata->runpool_mutex);
	/* End runpool transaction */

	if (!js_kctx_info->ctx.is_scheduled) {
		if (js_kctx_info->ctx.is_dying) {
			/* A job got added while/after kbase_job_zap_context()
			 * was called on a non-scheduled context (e.g. KDS
			 * dependency resolved). Kill that job by killing the
			 * context. */
			kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
					false);
		} else if (js_kctx_info->ctx.nr_jobs == 1) {
			/* Handle Refcount going from 0 to 1: schedule the
			 * context on the Policy Queue */
			KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
			dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);

			/* Policy Queue was updated - caller must try to
			 * schedule the head context */
			WARN_ON(!enqueue_required);
		}
	}
out_unlock:
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	mutex_unlock(&js_devdata->queue_mutex);

	return enqueue_required;
}

void kbasep_js_remove_job(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_jd_atom *atom)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(atom != NULL);

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
			kbasep_js_trace_get_refcnt(kbdev, kctx));

	/* De-refcount ctx.nr_jobs */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
	--(js_kctx_info->ctx.nr_jobs);
}

bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	unsigned long flags;
	struct kbasep_js_atom_retained_state katom_retained_state;
	struct kbasep_js_device_data *js_devdata;
	bool attr_state_changed;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom != NULL);

	js_devdata = &kbdev->js_data;

	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
	kbasep_js_remove_job(kbdev, kctx, katom);

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* The atom has 'finished' (will not be re-run), so no need to call
	 * kbasep_js_has_atom_finished().
	 *
	 * This is because it returns false for soft-stopped atoms, but we
	 * want to override that, because we're cancelling an atom regardless of
	 * whether it was soft-stopped or not */
	attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
			&katom_retained_state);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return attr_state_changed;
}

bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	bool result;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	js_devdata = &kbdev->js_data;

	/* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0,
	   kbasep_js_trace_get_refcnt(kbdev, kctx)); */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return result;
}

struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
		int as_nr)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_context *found_kctx = NULL;
	struct kbasep_js_per_as_data *js_per_as_data;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
	js_devdata = &kbdev->js_data;
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	found_kctx = js_per_as_data->kctx;

	if (found_kctx != NULL)
		++(js_per_as_data->as_busy_refcount);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return found_kctx;
}

struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock(
		struct kbase_device *kbdev, int as_nr)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbase_context *found_kctx = NULL;
	struct kbasep_js_per_as_data *js_per_as_data;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);

	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

	js_devdata = &kbdev->js_data;
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

	found_kctx = js_per_as_data->kctx;

	if (found_kctx != NULL)
		++(js_per_as_data->as_busy_refcount);

	return found_kctx;
}

/**
 * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
 *                                releasing a context and/or atom
 * @kbdev:                   The kbase_device to operate on
 * @kctx:                    The kbase_context to operate on
 * @katom_retained_state:    Retained state from the atom
 * @runpool_ctx_attr_change: True if the runpool context attributes have changed
 *
 * This collates a set of actions that must happen whilst
 * kbasep_js_device_data.runpool_irq.lock is held.
 *
 * This includes running more jobs when:
 * - The previously released kctx caused a ctx attribute change,
 * - The released atom caused a ctx attribute change,
 * - Slots were previously blocked due to affinity restrictions,
 * - Submission during IRQ handling failed.
 *
 * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
 *         changed. The caller should try scheduling all contexts
 */
static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
		struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state,
		bool runpool_ctx_attr_change)
{
	struct kbasep_js_device_data *js_devdata;
	kbasep_js_release_result result = 0;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);
	lockdep_assert_held(&js_devdata->runpool_irq.lock);

	if (js_devdata->nr_user_contexts_running != 0) {
		bool retry_submit = false;
		int retry_jobslot = 0;

		if (katom_retained_state)
			retry_submit = kbasep_js_get_atom_retry_submit_slot(
					katom_retained_state, &retry_jobslot);

		if (runpool_ctx_attr_change || retry_submit) {
			/* A change in runpool ctx attributes might mean we can
			 * run more jobs than before */
			result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;

			KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
						kctx, NULL, 0u, retry_jobslot);
		}
	}

	return result;
}

/*
 * Internal function to release the reference on a ctx and an atom's "retained
 * state", only taking the runpool and as transaction mutexes
 *
 * This also starts more jobs running in the case of a ctx-attribute state
 * change
 *
 * This does none of the followup actions for scheduling:
 * - It does not schedule in a new context
 * - It does not requeue or handle dying contexts
 *
 * For those tasks, just call kbasep_js_runpool_release_ctx() instead
 *
 * Requires:
 * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
 * - Context has a non-zero refcount
 * - Caller holds js_kctx_info->ctx.jsctx_mutex
 * - Caller holds js_devdata->runpool_mutex
 */
static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
		struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_per_as_data *js_per_as_data;

	kbasep_js_release_result release_result = 0u;
	bool runpool_ctx_attr_change = false;
	int kctx_as_nr;
	struct kbase_as *current_as;
	int new_ref_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled);

	/* kctx->as_nr and js_per_as_data are only read from here. The caller's
	 * js_ctx_mutex provides a barrier that ensures they are up-to-date.
	 *
	 * They will not change whilst we're reading them, because the refcount
	 * is non-zero (and we ASSERT on that last fact).
	 */
	kctx_as_nr = kctx->as_nr;
	KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr];
	KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);

	/*
	 * Transaction begins on AS and runpool_irq
	 *
	 * Assert about our calling contract
	 */
	current_as = &kbdev->as[kctx_as_nr];
	mutex_lock(&kbdev->pm.lock);
	mutex_lock(&current_as->transaction_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
	KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);

	/* Update refcount */
	new_ref_count = --(js_per_as_data->as_busy_refcount);

	/* Release the atom if it finished (i.e. wasn't soft-stopped) */
	if (kbasep_js_has_atom_finished(katom_retained_state))
		runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
				kbdev, kctx, katom_retained_state);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
			new_ref_count);

	if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags &
			KBASE_CTX_FLAG_PRIVILEGED &&
			!kbase_pm_is_suspending(kbdev)) {
		/* Context is kept scheduled into an address space even when
		 * there are no jobs, in this case we have to handle the
		 * situation where all jobs have been evicted from the GPU and
		 * submission is disabled.
		 *
		 * At this point we re-enable submission to allow further jobs
		 * to be executed
		 */
		kbasep_js_set_submit_allowed(js_devdata, kctx);
	}

	/* Make a set of checks to see if the context should be scheduled out */
	if (new_ref_count == 0 &&
			(!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
						kbdev->pm.suspending)) {
		/* Last reference, and we've been told to remove this context
		 * from the Run Pool */
		dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
				kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
				kbasep_js_is_submit_allowed(js_devdata, kctx));

#if defined(CONFIG_MALI_GATOR_SUPPORT)
		kbase_trace_mali_mmu_as_released(kctx->as_nr);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
		kbase_tlstream_tl_nret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
#endif

		kbase_backend_release_ctx_irq(kbdev, kctx);

		if (kbdev->hwaccess.active_kctx == kctx)
			kbdev->hwaccess.active_kctx = NULL;

		/* Ctx Attribute handling
		 *
		 * Releasing atoms attributes must either happen before this, or
		 * after 'is_scheduled' is changed, otherwise we double-decrement
		 * the attributes
		 */
		runpool_ctx_attr_change |=
			kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);

		/* Releasing the context and katom retained state can allow
		 * more jobs to run */
		release_result |=
			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
						kctx, katom_retained_state,
						runpool_ctx_attr_change);

		/*
		 * Transaction ends on AS and runpool_irq:
		 *
		 * By this point, the AS-related data is now clear and ready
		 * for re-use.
		 *
		 * Since releases only occur once for each previous successful
		 * retain, and no more retains are allowed on this context, no
		 * other thread will be operating in this code whilst we are.
		 */
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

		kbase_backend_release_ctx_noirq(kbdev, kctx);

		mutex_unlock(&current_as->transaction_mutex);
		mutex_unlock(&kbdev->pm.lock);

		/* Note: Don't reuse kctx_as_nr now */

		/* Synchronize with any policy timers */
		kbase_backend_ctx_count_changed(kbdev);

		/* update book-keeping info */
		js_kctx_info->ctx.is_scheduled = false;
		/* Signal any waiter that the context is not scheduled, so is
		 * safe for termination - once the jsctx_mutex is also dropped,
		 * and jobs have finished. */
		wake_up(&js_kctx_info->ctx.is_scheduled_wait);

		/* Queue an action to occur after we've dropped the lock */
		release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED;
	} else {
		kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
				katom_retained_state, runpool_ctx_attr_change);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&current_as->transaction_mutex);
		mutex_unlock(&kbdev->pm.lock);
	}

	return release_result;
}

void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_atom_retained_state katom_retained_state;

	/* Setup a dummy katom_retained_state */
	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);

	kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			&katom_retained_state);
}

void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx, bool has_pm_ref)
{
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_policy = &kbdev->js_data.policy;
	js_devdata = &kbdev->js_data;

	/* This is called if and only if you've detached the context from
	 * the Runpool or the Policy Queue, and not added it back to the Runpool
	 */
	KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);

	if (js_kctx_info->ctx.is_dying) {
		/* Dying: don't requeue, but kill all jobs on the context. This
		 * happens asynchronously */
		dev_dbg(kbdev->dev,
			"JS: ** Killing Context %p on RunPool Remove **", kctx);
		kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
	}
}

void kbasep_js_runpool_release_ctx_and_katom_retained_state(
		struct kbase_device *kbdev, struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	base_jd_event_code event_code;
	kbasep_js_release_result release_result;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	event_code = katom_retained_state->event_code;

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			katom_retained_state);

	/* Drop the runpool mutex to allow requeuing kctx */
	mutex_unlock(&js_devdata->runpool_mutex);

	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);

	/* Drop the jsctx_mutex to allow scheduling in a new context */
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_unlock(&js_devdata->queue_mutex);

	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
		kbase_js_sched_all(kbdev);
}

void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_atom_retained_state katom_retained_state;

	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);

	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
			&katom_retained_state);
}

/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
 * kbase_js_sched_all() */
static void kbasep_js_runpool_release_ctx_no_schedule(
		struct kbase_device *kbdev, struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	kbasep_js_release_result release_result;
	struct kbasep_js_atom_retained_state katom_retained_state_struct;
	struct kbasep_js_atom_retained_state *katom_retained_state =
		&katom_retained_state_struct;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			katom_retained_state);

	/* Drop the runpool mutex to allow requeuing kctx */
	mutex_unlock(&js_devdata->runpool_mutex);
	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);

	/* Drop the jsctx_mutex to allow scheduling in a new context */
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	/* NOTE: could return release_result if the caller would like to know
	 * whether it should schedule a new context, but currently no callers do
	 */
}

1924 * kbase_js_set_timeouts - update all JS timeouts with user specified data
1925 * @kbdev: Device pointer
1927 * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
1928 * set to a positive number then that becomes the new value used, if a timeout
1929 * is negative then the default is set.
static void kbase_js_set_timeouts(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_data = &kbdev->js_data;

	if (kbdev->js_scheduling_period_ns < 0)
		js_data->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
	else if (kbdev->js_scheduling_period_ns > 0)
		js_data->scheduling_period_ns = kbdev->js_scheduling_period_ns;

	if (kbdev->js_soft_stop_ticks < 0)
		js_data->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
	else if (kbdev->js_soft_stop_ticks > 0)
		js_data->soft_stop_ticks = kbdev->js_soft_stop_ticks;

	if (kbdev->js_soft_stop_ticks_cl < 0)
		js_data->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
	else if (kbdev->js_soft_stop_ticks_cl > 0)
		js_data->soft_stop_ticks_cl = kbdev->js_soft_stop_ticks_cl;

	if (kbdev->js_hard_stop_ticks_ss < 0) {
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
			js_data->hard_stop_ticks_ss =
					DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
		else
			js_data->hard_stop_ticks_ss =
					DEFAULT_JS_HARD_STOP_TICKS_SS;
	} else if (kbdev->js_hard_stop_ticks_ss > 0) {
		js_data->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss;
	}

	if (kbdev->js_hard_stop_ticks_cl < 0)
		js_data->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
	else if (kbdev->js_hard_stop_ticks_cl > 0)
		js_data->hard_stop_ticks_cl = kbdev->js_hard_stop_ticks_cl;

	if (kbdev->js_hard_stop_ticks_dumping < 0)
		js_data->hard_stop_ticks_dumping =
				DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
	else if (kbdev->js_hard_stop_ticks_dumping > 0)
		js_data->hard_stop_ticks_dumping =
				kbdev->js_hard_stop_ticks_dumping;

	if (kbdev->js_reset_ticks_ss < 0) {
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
			js_data->gpu_reset_ticks_ss =
					DEFAULT_JS_RESET_TICKS_SS_8408;
		else
			js_data->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
	} else if (kbdev->js_reset_ticks_ss > 0) {
		js_data->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss;
	}

	if (kbdev->js_reset_ticks_cl < 0)
		js_data->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
	else if (kbdev->js_reset_ticks_cl > 0)
		js_data->gpu_reset_ticks_cl = kbdev->js_reset_ticks_cl;

	if (kbdev->js_reset_ticks_dumping < 0)
		js_data->gpu_reset_ticks_dumping =
				DEFAULT_JS_RESET_TICKS_DUMPING;
	else if (kbdev->js_reset_ticks_dumping > 0)
		js_data->gpu_reset_ticks_dumping =
				kbdev->js_reset_ticks_dumping;
}
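
/*
 * Attempt to schedule a context into the runpool: pick a free address space,
 * then perform an atomic transaction on the context and runpool state under
 * jsctx_mutex/runpool_mutex. Returns false if no address space is free, the
 * context is dying, the backend refuses the context, or a pending suspend
 * forces the context straight back out again.
 */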
static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	struct kbase_as *new_address_space = NULL;
	unsigned long flags;
	bool kctx_suspended = false;
	int as_nr;

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	/* Pick available address space for this context */
	as_nr = kbase_backend_find_free_address_space(kbdev, kctx);

	if (as_nr == KBASEP_AS_NR_INVALID)
		return false; /* No address spaces currently available */

	new_address_space = &kbdev->as[as_nr];

	/*
	 * Atomic transaction on the Context and Run Pool begins
	 */
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	/* Check to see if context is dying due to kbase_job_zap_context() */
	if (js_kctx_info->ctx.is_dying) {
		/* Roll back the transaction so far and return */
		kbase_backend_release_free_address_space(kbdev, as_nr);

		mutex_unlock(&js_devdata->runpool_mutex);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

		return false;
	}

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
				0u,
				kbasep_js_trace_get_refcnt(kbdev, kctx));

	if (js_devdata->nr_user_contexts_running == 0 &&
			kbdev->js_timeouts_updated) {
		/* Only when there are no other contexts submitting jobs:
		 * Latch in run-time job scheduler timeouts that were set
		 * through js_timeouts sysfs file */
		kbase_js_set_timeouts(kbdev);

		kbdev->js_timeouts_updated = false;
	}

	js_kctx_info->ctx.is_scheduled = true;

	mutex_lock(&new_address_space->transaction_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* Assign context to previously chosen address space */
	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&new_address_space->transaction_mutex);
		/* If address space is not pending, then kbase_backend_use_ctx()
		 * failed. Roll back the transaction so far and return */
		if (!kctx->as_pending) {
			js_kctx_info->ctx.is_scheduled = false;

			kbase_backend_release_free_address_space(kbdev, as_nr);
		}

		mutex_unlock(&js_devdata->runpool_mutex);

		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		return false;
	}

	kbdev->hwaccess.active_kctx = kctx;

#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
	kbase_tlstream_tl_ret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
#endif

	/* Cause any future waiter-on-termination to wait until the context is
	 * descheduled */
	wake_up(&js_kctx_info->ctx.is_scheduled_wait);

	/* Re-check for suspending: a suspend could've occurred, and all the
	 * contexts could've been removed from the runpool before we took this
	 * lock. In this case, we don't want to allow this context to run jobs,
	 * we just want it out immediately.
	 *
	 * The DMB required to read the suspend flag was issued recently as part
	 * of the runpool_irq locking. If a suspend occurs *after* that lock was
	 * taken (i.e. this condition doesn't execute), then the
	 * kbasep_js_suspend() code will cleanup this context instead (by virtue
	 * of it being called strictly after the suspend flag is set, and will
	 * wait for this lock to drop) */
	if (kbase_pm_is_suspending(kbdev)) {
		/* Cause it to leave at some later point */
		bool retained;

		retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
		KBASE_DEBUG_ASSERT(retained);

		kbasep_js_clear_submit_allowed(js_devdata, kctx);
		kctx_suspended = true;
	}

	/* Transaction complete */
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	mutex_unlock(&new_address_space->transaction_mutex);

	/* Synchronize with any policy timers */
	kbase_backend_ctx_count_changed(kbdev);

	mutex_unlock(&js_devdata->runpool_mutex);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	/* Note: after this point, the context could potentially get scheduled
	 * out immediately */

	if (kctx_suspended) {
		/* Finishing forcing out the context due to a suspend. Use a
		 * variant of kbasep_js_runpool_release_ctx() that doesn't
		 * schedule a new context, to prevent a risk of recursion back
		 * into this function */
		kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
		return false;
	}

	return true;
}
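
/*
 * Fast path for making a context current on the GPU: if the backend already
 * has an address space assigned to the context, simply mark it active;
 * otherwise fall back to the full kbasep_js_schedule_ctx() transaction above.
 */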
static bool kbase_js_use_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long flags;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	if (kctx->as_pending) {
		/* Context waiting for AS to be assigned */
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		return false;
	}
	if (kbase_backend_use_ctx_sched(kbdev, kctx)) {
		/* Context already has ASID - mark as active */
		kbdev->hwaccess.active_kctx = kctx;

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		return true; /* Context already scheduled */
	}
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return kbasep_js_schedule_ctx(kbdev, kctx);
}
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	bool is_scheduled;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	/* This must never be attempted whilst suspending - i.e. it should only
	 * happen in response to a syscall from a user-space thread */
	BUG_ON(kbase_pm_is_suspending(kbdev));

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	/* Mark the context as privileged */
	js_kctx_info->ctx.flags |= KBASE_CTX_FLAG_PRIVILEGED;

	is_scheduled = js_kctx_info->ctx.is_scheduled;
	if (!is_scheduled) {
		/* Add the context to the pullable list */
		if (kbase_js_ctx_list_add_pullable(kbdev, kctx, 0))
			kbase_js_sync_timers(kbdev);

		/* Fast-starting requires the jsctx_mutex to be dropped,
		 * because it works on multiple ctxs */
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);

		/* Try to schedule the context in */
		kbase_js_sched_all(kbdev);

		/* Wait for the context to be scheduled in */
		wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
			kctx->jctx.sched_info.ctx.is_scheduled);
	} else {
		/* Already scheduled in - We need to retain it to keep the
		 * corresponding address space */
		kbasep_js_runpool_retain_ctx(kbdev, kctx);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);
	}
}
KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	bool pending;

	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;

	/* We don't need to use the address space anymore */
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED);
	pending = kctx->as_pending;
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	/* Release the context - it will be scheduled out if there is no
	 * pending job */
	if (!pending)
		kbasep_js_runpool_release_ctx(kbdev, kctx);

	kbase_js_sched_all(kbdev);
}
KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
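
/*
 * Suspend the job scheduler: submission is disabled for all contexts, then
 * every context currently resident in an address space is retained and
 * subsequently released, guaranteeing each one is pulled out of the runpool
 * at some later point. The 'retained' bitmask records, one bit per address
 * space, which contexts took the extra reference in the first pass.
 */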
void kbasep_js_suspend(struct kbase_device *kbdev)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	int i;
	u16 retained = 0u;
	int nr_privileged_ctx = 0;

	KBASE_DEBUG_ASSERT(kbdev);
	KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* Prevent all contexts from submitting */
	js_devdata->runpool_irq.submit_allowed = 0;

	/* Retain each of the contexts, so we can cause it to leave even if it
	 * had no refcount to begin with */
	for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
		struct kbasep_js_per_as_data *js_per_as_data =
				&js_devdata->runpool_irq.per_as_data[i];
		struct kbase_context *kctx = js_per_as_data->kctx;

		retained = retained << 1;

		if (kctx) {
			++(js_per_as_data->as_busy_refcount);
			retained |= 1u;
			/* We can only cope with up to 1 privileged context -
			 * the instrumented context. It'll be suspended by
			 * disabling instrumentation */
			if (kctx->jctx.sched_info.ctx.flags &
					KBASE_CTX_FLAG_PRIVILEGED)
				KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1);
		}
	}
	CSTD_UNUSED(nr_privileged_ctx);
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	/* De-ref the previous retain to ensure each context gets pulled out
	 * sometime later. */
	for (i = 0;
	     i < BASE_MAX_NR_AS;
	     ++i, retained = retained >> 1) {
		struct kbasep_js_per_as_data *js_per_as_data =
				&js_devdata->runpool_irq.per_as_data[i];
		struct kbase_context *kctx = js_per_as_data->kctx;

		if (retained & 1u)
			kbasep_js_runpool_release_ctx(kbdev, kctx);
	}

	/* Caller must wait for all Power Manager active references to be
	 * released */
}
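
/*
 * Resume the job scheduler after a suspend: walk the unpullable list for
 * every job slot and move any context that is now pullable back onto the
 * pullable list, then kick the scheduler to restart atom processing.
 */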
void kbasep_js_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata;
	int js;

	KBASE_DEBUG_ASSERT(kbdev);
	js_devdata = &kbdev->js_data;
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	mutex_lock(&js_devdata->queue_mutex);
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
		struct kbase_context *kctx, *n;

		list_for_each_entry_safe(kctx, n,
				&kbdev->js_data.ctx_list_unpullable[js],
				jctx.sched_info.ctx.ctx_list_entry[js]) {
			struct kbasep_js_kctx_info *js_kctx_info;
			unsigned long flags;
			bool timer_sync = false;

			js_kctx_info = &kctx->jctx.sched_info;

			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
			mutex_lock(&js_devdata->runpool_mutex);
			spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

			if (!js_kctx_info->ctx.is_scheduled &&
					kbase_js_ctx_pullable(kctx, js, false))
				timer_sync = kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);

			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);
			if (timer_sync)
				kbase_backend_ctx_count_changed(kbdev);
			mutex_unlock(&js_devdata->runpool_mutex);
			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		}
	}
	mutex_unlock(&js_devdata->queue_mutex);

	/* Restart atom processing */
	kbase_js_sched_all(kbdev);

	/* JS Resume complete */
}
bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom)
{
	if ((katom->core_req & BASE_JD_REQ_FS) &&
	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
							BASE_JD_REQ_T)))
		return false;

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
	    (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
		return false;

	return true;
}
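
/*
 * Map an atom's core requirements to a job slot: fragment work goes to slot
 * 0; compute-only work goes to slot 2 when targeting the second of two core
 * groups (or always, on GPUs affected by HW issue 8987); everything else
 * runs on slot 1.
 */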
static int kbase_js_get_slot(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_FS)
		return 0;

	if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
		if (katom->device_nr == 1 &&
				kbdev->gpu_props.num_core_groups == 2)
			return 2;
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
			return 2;
	}

	return 1;
}
int kbase_js_dep_resolved_submit(struct kbase_context *kctx,
				struct kbase_jd_atom *katom,
				bool *enqueue_required)
{
	katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* If slot will transition from unpullable to pullable then add to
	 * pullable list */
	if (jsctx_rb_none_to_pull(kctx, katom->slot_nr))
		*enqueue_required = true;
	else
		*enqueue_required = false;

	/* Check if there are lower priority jobs to soft stop */
	kbase_job_slot_ctx_priority_check_locked(kctx, katom);

	/* Add atom to ring buffer. */
	if (unlikely(jsctx_rb_add_atom(kctx, katom))) {
		/* The ring buffer is full. This should be impossible as the
		 * job dispatcher can not submit enough atoms to exceed the
		 * ring buffer size. Fail the job.
		 */
		WARN(1, "Job submit while JSCTX ringbuffer already full\n");
		return -EINVAL;
	}

	katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;

	return 0;
}
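
/*
 * Pull the next runnable atom for the given slot from the context's
 * ringbuffer. A pull can fail for several reasons: submission is disabled,
 * the device is suspending, the head atom is blocked on a workqueue or a
 * cross-slot dependency, or ordering rules forbid mixing fail-dep atoms
 * from different runs on the same slot. On success the context's pull
 * counts are bumped and a runpool reference is taken on the context.
 */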
struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
{
	struct kbase_jd_atom *katom;
	struct kbasep_js_device_data *js_devdata;
	int pulled;

	KBASE_DEBUG_ASSERT(kctx);

	js_devdata = &kctx->kbdev->js_data;
	lockdep_assert_held(&js_devdata->runpool_irq.lock);

	if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
		return NULL;
	if (kbase_pm_is_suspending(kctx->kbdev))
		return NULL;

	katom = jsctx_rb_peek(kctx, js);
	if (!katom)
		return NULL;

	if (atomic_read(&katom->blocked))
		return NULL;

	/* Due to ordering restrictions when unpulling atoms on failure, we do
	 * not allow multiple runs of fail-dep atoms from the same context to be
	 * present on the same slot */
	if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) &&
			atomic_read(&kctx->atoms_pulled_slot[js])) {
		struct kbase_jd_atom *prev_atom =
				kbase_backend_inspect_tail(kctx->kbdev, js);

		if (prev_atom && prev_atom->kctx != kctx)
			return NULL;
	}

	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
		if (katom->x_pre_dep->gpu_rb_state ==
				KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)
			return NULL;
		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
			return NULL;
	}

	kctx->pulled = true;
	pulled = atomic_inc_return(&kctx->atoms_pulled);
	if (pulled == 1 && !kctx->slots_pullable)
		atomic_inc(&kctx->kbdev->js_data.nr_contexts_runnable);
	atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
	jsctx_rb_pull(kctx, katom);

	kbasep_js_runpool_retain_ctx_nolock(kctx->kbdev, kctx);
	katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;

	katom->sched_info.cfs.ticks = 0;

	return katom;
}
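
/*
 * Deferred-work counterpart of kbase_js_unpull(): runs on the context's
 * job_done workqueue once an atom has been returned to the ringbuffer,
 * undoing the accounting done at pull time, re-enabling submission where
 * appropriate, and dropping the runpool reference that the pull took.
 */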
static void js_return_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
									work);
	struct kbase_context *kctx = katom->kctx;
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
	struct kbasep_js_atom_retained_state retained_state;
	int js = katom->slot_nr;
	bool timer_sync = false;
	bool context_idle = false;
	unsigned long flags;
	base_jd_core_req core_req = katom->core_req;
	u64 affinity = katom->affinity;
	enum kbase_atom_coreref_state coreref_state = katom->coreref_state;

	kbase_backend_complete_wq(kbdev, katom);

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);

	kbasep_js_atom_retained_state_copy(&retained_state, katom);

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	atomic_dec(&kctx->atoms_pulled);
	atomic_dec(&kctx->atoms_pulled_slot[js]);

	atomic_dec(&katom->blocked);

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
			jsctx_rb_none_to_pull(kctx, js))
		timer_sync |= kbase_js_ctx_list_remove(kbdev, kctx, js);

	if (!atomic_read(&kctx->atoms_pulled)) {
		if (!kctx->slots_pullable)
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);

		if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
				!js_kctx_info->ctx.is_dying) {
			int num_slots = kbdev->gpu_props.num_job_slots;
			int slot;

			if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
				kbasep_js_set_submit_allowed(js_devdata, kctx);

			for (slot = 0; slot < num_slots; slot++) {
				if (kbase_js_ctx_pullable(kctx, slot, true))
					timer_sync |=
						kbase_js_ctx_list_add_pullable(
							kbdev, kctx, slot);
			}
		}

		kbase_jm_idle_ctx(kbdev, kctx);

		context_idle = true;
	}

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	if (context_idle) {
		WARN_ON(!kctx->ctx_active);
		kctx->ctx_active = false;
		kbase_pm_context_idle(kbdev);
	}

	if (timer_sync)
		kbase_js_sync_timers(kbdev);

	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_unlock(&js_devdata->queue_mutex);

	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
							&retained_state);

	kbase_js_sched_all(kbdev);

	kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
			coreref_state);
}
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	jsctx_rb_unpull(kctx, katom);

	WARN_ON(work_pending(&katom->work));

	/* Block re-submission until workqueue has run */
	atomic_inc(&katom->blocked);

	kbase_job_check_leave_disjoint(kctx->kbdev, katom);

	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, js_return_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
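
/**
 * kbase_js_evict_atom - Attempt to evict one atom from a context ringbuffer
 * @kctx:        Context pointer
 * @katom_evict: Atom being considered for eviction
 * @start_katom: First atom of the run currently being evicted
 * @head_katom:  Failed atom that triggered the eviction
 * @evict_list:  List onto which blocked cross-slot dependencies are queued
 * @rb:          Ringbuffer containing @katom_evict
 * @idx:         Index of @katom_evict within @rb
 *
 * The head atom's event_code is propagated to each evicted atom. Atoms that
 * are already in a slot ringbuffer, or that completed on the hardware, are
 * left in place.
 *
 * Return: true if @katom_evict was evicted, false if it must remain.
 */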
static bool kbase_js_evict_atom(struct kbase_context *kctx,
				struct kbase_jd_atom *katom_evict,
				struct kbase_jd_atom *start_katom,
				struct kbase_jd_atom *head_katom,
				struct list_head *evict_list,
				struct jsctx_rb *rb, int idx)
{
	struct kbase_jd_atom *x_dep = katom_evict->x_post_dep;

	if (!(katom_evict->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) &&
			katom_evict != start_katom)
		return false;

	if (katom_evict->gpu_rb_state != KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
		WARN_ON(katom_evict->event_code != head_katom->event_code);

		return false;
	}

	if (katom_evict->status == KBASE_JD_ATOM_STATE_HW_COMPLETED &&
			katom_evict != head_katom)
		return false;

	/* Evict cross dependency if present */
	if (x_dep && (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)
			&& (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER))
		list_add_tail(&x_dep->dep_item[0], evict_list);

	/* If cross dependency is present and does not have a data dependency
	 * on the failing atom then unblock */
	if (x_dep && (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)
			&& !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER))
		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;

	if (katom_evict != head_katom) {
		rb->entries[idx & JSCTX_RB_MASK].atom_id =
				KBASEP_ATOM_ID_INVALID;

		katom_evict->event_code = head_katom->event_code;
		katom_evict->atom_flags &=
				~KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;

		if (katom_evict->atom_flags & KBASE_KATOM_FLAG_HOLDING_CTX_REF)
			kbase_jd_done(katom_evict, katom_evict->slot_nr, NULL,
									0);
		else
			kbase_jd_evict(kctx->kbdev, katom_evict);
	}

	return true;
}
/**
 * kbase_js_evict_deps - Evict dependencies
 * @kctx:       Context pointer
 * @head_katom: Pointer to the atom to evict
 *
 * Remove all post dependencies of an atom from the context ringbuffers.
 *
 * The original atom's event_code will be propagated to all dependent atoms.
 *
 * Context: Caller must hold both jctx and HW access locks
 */
static void kbase_js_evict_deps(struct kbase_context *kctx,
				struct kbase_jd_atom *head_katom)
{
	struct list_head evict_list;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	INIT_LIST_HEAD(&evict_list);

	list_add_tail(&head_katom->dep_item[0], &evict_list);

	while (!list_empty(&evict_list)) {
		struct kbase_jd_atom *start_katom;

		start_katom = list_entry(evict_list.prev, struct kbase_jd_atom,
				dep_item[0]);
		list_del(evict_list.prev);

		jsctx_rb_evict(kctx, start_katom, head_katom, &evict_list);
	}
}
/**
 * kbase_js_compact - Compact JSCTX ringbuffers
 * @kctx: Context pointer
 *
 * Compact the JSCTX ringbuffers, removing any NULL entries
 *
 * Context: Caller must hold both jctx and HW access locks
 */
static void kbase_js_compact(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	int js;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		jsctx_rb_compact(kctx, js);
}
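
/*
 * Workqueue-side completion handling for an atom: remove it from the
 * ringbuffer, undo the pull-time accounting, re-enable submission if a
 * failure had disabled it and the context has now drained, and report
 * whether the context has gone idle so the caller can drop its PM
 * reference.
 */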
bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
				struct kbase_jd_atom *katom)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_device *kbdev;
	unsigned long flags;
	bool timer_sync = false;
	int atom_slot;
	bool context_idle = false;

	kbdev = kctx->kbdev;
	atom_slot = katom->slot_nr;

	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);

	mutex_lock(&js_devdata->runpool_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) {
		if (katom->event_code != BASE_JD_EVENT_DONE)
			kbase_js_evict_deps(kctx, katom);

		jsctx_rb_remove(kctx, katom);

		context_idle = !atomic_dec_return(&kctx->atoms_pulled);
		atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);

		if (!atomic_read(&kctx->atoms_pulled) && !kctx->slots_pullable)
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);

		if (katom->event_code != BASE_JD_EVENT_DONE)
			kbase_js_compact(kctx);
	}

	if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
			jsctx_rb_none_to_pull(kctx, atom_slot))
		timer_sync |= kbase_js_ctx_list_remove(kctx->kbdev, kctx,
				atom_slot);

	/*
	 * If submission is disabled on this context (most likely due to an
	 * atom failure) and there are now no atoms left in the system then
	 * re-enable submission so that context can be scheduled again.
	 */
	if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
			!atomic_read(&kctx->atoms_pulled) &&
			!js_kctx_info->ctx.is_dying) {
		int js;

		kbasep_js_set_submit_allowed(js_devdata, kctx);

		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);
		}
	} else if (katom->x_post_dep &&
			kbasep_js_is_submit_allowed(js_devdata, kctx)) {
		int js;

		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);
		}
	}

	/* Mark context as inactive. The pm reference will be dropped later in
	 * jd_done_worker().
	 */
	if (context_idle)
		kctx->ctx_active = false;

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	if (timer_sync)
		kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&js_devdata->runpool_mutex);

	return context_idle;
}
void kbase_js_complete_atom(struct kbase_jd_atom *katom, ktime_t *end_timestamp)
{
	u64 microseconds_spent = 0;
	struct kbase_device *kbdev;
	struct kbase_context *kctx = katom->kctx;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_device_data *js_devdata;

	kbdev = kctx->kbdev;

	js_policy = &kbdev->js_data.policy;
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;

#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP,
			katom->slot_nr), NULL, 0);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
	kbase_tlstream_tl_nret_atom_lpu(
			katom,
			&kbdev->gpu_props.props.raw_props.js_features[
				katom->slot_nr]);
	kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
	kbase_tlstream_tl_nret_ctx_lpu(
			kctx,
			&kbdev->gpu_props.props.raw_props.js_features[
				katom->slot_nr]);
#endif

	/* Calculate the job's time used */
	if (end_timestamp != NULL) {
		/* Only calculating it for jobs that really run on the HW (e.g.
		 * removed from next jobs never actually ran, so really did take
		 * zero time) */
		ktime_t tick_diff = ktime_sub(*end_timestamp,
				katom->start_timestamp);

		microseconds_spent = ktime_to_ns(tick_diff);

		do_div(microseconds_spent, 1000);

		/* Round up time spent to the minimum timer resolution */
		if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
			microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
	}

	/* Log the result of the job (completion status, and time spent). */
	kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent);

	kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);

	/* Unblock cross dependency if present */
	if (katom->x_post_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
			!(katom->x_post_dep->atom_flags &
			KBASE_KATOM_FLAG_FAIL_BLOCKER)))
		katom->x_post_dep->atom_flags &=
			~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
}
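
/*
 * Main scheduling loop: for each slot with work available (js_mask), pop
 * contexts off the pullable list, make them current on the GPU, and kick
 * the job manager to pull atoms. A context that fails to pull is pushed
 * back to the head of the list; a slot with no more usable contexts is
 * cleared from js_mask. schedule_sem serialises concurrent callers.
 */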
void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
{
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;
	bool timer_sync = false;

	js_devdata = &kbdev->js_data;
	js_policy = &js_devdata->policy;

	down(&js_devdata->schedule_sem);
	mutex_lock(&js_devdata->queue_mutex);

	while (js_mask) {
		int js;

		js = ffs(js_mask) - 1;

		while (1) {
			struct kbase_context *kctx;
			unsigned long flags;
			bool context_idle = false;

			kctx = kbase_js_ctx_list_pop_head(kbdev, js);

			if (!kctx) {
				js_mask &= ~(1 << js);
				break; /* No contexts on pullable list */
			}

			if (!kctx->ctx_active) {
				context_idle = true;

				if (kbase_pm_context_active_handle_suspend(
						kbdev,
						KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
					/* Suspend pending - return context to
					 * queue and stop scheduling */
					mutex_lock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
					if (kbase_js_ctx_list_add_pullable_head(
						kctx->kbdev, kctx, js))
						kbase_js_sync_timers(kbdev);
					mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
					mutex_unlock(&js_devdata->queue_mutex);
					up(&js_devdata->schedule_sem);
					return;
				}
				kctx->ctx_active = true;
			}

			if (!kbase_js_use_ctx(kbdev, kctx)) {
				mutex_lock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
				/* Context can not be used at this time */
				spin_lock_irqsave(&js_devdata->runpool_irq.lock,
						flags);
				if (kctx->as_pending ||
					kbase_js_ctx_pullable(kctx, js, false)
					|| (kctx->jctx.sched_info.ctx.flags &
						KBASE_CTX_FLAG_PRIVILEGED))
					timer_sync |=
					kbase_js_ctx_list_add_pullable_head(
							kctx->kbdev, kctx, js);
				else
					timer_sync |=
					kbase_js_ctx_list_add_unpullable(
							kctx->kbdev, kctx, js);
				spin_unlock_irqrestore(
					&js_devdata->runpool_irq.lock, flags);
				mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
				if (context_idle) {
					WARN_ON(!kctx->ctx_active);
					kctx->ctx_active = false;
					kbase_pm_context_idle(kbdev);
				}

				/* No more jobs can be submitted on this slot */
				js_mask &= ~(1 << js);
				break;
			}
			mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
			spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

			kctx->pulled = false;

			if (!kbase_jm_kick(kbdev, 1 << js))
				/* No more jobs can be submitted on this slot */
				js_mask &= ~(1 << js);

			if (!kctx->pulled) {
				/* Failed to pull jobs - push to head of list */
				if (kbase_js_ctx_pullable(kctx, js, true))
					timer_sync |=
					kbase_js_ctx_list_add_pullable_head(
							kctx->kbdev, kctx, js);
				else
					timer_sync |=
					kbase_js_ctx_list_add_unpullable(
							kctx->kbdev, kctx, js);

				if (context_idle) {
					kbase_jm_idle_ctx(kbdev, kctx);
					spin_unlock_irqrestore(
						&js_devdata->runpool_irq.lock,
						flags);
					WARN_ON(!kctx->ctx_active);
					kctx->ctx_active = false;
					kbase_pm_context_idle(kbdev);
				} else {
					spin_unlock_irqrestore(
						&js_devdata->runpool_irq.lock,
						flags);
				}
				mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);

				js_mask &= ~(1 << js);
				break; /* Could not run atoms on this slot */
			}

			/* Push to back of list */
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kctx->kbdev, kctx, js);
			else
				timer_sync |= kbase_js_ctx_list_add_unpullable(
						kctx->kbdev, kctx, js);
			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);
			mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
		}
	}

	if (timer_sync)
		kbase_js_sync_timers(kbdev);

	mutex_unlock(&js_devdata->queue_mutex);
	up(&js_devdata->schedule_sem);
}
void kbase_js_zap_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
	int js;

	/*
	 * Critical assumption: No more submission is possible outside of the
	 * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
	 * whilst the struct kbase_context is terminating.
	 */

	/* First, atomically do the following:
	 * - mark the context as dying
	 * - try to evict it from the policy queue */
	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	js_kctx_info->ctx.is_dying = true;

	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);

	/*
	 * At this point we know:
	 * - If eviction succeeded, it was in the policy queue, but now no
	 *   longer is
	 *  - We must cancel the jobs here. No Power Manager active reference to
	 *    release.
	 *  - This happens asynchronously - kbase_jd_zap_context() will wait for
	 *    those jobs to be killed.
	 * - If eviction failed, then it wasn't in the policy queue. It is one
	 *   of the following:
	 *  - a. it didn't have any jobs, and so is not in the Policy Queue or
	 *       the Run Pool (not scheduled)
	 *   - Hence, no more work required to cancel jobs. No Power Manager
	 *     active reference to release.
	 *  - b. it was in the middle of a scheduling transaction (and thus must
	 *       have at least 1 job). This can happen from a syscall or a
	 *       kernel thread. We still hold the jsctx_mutex, and so the thread
	 *       must be waiting inside kbasep_js_try_schedule_head_ctx(),
	 *       before checking whether the runpool is full. That thread will
	 *       continue after we drop the mutex, and will notice the context
	 *       is dying. It will rollback the transaction, killing all jobs at
	 *       the same time. kbase_jd_zap_context() will wait for those jobs
	 *       to be killed.
	 *   - Hence, no more work required to cancel jobs, or to release the
	 *     Power Manager active reference.
	 *  - c. it is scheduled, and may or may not be running jobs
	 *   - We must cause it to leave the runpool by stopping it from
	 *     submitting any more jobs. When it finally does leave,
	 *     kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining
	 *     jobs (because it is dying), release the Power Manager active
	 *     reference, and will not requeue the context in the policy queue.
	 *     kbase_jd_zap_context() will wait for those jobs to be killed.
	 *  - Hence, work required just to make it leave the runpool. Cancelling
	 *    jobs and releasing the Power manager active reference will be
	 *    handled when it leaves the runpool.
	 */
	if (!js_kctx_info->ctx.is_scheduled) {
		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (!list_empty(
				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
				list_del_init(
				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
		}

		/* The following events require us to kill off remaining jobs
		 * and update PM book-keeping:
		 * - we evicted it correctly (it must have jobs to be in the
		 *   Queue)
		 *
		 * These events need no action, but take this path anyway:
		 * - Case a: it didn't have any jobs, and was never in the Queue
		 * - Case b: scheduling transaction will be partially rolled-
		 *           back (this already cancels the jobs)
		 */

		KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
					js_kctx_info->ctx.is_scheduled);

		dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);

		/* Only cancel jobs when we evicted from the policy
		 * queue. No Power Manager active reference was held.
		 *
		 * Having is_dying set ensures that this kills, and
		 * doesn't requeue */
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);

		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);
	} else {
		unsigned long flags;
		bool was_retained;

		/* Case c: didn't evict, but it is scheduled - it's in the Run
		 * Pool */
		KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
					js_kctx_info->ctx.is_scheduled);
		dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);

		/* Disable the ctx from submitting any more jobs */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

		kbasep_js_clear_submit_allowed(js_devdata, kctx);

		/* Retain and (later) release the context whilst it is now
		 * disallowed from submitting jobs - ensures that someone
		 * somewhere will be removing the context later on */
		was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);

		/* Since it's scheduled and we have the jsctx_mutex, it must be
		 * retained successfully */
		KBASE_DEBUG_ASSERT(was_retained);

		dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);

		/* Cancel any remaining running jobs for this kctx - if any.
		 * Submit is disallowed which takes effect immediately, so no
		 * more new jobs will appear after we do this. */
		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
			kbase_job_slot_hardstop(kctx, js, NULL);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);

		dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
				kctx);

		kbasep_js_runpool_release_ctx(kbdev, kctx);
	}

	KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);

	/* After this, you must wait on both the
	 * kbase_jd_context::zero_jobs_wait and the
	 * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
	 * to be destroyed, and the context to be de-scheduled (if it was on
	 * the Run Pool).
	 *
	 * kbase_jd_zap_context() will do this. */
}
static inline int trace_get_refcnt(struct kbase_device *kbdev,
					struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}

	return refcnt;
}
/**
 * kbase_js_foreach_ctx_job - Call a function on all jobs in context
 * @kctx:     Pointer to context.
 * @callback: Pointer to function to call for each job.
 *
 * Call a function on all jobs belonging to a non-queued, non-running
 * context, and detach the jobs from the context as it goes.
 *
 * Due to the locks that might be held at the time of the call, the callback
 * may need to defer work on a workqueue to complete its actions (e.g. when
 * cancelling jobs)
 *
 * Atoms will be removed from the queue, so this must only be called when
 * cancelling jobs (which occurs as part of context destruction).
 *
 * The locking conditions on the caller are as follows:
 * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
		kbasep_js_policy_ctx_job_cb callback)
{
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	unsigned long flags;
	u32 js;

	kbdev = kctx->kbdev;

	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
					0u, trace_get_refcnt(kbdev, kctx));

	/* Invoke callback on jobs on each slot in turn */
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		jsctx_rb_foreach(kctx, js, callback);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
}