/*
 *
 * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Job Scheduler Implementation
 */
#include <mali_kbase.h>
#include <mali_kbase_js.h>
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include <mali_kbase_gator.h>
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
#endif
#include <mali_kbase_hw.h>

#include <mali_kbase_defs.h>
#include <mali_kbase_config_defaults.h>

#include "mali_kbase_jm.h"
#include "mali_kbase_hwaccess_jm.h"
/* Bitpattern indicating the result of releasing a context */
enum {
	/* The context was descheduled - caller should try scheduling in a new
	 * one to keep the runpool full */
	KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),

	/* Ctx attributes were changed - caller should try scheduling all
	 * contexts */
	KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
};

typedef u32 kbasep_js_release_result;
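
/*
 * Illustrative sketch (not a new API; it mirrors how the release paths later
 * in this file consume the bitpattern): callers OR the bits together and then
 * test each one independently:
 *
 *	kbasep_js_release_result res;
 *
 *	res = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, state);
 *	if (res & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED)
 *		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
 *	if (res & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
 *		kbase_js_sched_all(kbdev);
 */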

const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
	KBASE_JS_ATOM_SCHED_PRIO_MED,  /* BASE_JD_PRIO_MEDIUM */
	KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
	KBASE_JS_ATOM_SCHED_PRIO_LOW   /* BASE_JD_PRIO_LOW */
};

const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
	BASE_JD_PRIO_HIGH,   /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
	BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
	BASE_JD_PRIO_LOW     /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
};
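
/*
 * Example (follows directly from the two tables above): BASE_JD_PRIO_HIGH
 * maps to KBASE_JS_ATOM_SCHED_PRIO_HIGH and back again, so the conversion
 * round-trips:
 *
 *	int sched_prio = kbasep_js_atom_priority_to_relative[BASE_JD_PRIO_HIGH];
 *	base_jd_prio jd_prio = kbasep_js_relative_priority_to_atom[sched_prio];
 *	(jd_prio == BASE_JD_PRIO_HIGH)
 */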

/*
 * Private function prototypes
 */
static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
		struct kbase_device *kbdev, struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state);

static int kbase_js_get_slot(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);

static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
		kbasep_js_policy_ctx_job_cb callback);

static bool kbase_js_evict_atom(struct kbase_context *kctx,
				struct kbase_jd_atom *katom_evict,
				struct kbase_jd_atom *start_katom,
				struct kbase_jd_atom *head_katom,
				struct list_head *evict_list,
				struct jsctx_rb *rb, int idx);

/* Helper for trace subcodes */
#if KBASE_TRACE_ENABLE
static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return refcnt;
}

static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}

	return refcnt;
}
#else /* KBASE_TRACE_ENABLE */
static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(kctx);
	return 0;
}

static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(kctx);
	return 0;
}
#endif /* KBASE_TRACE_ENABLE */

enum {
	JS_DEVDATA_INIT_NONE = 0,
	JS_DEVDATA_INIT_CONSTANTS = (1 << 0),
	JS_DEVDATA_INIT_POLICY = (1 << 1),
	JS_DEVDATA_INIT_ALL = ((1 << 2) - 1)
};

enum {
	JS_KCTX_INIT_NONE = 0,
	JS_KCTX_INIT_CONSTANTS = (1 << 0),
	JS_KCTX_INIT_POLICY = (1 << 1),
	JS_KCTX_INIT_ALL = ((1 << 2) - 1)
};

/**
 * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
 * @features: JSn_FEATURE register value
 *
 * Given a JSn_FEATURE register value returns the core requirements that match.
 *
 * Return: Core requirement bit mask
 */
static base_jd_core_req core_reqs_from_jsn_features(u16 features)
{
	base_jd_core_req core_req = 0u;

	if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
		core_req |= BASE_JD_REQ_V;

	if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
		core_req |= BASE_JD_REQ_CF;

	if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
		core_req |= BASE_JD_REQ_CS;

	if ((features & JS_FEATURE_TILER_JOB) != 0)
		core_req |= BASE_JD_REQ_T;

	if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
		core_req |= BASE_JD_REQ_FS;

	return core_req;
}
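
/*
 * Usage sketch: this helper is applied once per job slot at init time (see
 * kbasep_js_devdata_init() below), translating each slot's JSn_FEATURES
 * register into the core requirements that slot can service:
 *
 *	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
 *		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
 *			kbdev->gpu_props.props.raw_props.js_features[i]);
 */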

static void kbase_js_sync_timers(struct kbase_device *kbdev)
{
	mutex_lock(&kbdev->js_data.runpool_mutex);
	kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&kbdev->js_data.runpool_mutex);
}

/* Hold the kbasep_js_device_data::runpool_irq::lock for this */
bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_per_as_data *js_per_as_data;
	bool result = false;
	int as_nr;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		int new_refcnt;

		KBASE_DEBUG_ASSERT(as_nr >= 0);
		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL);

		new_refcnt = ++(js_per_as_data->as_busy_refcount);

		KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
				NULL, 0u, new_refcnt);
		result = true;
	}

	return result;
}
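
/*
 * Refcounting sketch (grounded in the retain/release pairing used throughout
 * this file): every successful retain must be matched by exactly one release;
 * as_busy_refcount keeps the address space bound while work references it:
 *
 *	if (kbasep_js_runpool_retain_ctx(kbdev, kctx)) {
 *		... use the context's address space ...
 *		kbasep_js_runpool_release_ctx(kbdev, kctx);
 *	}
 */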

/**
 * jsctx_rb_is_empty_prio(): - Check if ring buffer is empty
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 * @prio: Priority to check.
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if the ring buffer is empty, false otherwise.
 */
static inline bool
jsctx_rb_is_empty_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	return rb->running_idx == rb->write_idx;
}

/**
 * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 * @prio: Priority to check.
 *
 * Return true if there are no atoms to pull. There may be running atoms in
 * the ring buffer even if there are no atoms to pull. It is also possible for
 * the ring buffer to be full (with running atoms) when this function returns
 * true.
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if there are no atoms to pull, false otherwise.
 */
static inline bool
jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	return rb->read_idx == rb->write_idx;
}
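
/*
 * Note on the ring buffer indices (a summary of the predicates above and of
 * the pull/remove helpers below): entries progress through three stages, with
 * the indices only ever incrementing (modulo u16 wrap-around):
 *
 *	running_idx <= read_idx <= write_idx
 *
 * Example: with running_idx == 2, read_idx == 4 and write_idx == 6, the
 * entries at 2 and 3 are in HW access and the entries at 4 and 5 are still
 * pullable; the buffer is "empty" only when running_idx == write_idx.
 */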

/**
 * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
 * pullable atoms
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if the ring buffers for all priorities have no pullable atoms,
 *         false otherwise.
 */
static inline bool
jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
{
	int prio;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
		if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
			return false;
	}

	return true;
}

/**
 * jsctx_rb_compact_prio(): - Compact a ring buffer
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to compact.
 * @prio: Priority id to compact.
 */
static inline void
jsctx_rb_compact_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	u16 compact_idx = rb->write_idx - 1;
	u16 end_idx = rb->running_idx - 1;
	u16 i;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	for (i = compact_idx; i != end_idx; i--) {
		if (rb->entries[i & JSCTX_RB_MASK].atom_id !=
				KBASEP_ATOM_ID_INVALID) {
			WARN_ON(compact_idx < rb->running_idx);
			rb->entries[compact_idx & JSCTX_RB_MASK].atom_id =
					rb->entries[i & JSCTX_RB_MASK].atom_id;

			compact_idx--;
		}
		if (rb->read_idx == i)
			rb->read_idx = compact_idx + 1;
	}

	rb->running_idx = compact_idx + 1;
}

/**
 * jsctx_rb_compact(): - Compact all priority ring buffers
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to compact.
 */
static inline void
jsctx_rb_compact(struct kbase_context *kctx, int js)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
		jsctx_rb_compact_prio(kctx, js, prio);
}

/**
 * jsctx_rb_foreach_prio(): - Execute callback for each entry in ring buffer
 * @kctx:     Pointer to kbase context with ring buffer.
 * @js:       Job slot id to iterate.
 * @prio:     Priority id to iterate.
 * @callback: Function pointer to callback.
 *
 * Iterate over a ring buffer and invoke @callback for each entry in the
 * buffer, removing the entry from the buffer.
 *
 * If entries are added to the ring buffer while this is running those entries
 * may, or may not, be covered. To ensure that all entries in the buffer have
 * been enumerated when this function returns, jsctx->lock must be held when
 * calling this function.
 *
 * The HW access lock, js_data.runpool_irq.lock, must always be held when
 * calling this function.
 */
static void
jsctx_rb_foreach_prio(struct kbase_context *kctx, int js, int prio,
		kbasep_js_policy_ctx_job_cb callback)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	struct kbase_jd_atom *katom;
	u16 write_idx = ACCESS_ONCE(rb->write_idx);

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* There must be no jobs currently in HW access */
	WARN_ON(rb->read_idx != rb->running_idx);

	/* Invoke callback on all kbase_jd_atoms in the ring buffer, and
	 * remove them from the buffer */
	while (rb->read_idx != write_idx) {
		int id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id;

		katom = kbase_jd_atom_from_id(kctx, id);

		rb->read_idx++;
		rb->running_idx++;

		callback(kctx->kbdev, katom);
	}
}

/**
 * jsctx_rb_foreach(): - Execute callback for each entry in all priority rb
 * @kctx:     Pointer to kbase context with ring buffer.
 * @js:       Job slot id to iterate.
 * @callback: Function pointer to callback.
 *
 * Iterate over all the different priorities, and for each call
 * jsctx_rb_foreach_prio() to iterate over the ring buffer and invoke @callback
 * for each entry in the buffer, and remove the entry from the buffer.
 */
static inline void
jsctx_rb_foreach(struct kbase_context *kctx, int js,
		kbasep_js_policy_ctx_job_cb callback)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
		jsctx_rb_foreach_prio(kctx, js, prio, callback);
}

/**
 * jsctx_rb_peek_prio(): - Check buffer and get next atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 * @prio: Priority id to check.
 *
 * Check the ring buffer for the specified @js and @prio and return a pointer
 * to the next atom, unless the ring buffer is empty.
 *
 * Return: Pointer to next atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	int id;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	if (jsctx_rb_none_to_pull_prio(kctx, js, prio))
		return NULL;

	id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id;
	return kbase_jd_atom_from_id(kctx, id);
}

/**
 * jsctx_rb_peek(): - Check all priority buffers and get next atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 *
 * Check the ring buffers for all priorities, starting from
 * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a pointer
 * to the next atom, unless all the priority's ring buffers are empty.
 *
 * Return: Pointer to next atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek(struct kbase_context *kctx, int js)
{
	int prio;

	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
		struct kbase_jd_atom *katom;

		katom = jsctx_rb_peek_prio(kctx, js, prio);
		if (katom)
			return katom;
	}

	return NULL;
}

/**
 * jsctx_rb_peek_last(): - Check a ring buffer and get the last atom
 * @kctx: Pointer to kbase context with ring buffer.
 * @js:   Job slot id to check.
 * @prio: Priority id to check.
 *
 * Check the ring buffer for the specified @js and @prio and return a
 * pointer to the last atom, unless all the priority's ring buffers are empty.
 *
 * The last atom is the atom that was added using jsctx_rb_add_atom() most
 * recently.
 *
 * Return: Pointer to last atom in buffer, or NULL if there is no atom.
 */
static inline struct kbase_jd_atom *
jsctx_rb_peek_last(struct kbase_context *kctx, int js, int prio)
{
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	int id;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	if (jsctx_rb_is_empty_prio(kctx, js, prio))
		return NULL;

	id = rb->entries[(rb->write_idx - 1) & JSCTX_RB_MASK].atom_id;
	return kbase_jd_atom_from_id(kctx, id);
}

/**
 * jsctx_rb_pull(): - Mark atom in ring buffer as running
 * @kctx:  Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to pull.
 *
 * Mark an atom previously obtained from jsctx_rb_peek() as running.
 *
 * @katom must currently be at the head of the ring buffer.
 */
static inline void
jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be pulled in the correct order. */
	WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));

	rb->read_idx++;
}

/**
 * jsctx_rb_unpull(): - Undo marking of atom in ring buffer as running
 * @kctx:  Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to unpull.
 *
 * Undo jsctx_rb_pull() and put @katom back in the queue.
 *
 * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
 * pulled.
 */
static inline void
jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be unpulled in correct order. */
	WARN_ON(rb->entries[(rb->read_idx - 1) & JSCTX_RB_MASK].atom_id !=
			kbase_jd_atom_id(kctx, katom));

	rb->read_idx--;
}
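
/*
 * Pull/unpull sketch (based on the ordering rules documented above):
 * read_idx moves forward on a pull and back on an unpull, so an atom whose
 * submission attempt fails can be returned to the queue without copying any
 * entries (submit_failed below is a placeholder condition):
 *
 *	katom = jsctx_rb_peek(kctx, js);
 *	jsctx_rb_pull(kctx, katom);
 *	if (submit_failed)
 *		jsctx_rb_unpull(kctx, katom);
 */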

/**
 * jsctx_rb_add_atom(): - Add atom to ring buffer
 * @kctx:  Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to add.
 *
 * Add @katom to the ring buffer determined by the atom's priority and job slot
 * number.
 *
 * If the ring buffer is full -EBUSY will be returned.
 *
 * Return: On success 0 is returned, on failure a negative error code.
 */
static int
jsctx_rb_add_atom(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->jctx.lock);

	/* Check if the ring buffer is full; u16 arithmetic keeps the
	 * comparison correct across index wrap-around */
	if ((u16)(rb->write_idx - rb->running_idx) >= JSCTX_RB_SIZE)
		return -EBUSY;

	rb->entries[rb->write_idx & JSCTX_RB_MASK].atom_id =
			kbase_jd_atom_id(kctx, katom);
	rb->write_idx++;

	return 0;
}
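
/*
 * Example of the fullness check above: the indices are free-running u16
 * values masked with JSCTX_RB_MASK on access, so occupancy is their u16
 * difference. With running_idx == 0xFFFE and write_idx == 0x0002 the buffer
 * holds (u16)(0x0002 - 0xFFFE) == 4 entries, which stays correct across the
 * 0xFFFF -> 0x0000 wrap.
 */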

/**
 * jsctx_rb_remove(): - Remove atom from ring buffer
 * @kctx:  Pointer to kbase context with ring buffer.
 * @katom: Pointer to katom to remove.
 *
 * Remove @katom from the ring buffer.
 *
 * @katom must have been pulled from the buffer earlier by jsctx_rb_pull(), and
 * atoms must be removed in the same order they were pulled from the ring
 * buffer.
 */
static void
jsctx_rb_remove(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int prio = katom->sched_priority;
	int js = katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* Atoms must be completed in order. */
	WARN_ON(rb->entries[rb->running_idx & JSCTX_RB_MASK].atom_id !=
			kbase_jd_atom_id(kctx, katom));

	rb->running_idx++;
}

/**
 * jsctx_rb_evict(): - Evict atom, and dependents, from ring buffer
 * @kctx:        Pointer to kbase context with ring buffer.
 * @start_katom: Pointer to the first katom to evict.
 * @head_katom:  Pointer to head katom.
 * @evict_list:  Pointer to head of list where evicted atoms are added.
 *
 * Iterate over the ring buffer starting at @start_katom and evict @start_katom
 * and dependent atoms in the ring buffer.
 *
 * @evict_list and @head_katom are passed on to kbase_js_evict_atom() which
 * will examine the atom dependencies.
 *
 * jsctx_rb_evict() is only called by kbase_js_evict_deps().
 */
static void
jsctx_rb_evict(struct kbase_context *kctx,
		struct kbase_jd_atom *start_katom,
		struct kbase_jd_atom *head_katom,
		struct list_head *evict_list)
{
	int prio = start_katom->sched_priority;
	int js = start_katom->slot_nr;
	struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js];
	bool atom_in_rb = false;
	u16 i, start_idx;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	lockdep_assert_held(&kctx->jctx.lock);

	for (i = rb->running_idx; i != rb->write_idx; i++) {
		if (rb->entries[i & JSCTX_RB_MASK].atom_id ==
				kbase_jd_atom_id(kctx, start_katom)) {
			start_idx = i;
			atom_in_rb = true;
			break;
		}
	}

	/* start_katom must still be in ring buffer. */
	if (i == rb->write_idx || !atom_in_rb)
		return;

	/* Evict all dependencies on same slot. */
	for (i = start_idx; i != rb->write_idx; i++) {
		u16 katom_evict;

		katom_evict = rb->entries[i & JSCTX_RB_MASK].atom_id;
		if (katom_evict != KBASEP_ATOM_ID_INVALID) {
			if (!kbase_js_evict_atom(kctx,
					&kctx->jctx.atoms[katom_evict],
					start_katom, head_katom,
					evict_list, rb, i))
				return;
		}
	}
}

/*
 * Functions private to KBase ('Protected' functions)
 */
int kbasep_js_devdata_init(struct kbase_device * const kbdev)
{
	struct kbasep_js_device_data *jsdd;
	int err;
	int i;
	u16 as_present;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	jsdd = &kbdev->js_data;

	KBASE_DEBUG_ASSERT(jsdd->init_status == JS_DEVDATA_INIT_NONE);

	/* These two must be recalculated if nr_hw_address_spaces changes
	 * (e.g. for HW workarounds) */
	as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
		bool use_workaround;

		use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
		if (use_workaround) {
			dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
			kbdev->nr_user_address_spaces = 1;
		}
	}
#ifdef CONFIG_MALI_DEBUG
	/* Soft-stop will be disabled on a single context by default unless
	 * softstop_always is set */
	jsdd->softstop_always = false;
#endif /* CONFIG_MALI_DEBUG */
	jsdd->nr_all_contexts_running = 0;
	jsdd->nr_user_contexts_running = 0;
	jsdd->nr_contexts_pullable = 0;
	atomic_set(&jsdd->nr_contexts_runnable, 0);
	/* All ASs initially free */
	jsdd->as_free = as_present;
	/* No ctx allowed to submit */
	jsdd->runpool_irq.submit_allowed = 0u;
	memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
			sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
	memset(jsdd->runpool_irq.slot_affinities, 0,
			sizeof(jsdd->runpool_irq.slot_affinities));
	memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
			sizeof(jsdd->runpool_irq.slot_affinity_refcount));
	INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);

	/* Config attributes */
	jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
	jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
	jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
	else
		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
	jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
	jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
	else
		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
	jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
	jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
	jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
	jsdd->cfs_ctx_runtime_init_slices =
			DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
	jsdd->cfs_ctx_runtime_min_slices =
			DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;

	dev_dbg(kbdev->dev, "JS Config Attribs: ");
	dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
			jsdd->scheduling_period_ns);
	dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
			jsdd->soft_stop_ticks);
	dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
			jsdd->soft_stop_ticks_cl);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
			jsdd->hard_stop_ticks_ss);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
			jsdd->hard_stop_ticks_cl);
	dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
			jsdd->hard_stop_ticks_dumping);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
			jsdd->gpu_reset_ticks_ss);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
			jsdd->gpu_reset_ticks_cl);
	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
			jsdd->gpu_reset_ticks_dumping);
	dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
			jsdd->ctx_timeslice_ns);
	dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u",
			jsdd->cfs_ctx_runtime_init_slices);
	dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u",
			jsdd->cfs_ctx_runtime_min_slices);

	if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
			jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
			jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
			jsdd->hard_stop_ticks_dumping <
			jsdd->gpu_reset_ticks_dumping)) {
		dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
		return -EINVAL;
	}

#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
	dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
			jsdd->soft_stop_ticks,
			jsdd->scheduling_period_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
	dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
			jsdd->hard_stop_ticks_ss,
			jsdd->hard_stop_ticks_dumping,
			jsdd->scheduling_period_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
	dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
#endif

	/* setup the number of irq throttle cycles based on given time */
	{
		int time_us = kbdev->gpu_props.irq_throttle_time_us;
		int cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev,
				time_us);

		atomic_set(&kbdev->irq_throttle_cycles, cycles);
	}

	/* Clear the AS data, including setting NULL pointers */
	memset(&jsdd->runpool_irq.per_as_data[0], 0,
			sizeof(jsdd->runpool_irq.per_as_data));

	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
			kbdev->gpu_props.props.raw_props.js_features[i]);

	jsdd->init_status |= JS_DEVDATA_INIT_CONSTANTS;

	/* On error, we could continue on: providing none of the below resources
	 * rely on the ones above */
	mutex_init(&jsdd->runpool_mutex);
	mutex_init(&jsdd->queue_mutex);
	spin_lock_init(&jsdd->runpool_irq.lock);
	sema_init(&jsdd->schedule_sem, 1);

	err = kbasep_js_policy_init(kbdev);
	if (!err)
		jsdd->init_status |= JS_DEVDATA_INIT_POLICY;

	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
		INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]);
		INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]);
	}

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if (jsdd->init_status != JS_DEVDATA_INIT_ALL)
		return -EINVAL;

	return 0;
}

void kbasep_js_devdata_halt(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

void kbasep_js_devdata_term(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	js_devdata = &kbdev->js_data;

	if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) {
		s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
		/* The caller must de-register all contexts before calling this
		 * function */
		KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
		KBASE_DEBUG_ASSERT(memcmp(
				js_devdata->runpool_irq.ctx_attr_ref_count,
				zero_ctx_attr_ref_count,
				sizeof(zero_ctx_attr_ref_count)) == 0);
		CSTD_UNUSED(zero_ctx_attr_ref_count);
	}

	if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY))
		kbasep_js_policy_term(&js_devdata->policy);

	js_devdata->init_status = JS_DEVDATA_INIT_NONE;
}

int kbasep_js_kctx_init(struct kbase_context * const kctx)
{
	struct kbase_device *kbdev;
	struct kbasep_js_kctx_info *js_kctx_info;
	int err;
	int i;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
		INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);

	js_kctx_info = &kctx->jctx.sched_info;
	KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE);

	js_kctx_info->ctx.nr_jobs = 0;
	js_kctx_info->ctx.is_scheduled = false;
	js_kctx_info->ctx.is_dying = false;
	memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
			sizeof(js_kctx_info->ctx.ctx_attr_ref_count));

	/* Initially, the context is disabled from submission until the create
	 * flags are set */
	js_kctx_info->ctx.flags = KBASE_CTX_FLAG_SUBMIT_DISABLED;

	js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;

	/* On error, we could continue on: providing none of the below resources
	 * rely on the ones above */
	mutex_init(&js_kctx_info->ctx.jsctx_mutex);

	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);

	err = kbasep_js_policy_init_ctx(kbdev, kctx);
	if (!err)
		js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if (js_kctx_info->init_status != JS_KCTX_INIT_ALL)
		return -EINVAL;

	return 0;
}

void kbasep_js_kctx_term(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	int js;
	bool update_ctx_count = false;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) {
		/* The caller must de-register all jobs before calling this */
		KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
		KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
	}

	mutex_lock(&kbdev->js_data.queue_mutex);
	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	if (kctx->ctx_runnable_ref) {
		WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
		atomic_dec(&kbdev->js_data.nr_contexts_runnable);
		update_ctx_count = true;
		kctx->ctx_runnable_ref = false;
	}

	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	mutex_unlock(&kbdev->js_data.queue_mutex);

	if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY))
		kbasep_js_policy_term_ctx(js_policy, kctx);

	js_kctx_info->init_status = JS_KCTX_INIT_NONE;

	if (update_ctx_count) {
		mutex_lock(&kbdev->js_data.runpool_mutex);
		kbase_backend_ctx_count_changed(kbdev);
		mutex_unlock(&kbdev->js_data.runpool_mutex);
	}
}

/**
 * kbase_js_ctx_list_add_pullable - Add context to the tail of the per-slot
 *                                  pullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * If the context is on either the pullable or unpullable queues, then it is
 * removed before being added to the tail.
 *
 * This function should be used when queueing a context for the first time, or
 * re-queueing a context that has been pulled from.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_pullable(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
					&kbdev->js_data.ctx_list_pullable[js]);

	if (!kctx->slots_pullable) {
		kbdev->js_data.nr_contexts_pullable++;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled)) {
			WARN_ON(kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = true;
			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
		}
	}
	kctx->slots_pullable |= (1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
 *                                       per-slot pullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * If the context is on either the pullable or unpullable queues, then it is
 * removed before being added to the head.
 *
 * This function should be used when a context has been scheduled, but no jobs
 * can currently be pulled from it.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
					&kbdev->js_data.ctx_list_pullable[js]);

	if (!kctx->slots_pullable) {
		kbdev->js_data.nr_contexts_pullable++;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled)) {
			WARN_ON(kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = true;
			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
		}
	}
	kctx->slots_pullable |= (1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_add_unpullable - Add context to the tail of the per-slot
 *                                    unpullable context queue
 * @kbdev: Device pointer
 * @kctx:  Context to add to queue
 * @js:    Job slot to use
 *
 * The context must already be on the per-slot pullable queue. It will be
 * removed from the pullable queue before being added to the unpullable queue.
 *
 * This function should be used when a context has been pulled from, and there
 * are no jobs remaining on the specified slot.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_add_unpullable(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
				&kbdev->js_data.ctx_list_unpullable[js]);

	if (kctx->slots_pullable == (1 << js)) {
		kbdev->js_data.nr_contexts_pullable--;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled)) {
			WARN_ON(!kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = false;
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
		}
	}
	kctx->slots_pullable &= ~(1 << js);

	return ret;
}
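
/*
 * Example of the slots_pullable bookkeeping used by the helpers above:
 * slots_pullable is a per-context bitmask with one bit per job slot, e.g. a
 * context with pullable work on slots 0 and 2 has slots_pullable == 0x5.
 * nr_contexts_pullable is only adjusted when the mask transitions between
 * zero and non-zero, which is why the helpers test !kctx->slots_pullable and
 * kctx->slots_pullable == (1 << js) respectively.
 */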

/**
 * kbase_js_ctx_list_remove - Remove context from the per-slot pullable or
 *                            unpullable context queues
 * @kbdev: Device pointer
 * @kctx:  Context to remove from queue
 * @js:    Job slot to use
 *
 * The context must already be on one of the queues.
 *
 * This function should be used when a context has no jobs on the GPU, and no
 * jobs remaining for the specified slot.
 *
 * Caller must hold kbasep_jd_device_data.queue_mutex
 *
 * Return: true if caller should call kbase_backend_ctx_count_changed()
 */
static bool kbase_js_ctx_list_remove(struct kbase_device *kbdev,
					struct kbase_context *kctx,
					int js)
{
	bool ret = false;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);
	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);

	WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));

	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	if (kctx->slots_pullable == (1 << js)) {
		kbdev->js_data.nr_contexts_pullable--;
		ret = true;
		if (!atomic_read(&kctx->atoms_pulled)) {
			WARN_ON(!kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = false;
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
		}
	}
	kctx->slots_pullable &= ~(1 << js);

	return ret;
}

/**
 * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
 *                              queue
 * @kbdev: Device pointer
 * @js:    Job slot to use
 *
 * Caller must hold kbasep_jd_device_data::queue_mutex
 *
 * Return: Context to use for specified slot.
 *         NULL if no contexts present for specified slot
 */
static struct kbase_context *kbase_js_ctx_list_pop_head(
		struct kbase_device *kbdev,
		int js)
{
	struct kbase_context *kctx;

	lockdep_assert_held(&kbdev->js_data.queue_mutex);

	if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
		return NULL;

	kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next,
					struct kbase_context,
					jctx.sched_info.ctx.ctx_list_entry[js]);

	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);

	return kctx;
}

/**
 * kbase_js_ctx_pullable - Return if a context can be pulled from on the
 *                         specified slot
 * @kctx:         Context pointer
 * @js:           Job slot to use
 * @is_scheduled: true if the context is currently scheduled
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if context can be pulled from on specified slot,
 *         false otherwise
 */
static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
					bool is_scheduled)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbase_jd_atom *katom;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	js_devdata = &kctx->kbdev->js_data;

	if (is_scheduled) {
		if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
			return false;
	}
	katom = jsctx_rb_peek(kctx, js);
	if (!katom)
		return false; /* No pullable atoms */
	if (atomic_read(&katom->blocked))
		return false; /* next atom blocked */
	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
		if (katom->x_pre_dep->gpu_rb_state ==
					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)
			return false;
		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
			return false;
	}

	return true;
}

static bool kbase_js_dep_validate(struct kbase_context *kctx,
				struct kbase_jd_atom *katom)
{
	struct kbase_device *kbdev = kctx->kbdev;
	bool ret = true;
	bool has_dep = false, has_x_dep = false;
	int js = kbase_js_get_slot(kbdev, katom);
	int prio = katom->sched_priority;
	int i;

	for (i = 0; i < 2; i++) {
		struct kbase_jd_atom *dep_atom = katom->dep[i].atom;

		if (dep_atom) {
			int dep_js = kbase_js_get_slot(kbdev, dep_atom);
			int dep_prio = dep_atom->sched_priority;

			/* Dependent atom must already have been submitted */
			if (!(dep_atom->atom_flags &
					KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)) {
				ret = false;
				break;
			}

			/* Dependencies with different priorities can't
			 * be represented in the ringbuffer */
			if (prio != dep_prio) {
				ret = false;
				break;
			}

			if (js == dep_js) {
				/* Only one same-slot dependency can be
				 * represented in the ringbuffer */
				if (has_dep) {
					ret = false;
					break;
				}
				has_dep = true;
			} else {
				/* Only one cross-slot dependency can be
				 * represented in the ringbuffer */
				if (has_x_dep) {
					ret = false;
					break;
				}
				/* Each dependee atom can only have one
				 * cross-slot dependency */
				if (dep_atom->x_post_dep) {
					ret = false;
					break;
				}
				/* The dependee atom can not already be in the
				 * HW access ringbuffer */
				if (dep_atom->gpu_rb_state !=
					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
					ret = false;
					break;
				}
				/* The dependee atom can not already have
				 * completed */
				if (dep_atom->status !=
						KBASE_JD_ATOM_STATE_IN_JS) {
					ret = false;
					break;
				}
				/* Cross-slot dependencies must not violate
				 * PRLAM-8987 affinity restrictions */
				if (kbase_hw_has_issue(kbdev,
							BASE_HW_ISSUE_8987) &&
						(js == 2 || dep_js == 2)) {
					ret = false;
					break;
				}
				has_x_dep = true;
			}

			if (kbase_jd_katom_dep_type(&katom->dep[i]) ==
						BASE_JD_DEP_TYPE_DATA &&
					js == dep_js) {
				struct kbase_jd_atom *last_atom =
						jsctx_rb_peek_last(kctx, js,
								prio);

				/* Last atom on slot must be pre-dep for this
				 * atom */
				if (last_atom != dep_atom) {
					ret = false;
					break;
				}
			}
			/* Dependency can be represented in ringbuffers */
		}
	}

	/* If dependencies can be represented by ringbuffer then clear them
	 * from atom structure */
	if (ret) {
		for (i = 0; i < 2; i++) {
			struct kbase_jd_atom *dep_atom = katom->dep[i].atom;

			if (dep_atom) {
				int dep_js = kbase_js_get_slot(kbdev, dep_atom);

				if ((js != dep_js) &&
					(dep_atom->status !=
						KBASE_JD_ATOM_STATE_COMPLETED)
					&& (dep_atom->status !=
					KBASE_JD_ATOM_STATE_HW_COMPLETED)
					&& (dep_atom->status !=
						KBASE_JD_ATOM_STATE_UNUSED)) {

					katom->atom_flags |=
						KBASE_KATOM_FLAG_X_DEP_BLOCKED;
					katom->x_pre_dep = dep_atom;
					dep_atom->x_post_dep = katom;
					if (kbase_jd_katom_dep_type(
							&katom->dep[i]) ==
							BASE_JD_DEP_TYPE_DATA)
						katom->atom_flags |=
						KBASE_KATOM_FLAG_FAIL_BLOCKER;
				}
				if ((kbase_jd_katom_dep_type(&katom->dep[i])
						== BASE_JD_DEP_TYPE_DATA) &&
						(js == dep_js))
					katom->atom_flags |=
						KBASE_KATOM_FLAG_FAIL_PREV;

				list_del(&katom->dep_item[i]);
				kbase_jd_katom_dep_clear(&katom->dep[i]);
			}
		}
	}

	return ret;
}

bool kbasep_js_add_job(struct kbase_context *kctx,
		struct kbase_jd_atom *atom)
{
	unsigned long flags;
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;

	bool enqueue_required = false;
	bool timer_sync = false;

	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(atom != NULL);
	lockdep_assert_held(&kctx->jctx.lock);

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	/*
	 * Begin Runpool transaction
	 */
	mutex_lock(&js_devdata->runpool_mutex);

	/* Refcount ctx.nr_jobs */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
	++(js_kctx_info->ctx.nr_jobs);

	/* Setup any scheduling information */
	kbasep_js_clear_job_retry_submit(atom);

	/* Lock for state available during IRQ */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (!kbase_js_dep_validate(kctx, atom)) {
		/* Dependencies could not be represented */
		--(js_kctx_info->ctx.nr_jobs);

		/* Setting atom status back to queued as it still has unresolved
		 * dependencies */
		atom->status = KBASE_JD_ATOM_STATE_QUEUED;

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_devdata->runpool_mutex);

		goto out_unlock;
	}

	KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));

	if (kbase_js_dep_resolved_submit(kctx, atom, &enqueue_required) != 0) {
		/* Ringbuffer was full (should be impossible) - fail the job */
		--(js_kctx_info->ctx.nr_jobs);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_devdata->runpool_mutex);

		atom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

		goto out_unlock;
	}

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
				kbasep_js_trace_get_refcnt_nolock(kbdev, kctx));

	/* Context Attribute Refcounting */
	kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);

	if (enqueue_required) {
		if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
			timer_sync = kbase_js_ctx_list_add_pullable(kbdev, kctx,
								atom->slot_nr);
		else
			timer_sync = kbase_js_ctx_list_add_unpullable(kbdev,
					kctx, atom->slot_nr);
	}
	/* If this context is active and the atom is the first on its slot,
	 * kick the job manager to attempt to fast-start the atom */
	if (enqueue_required && kctx == kbdev->hwaccess.active_kctx)
		kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	if (timer_sync)
		kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&js_devdata->runpool_mutex);
	/* End runpool transaction */

	if (!js_kctx_info->ctx.is_scheduled) {
		if (js_kctx_info->ctx.is_dying) {
			/* A job got added while/after kbase_job_zap_context()
			 * was called on a non-scheduled context (e.g. KDS
			 * dependency resolved). Kill that job by killing the
			 * context. */
			kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
					false);
		} else if (js_kctx_info->ctx.nr_jobs == 1) {
			/* Handle Refcount going from 0 to 1: schedule the
			 * context on the Policy Queue */
			KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);
			dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);

			/* Policy Queue was updated - caller must try to
			 * schedule the head context */
			WARN_ON(!enqueue_required);
		}
	}
out_unlock:
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	mutex_unlock(&js_devdata->queue_mutex);

	return enqueue_required;
}

void kbasep_js_remove_job(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_jd_atom *atom)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(atom != NULL);

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
			kbasep_js_trace_get_refcnt(kbdev, kctx));

	/* De-refcount ctx.nr_jobs */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
	--(js_kctx_info->ctx.nr_jobs);
}

bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	unsigned long flags;
	struct kbasep_js_atom_retained_state katom_retained_state;
	struct kbasep_js_device_data *js_devdata;
	bool attr_state_changed;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom != NULL);

	js_devdata = &kbdev->js_data;

	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
	kbasep_js_remove_job(kbdev, kctx, katom);

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* The atom has 'finished' (will not be re-run), so no need to call
	 * kbasep_js_has_atom_finished().
	 *
	 * This is because it returns false for soft-stopped atoms, but we
	 * want to override that, because we're cancelling an atom regardless of
	 * whether it was soft-stopped or not */
	attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
			&katom_retained_state);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return attr_state_changed;
}

bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	bool result;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	js_devdata = &kbdev->js_data;

	/* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0,
	   kbasep_js_trace_get_refcnt(kbdev, kctx)); */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return result;
}

struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
		int as_nr)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_context *found_kctx = NULL;
	struct kbasep_js_per_as_data *js_per_as_data;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
	js_devdata = &kbdev->js_data;
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	found_kctx = js_per_as_data->kctx;

	if (found_kctx != NULL)
		++(js_per_as_data->as_busy_refcount);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return found_kctx;
}

struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock(
		struct kbase_device *kbdev, int as_nr)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbase_context *found_kctx = NULL;
	struct kbasep_js_per_as_data *js_per_as_data;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);

	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

	js_devdata = &kbdev->js_data;
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

	found_kctx = js_per_as_data->kctx;

	if (found_kctx != NULL)
		++(js_per_as_data->as_busy_refcount);

	return found_kctx;
}

/**
 * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
 *                           releasing a context and/or atom
 * @kbdev:                   The kbase_device to operate on
 * @kctx:                    The kbase_context to operate on
 * @katom_retained_state:    Retained state from the atom
 * @runpool_ctx_attr_change: True if the runpool context attributes have changed
 *
 * This collates a set of actions that must happen whilst
 * kbasep_js_device_data.runpool_irq.lock is held.
 *
 * This includes running more jobs when:
 * - The previously released kctx caused a ctx attribute change,
 * - The released atom caused a ctx attribute change,
 * - Slots were previously blocked due to affinity restrictions,
 * - Submission during IRQ handling failed.
 *
 * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
 *         changed. The caller should try scheduling all contexts
 */
static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
		struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state,
		bool runpool_ctx_attr_change)
{
	struct kbasep_js_device_data *js_devdata;
	kbasep_js_release_result result = 0;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);
	lockdep_assert_held(&js_devdata->runpool_irq.lock);

	if (js_devdata->nr_user_contexts_running != 0) {
		bool retry_submit = false;
		int retry_jobslot = 0;

		if (katom_retained_state)
			retry_submit = kbasep_js_get_atom_retry_submit_slot(
					katom_retained_state, &retry_jobslot);

		if (runpool_ctx_attr_change || retry_submit) {
			/* A change in runpool ctx attributes might mean we can
			 * run more jobs than before */
			result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;

			KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
						kctx, NULL, 0u, retry_jobslot);
		}
	}

	return result;
}

/*
 * Internal function to release the reference on a ctx and an atom's "retained
 * state", only taking the runpool and as transaction mutexes
 *
 * This also starts more jobs running in the case of a ctx-attribute state
 * change
 *
 * This does none of the followup actions for scheduling:
 * - It does not schedule in a new context
 * - It does not requeue or handle dying contexts
 *
 * For those tasks, just call kbasep_js_runpool_release_ctx() instead
 *
 * Requires:
 * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
 * - Context has a non-zero refcount
 * - Caller holds js_kctx_info->ctx.jsctx_mutex
 * - Caller holds js_devdata->runpool_mutex
 */
static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
		struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_per_as_data *js_per_as_data;

	kbasep_js_release_result release_result = 0u;
	bool runpool_ctx_attr_change = false;
	int kctx_as_nr;
	struct kbase_as *current_as;
	int new_ref_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled);

	/* kctx->as_nr and js_per_as_data are only read from here. The caller's
	 * js_ctx_mutex provides a barrier that ensures they are up-to-date.
	 *
	 * They will not change whilst we're reading them, because the refcount
	 * is non-zero (and we ASSERT on that last fact).
	 */
	kctx_as_nr = kctx->as_nr;
	KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr];
	KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);

	/*
	 * Transaction begins on AS and runpool_irq
	 *
	 * Assert about our calling contract
	 */
	current_as = &kbdev->as[kctx_as_nr];
	mutex_lock(&kbdev->pm.lock);
	mutex_lock(&current_as->transaction_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
	KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);

	/* Update refcount */
	new_ref_count = --(js_per_as_data->as_busy_refcount);

	/* Release the atom if it finished (i.e. wasn't soft-stopped) */
	if (kbasep_js_has_atom_finished(katom_retained_state))
		runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
				kbdev, kctx, katom_retained_state);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
			new_ref_count);

	if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags &
			KBASE_CTX_FLAG_PRIVILEGED &&
			!kbase_pm_is_suspending(kbdev)) {
		/* Context is kept scheduled into an address space even when
		 * there are no jobs, in this case we have to handle the
		 * situation where all jobs have been evicted from the GPU and
		 * submission is disabled.
		 *
		 * At this point we re-enable submission to allow further jobs
		 * to be executed
		 */
		kbasep_js_set_submit_allowed(js_devdata, kctx);
	}

	/* Make a set of checks to see if the context should be scheduled out */
	if (new_ref_count == 0 &&
			(!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
						kbdev->pm.suspending)) {
		/* Last reference, and we've been told to remove this context
		 * from the Run Pool */
		dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
				kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
				kbasep_js_is_submit_allowed(js_devdata, kctx));

#if defined(CONFIG_MALI_GATOR_SUPPORT)
		kbase_trace_mali_mmu_as_released(kctx->as_nr);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
		kbase_tlstream_tl_nret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
#endif

		kbase_backend_release_ctx_irq(kbdev, kctx);

		if (kbdev->hwaccess.active_kctx == kctx)
			kbdev->hwaccess.active_kctx = NULL;

		/* Ctx Attribute handling
		 *
		 * Releasing atoms attributes must either happen before this, or
		 * after 'is_scheduled' is changed, otherwise we double-decount
		 * the attributes
		 */
		runpool_ctx_attr_change |=
			kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);

		/* Releasing the context and katom retained state can allow
		 * more jobs to run */
		release_result |=
			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
						kctx, katom_retained_state,
						runpool_ctx_attr_change);

		/*
		 * Transaction ends on AS and runpool_irq:
		 *
		 * By this point, the AS-related data is now clear and ready
		 * for re-use.
		 *
		 * Since releases only occur once for each previous successful
		 * retain, and no more retains are allowed on this context, no
		 * other thread will be operating in this
		 * code whilst we are
		 */
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

		kbase_backend_release_ctx_noirq(kbdev, kctx);

		mutex_unlock(&current_as->transaction_mutex);
		mutex_unlock(&kbdev->pm.lock);

		/* Note: Don't reuse kctx_as_nr now */

		/* Synchronize with any policy timers */
		kbase_backend_ctx_count_changed(kbdev);

		/* update book-keeping info */
		js_kctx_info->ctx.is_scheduled = false;
		/* Signal any waiter that the context is not scheduled, so is
		 * safe for termination - once the jsctx_mutex is also dropped,
		 * and jobs have finished. */
		wake_up(&js_kctx_info->ctx.is_scheduled_wait);

		/* Queue an action to occur after we've dropped the lock */
		release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED;
	} else {
		release_result |=
			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
						kctx, katom_retained_state,
						runpool_ctx_attr_change);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&current_as->transaction_mutex);
		mutex_unlock(&kbdev->pm.lock);
	}

	return release_result;
}

void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_atom_retained_state katom_retained_state;

	/* Setup a dummy katom_retained_state */
	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);

	kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			&katom_retained_state);
}

void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx, bool has_pm_ref)
{
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_policy = &kbdev->js_data.policy;
	js_devdata = &kbdev->js_data;

	/* This is called if and only if you've detached the context from
	 * the Runpool or the Policy Queue, and not added it back to the Runpool
	 */
	KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);

	if (js_kctx_info->ctx.is_dying) {
		/* Dying: don't requeue, but kill all jobs on the context. This
		 * happens asynchronously */
		dev_dbg(kbdev->dev,
			"JS: ** Killing Context %p on RunPool Remove **", kctx);
		kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
	}
}

void kbasep_js_runpool_release_ctx_and_katom_retained_state(
		struct kbase_device *kbdev, struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	base_jd_event_code event_code;
	kbasep_js_release_result release_result;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	event_code = katom_retained_state->event_code;

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			katom_retained_state);

	/* Drop the runpool mutex to allow requeing kctx */
	mutex_unlock(&js_devdata->runpool_mutex);

	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);

	/* Drop the jsctx_mutex to allow scheduling in a new context */
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_unlock(&js_devdata->queue_mutex);

	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
		kbase_js_sched_all(kbdev);
}

void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_atom_retained_state katom_retained_state;

	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);

	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
			&katom_retained_state);
}

/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
 * kbase_js_sched_all() */
static void kbasep_js_runpool_release_ctx_no_schedule(
		struct kbase_device *kbdev, struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	kbasep_js_release_result release_result;
	struct kbasep_js_atom_retained_state katom_retained_state_struct;
	struct kbasep_js_atom_retained_state *katom_retained_state =
		&katom_retained_state_struct;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
			katom_retained_state);

	/* Drop the runpool mutex to allow requeing kctx */
	mutex_unlock(&js_devdata->runpool_mutex);
	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);

	/* Drop the jsctx_mutex to allow scheduling in a new context */
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	/* NOTE: could return release_result if the caller would like to know
	 * whether it should schedule a new context, but currently no callers do
	 */
}
/**
 * kbase_js_set_timeouts - update all JS timeouts with user specified data
 * @kbdev: Device pointer
 *
 * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
 * set to a positive number then that becomes the new value used; if a timeout
 * is negative then the default is restored. A timeout of zero leaves the
 * current value unchanged.
 */
static void kbase_js_set_timeouts(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_data = &kbdev->js_data;

	if (kbdev->js_scheduling_period_ns < 0)
		js_data->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
	else if (kbdev->js_scheduling_period_ns > 0)
		js_data->scheduling_period_ns = kbdev->js_scheduling_period_ns;

	if (kbdev->js_soft_stop_ticks < 0)
		js_data->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
	else if (kbdev->js_soft_stop_ticks > 0)
		js_data->soft_stop_ticks = kbdev->js_soft_stop_ticks;

	if (kbdev->js_soft_stop_ticks_cl < 0)
		js_data->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
	else if (kbdev->js_soft_stop_ticks_cl > 0)
		js_data->soft_stop_ticks_cl = kbdev->js_soft_stop_ticks_cl;

	if (kbdev->js_hard_stop_ticks_ss < 0) {
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
			js_data->hard_stop_ticks_ss =
					DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
		else
			js_data->hard_stop_ticks_ss =
					DEFAULT_JS_HARD_STOP_TICKS_SS;
	} else if (kbdev->js_hard_stop_ticks_ss > 0) {
		js_data->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss;
	}

	if (kbdev->js_hard_stop_ticks_cl < 0)
		js_data->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
	else if (kbdev->js_hard_stop_ticks_cl > 0)
		js_data->hard_stop_ticks_cl = kbdev->js_hard_stop_ticks_cl;

	if (kbdev->js_hard_stop_ticks_dumping < 0)
		js_data->hard_stop_ticks_dumping =
				DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
	else if (kbdev->js_hard_stop_ticks_dumping > 0)
		js_data->hard_stop_ticks_dumping =
				kbdev->js_hard_stop_ticks_dumping;

	if (kbdev->js_reset_ticks_ss < 0) {
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
			js_data->gpu_reset_ticks_ss =
					DEFAULT_JS_RESET_TICKS_SS_8408;
		else
			js_data->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
	} else if (kbdev->js_reset_ticks_ss > 0) {
		js_data->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss;
	}

	if (kbdev->js_reset_ticks_cl < 0)
		js_data->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
	else if (kbdev->js_reset_ticks_cl > 0)
		js_data->gpu_reset_ticks_cl = kbdev->js_reset_ticks_cl;

	if (kbdev->js_reset_ticks_dumping < 0)
		js_data->gpu_reset_ticks_dumping =
				DEFAULT_JS_RESET_TICKS_DUMPING;
	else if (kbdev->js_reset_ticks_dumping > 0)
		js_data->gpu_reset_ticks_dumping =
				kbdev->js_reset_ticks_dumping;
}
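/* Illustration of the tri-state semantics above, using
 * js_scheduling_period_ns as the example (values illustrative only):
 *   -1       -> scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS
 *    0       -> scheduling_period_ns left unchanged
 *    5000000 -> scheduling_period_ns = 5000000
 * The same pattern applies to every timeout handled in this function. */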
static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
					struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	struct kbase_as *new_address_space = NULL;
	unsigned long flags;
	bool kctx_suspended = false;
	int as_nr;

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	/* Pick available address space for this context */
	as_nr = kbase_backend_find_free_address_space(kbdev, kctx);

	if (as_nr == KBASEP_AS_NR_INVALID)
		return false; /* No address spaces currently available */

	new_address_space = &kbdev->as[as_nr];

	/*
	 * Atomic transaction on the Context and Run Pool begins
	 */
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_lock(&js_devdata->runpool_mutex);

	/* Check to see if context is dying due to kbase_job_zap_context() */
	if (js_kctx_info->ctx.is_dying) {
		/* Roll back the transaction so far and return */
		kbase_backend_release_free_address_space(kbdev, as_nr);

		mutex_unlock(&js_devdata->runpool_mutex);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

		return false;
	}

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
				0u,
				kbasep_js_trace_get_refcnt(kbdev, kctx));

	if (js_devdata->nr_user_contexts_running == 0 &&
			kbdev->js_timeouts_updated) {
		/* Only when there are no other contexts submitting jobs:
		 * Latch in run-time job scheduler timeouts that were set
		 * through js_timeouts sysfs file */
		kbase_js_set_timeouts(kbdev);

		kbdev->js_timeouts_updated = false;
	}

	js_kctx_info->ctx.is_scheduled = true;

	mutex_lock(&new_address_space->transaction_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* Assign context to previously chosen address space */
	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&new_address_space->transaction_mutex);
		/* If address space is not pending, then kbase_backend_use_ctx()
		 * failed. Roll back the transaction so far and return */
		if (!kctx->as_pending) {
			js_kctx_info->ctx.is_scheduled = false;

			kbase_backend_release_free_address_space(kbdev, as_nr);
		}

		mutex_unlock(&js_devdata->runpool_mutex);

		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

		return false;
	}

	kbdev->hwaccess.active_kctx = kctx;

#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
	kbase_tlstream_tl_ret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
#endif

	/* Cause any future waiter-on-termination to wait until the context is
	 * descheduled */
	wake_up(&js_kctx_info->ctx.is_scheduled_wait);

	/* Re-check for suspending: a suspend could've occurred, and all the
	 * contexts could've been removed from the runpool before we took this
	 * lock. In this case, we don't want to allow this context to run jobs,
	 * we just want it out immediately.
	 *
	 * The DMB required to read the suspend flag was issued recently as part
	 * of the runpool_irq locking. If a suspend occurs *after* that lock was
	 * taken (i.e. this condition doesn't execute), then the
	 * kbasep_js_suspend() code will cleanup this context instead (by virtue
	 * of it being called strictly after the suspend flag is set, and will
	 * wait for this lock to drop) */
	if (kbase_pm_is_suspending(kbdev)) {
		/* Cause it to leave at some later point */
		bool retained;

		retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
		KBASE_DEBUG_ASSERT(retained);

		kbasep_js_clear_submit_allowed(js_devdata, kctx);
		kctx_suspended = true;
	}

	/* Transaction complete */
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	mutex_unlock(&new_address_space->transaction_mutex);

	/* Synchronize with any policy timers */
	kbase_backend_ctx_count_changed(kbdev);

	mutex_unlock(&js_devdata->runpool_mutex);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	/* Note: after this point, the context could potentially get scheduled
	 * out immediately */

	if (kctx_suspended) {
		/* Finishing forcing out the context due to a suspend. Use a
		 * variant of kbasep_js_runpool_release_ctx() that doesn't
		 * schedule a new context, to prevent a risk of recursion back
		 * into this function */
		kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
		return false;
	}

	return true;
}
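/* Note on the function above: it returns false on every path where the
 * context did not end up runnable - no free address space, the context is
 * dying, the backend refused the address space, or a suspend raced in (in
 * which case the context is forced back out via the no_schedule release
 * variant). Only a fully committed transaction returns true. */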
static bool kbase_js_use_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long flags;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	if (kctx->as_pending) {
		/* Context waiting for AS to be assigned */
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		return false;
	}
	if (kbase_backend_use_ctx_sched(kbdev, kctx)) {
		/* Context already has ASID - mark as active */
		kbdev->hwaccess.active_kctx = kctx;

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		return true; /* Context already scheduled */
	}
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return kbasep_js_schedule_ctx(kbdev, kctx);
}
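/* Three outcomes for kbase_js_use_ctx(): false while an address space
 * assignment is still pending, true if the backend already has the context
 * scheduled, otherwise fall through to a full kbasep_js_schedule_ctx(). */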
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	bool is_scheduled;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	/* This must never be attempted whilst suspending - i.e. it should only
	 * happen in response to a syscall from a user-space thread */
	BUG_ON(kbase_pm_is_suspending(kbdev));

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	/* Mark the context as privileged */
	js_kctx_info->ctx.flags |= KBASE_CTX_FLAG_PRIVILEGED;

	is_scheduled = js_kctx_info->ctx.is_scheduled;
	if (!is_scheduled) {
		/* Add the context to the pullable list */
		if (kbase_js_ctx_list_add_pullable(kbdev, kctx, 0))
			kbase_js_sync_timers(kbdev);

		/* Fast-starting requires the jsctx_mutex to be dropped,
		 * because it works on multiple ctxs */
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);

		/* Try to schedule the context in */
		kbase_js_sched_all(kbdev);

		/* Wait for the context to be scheduled in */
		wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
			kctx->jctx.sched_info.ctx.is_scheduled);
	} else {
		/* Already scheduled in - We need to retain it to keep the
		 * corresponding address space */
		kbasep_js_runpool_retain_ctx(kbdev, kctx);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);
	}
}
KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	bool pending;

	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;

	/* We don't need to use the address space anymore */
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED);
	pending = kctx->as_pending;
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	/* Release the context - it will be scheduled out if there is no
	 * pending job */
	if (!pending)
		kbasep_js_runpool_release_ctx(kbdev, kctx);

	kbase_js_sched_all(kbdev);
}
KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
void kbasep_js_suspend(struct kbase_device *kbdev)
{
	unsigned long flags;
	struct kbasep_js_device_data *js_devdata;
	int i;
	u16 retained = 0u;
	int nr_privileged_ctx = 0;

	KBASE_DEBUG_ASSERT(kbdev);
	KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* Prevent all contexts from submitting */
	js_devdata->runpool_irq.submit_allowed = 0;

	/* Retain each of the contexts, so we can cause it to leave even if it
	 * had no refcount to begin with */
	for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
		struct kbasep_js_per_as_data *js_per_as_data =
			&js_devdata->runpool_irq.per_as_data[i];
		struct kbase_context *kctx = js_per_as_data->kctx;

		retained = retained << 1;

		if (kctx) {
			++(js_per_as_data->as_busy_refcount);
			retained |= 1u;
			/* We can only cope with up to 1 privileged context -
			 * the instrumented context. It'll be suspended by
			 * disabling instrumentation */
			if (kctx->jctx.sched_info.ctx.flags &
					KBASE_CTX_FLAG_PRIVILEGED)
				KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1);
		}
	}
	CSTD_UNUSED(nr_privileged_ctx);
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	/* De-ref the previous retain to ensure each context gets pulled out
	 * sometime later. */
	for (i = 0;
		 i < BASE_MAX_NR_AS;
		 ++i, retained = retained >> 1) {
		struct kbasep_js_per_as_data *js_per_as_data =
			&js_devdata->runpool_irq.per_as_data[i];
		struct kbase_context *kctx = js_per_as_data->kctx;

		if (retained & 1u)
			kbasep_js_runpool_release_ctx(kbdev, kctx);
	}

	/* Caller must wait for all Power Manager active references to be
	 * dropped */
}
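/* The 'retained' bitmask above records, one bit per address space, which
 * contexts were refcounted in the first (locked) pass; the second pass shifts
 * it back down and drops exactly those references, guaranteeing each resident
 * context is eventually evicted from the runpool. */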
void kbasep_js_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata;
	int js;

	KBASE_DEBUG_ASSERT(kbdev);
	js_devdata = &kbdev->js_data;
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	mutex_lock(&js_devdata->queue_mutex);
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
		struct kbase_context *kctx, *n;

		list_for_each_entry_safe(kctx, n,
				&kbdev->js_data.ctx_list_unpullable[js],
				jctx.sched_info.ctx.ctx_list_entry[js]) {
			struct kbasep_js_kctx_info *js_kctx_info;
			unsigned long flags;
			bool timer_sync = false;

			js_kctx_info = &kctx->jctx.sched_info;

			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
			mutex_lock(&js_devdata->runpool_mutex);
			spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

			if (!js_kctx_info->ctx.is_scheduled &&
					kbase_js_ctx_pullable(kctx, js, false))
				timer_sync = kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);

			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);
			if (timer_sync)
				kbase_backend_ctx_count_changed(kbdev);
			mutex_unlock(&js_devdata->runpool_mutex);
			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		}
	}
	mutex_unlock(&js_devdata->queue_mutex);

	/* Restart atom processing */
	kbase_js_sched_all(kbdev);

	/* JS Resume complete */
}
bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom)
{
	if ((katom->core_req & BASE_JD_REQ_FS) &&
			(katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
			BASE_JD_REQ_T)))
		return false;

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
			(katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
			(katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
		return false;

	return true;
}

static int kbase_js_get_slot(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_FS)
		return 0;

	if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
		if (katom->device_nr == 1 &&
				kbdev->gpu_props.num_core_groups == 2)
			return 2;
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
			return 2;
	}

	return 1;
}
int kbase_js_dep_resolved_submit(struct kbase_context *kctx,
					struct kbase_jd_atom *katom,
					bool *enqueue_required)
{
	katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	/* If slot will transition from unpullable to pullable then add to
	 * pullable list */
	if (jsctx_rb_none_to_pull(kctx, katom->slot_nr)) {
		*enqueue_required = true;
	} else {
		*enqueue_required = false;
	}
	/* Check if there are lower priority jobs to soft stop */
	kbase_job_slot_ctx_priority_check_locked(kctx, katom);

	/* Add atom to ring buffer. */
	if (unlikely(jsctx_rb_add_atom(kctx, katom))) {
		/* The ring buffer is full. This should be impossible as the
		 * job dispatcher cannot submit enough atoms to exceed the
		 * ring buffer size. Fail the job.
		 */
		WARN(1, "Job submit while JSCTX ringbuffer already full\n");
		return -EINVAL;
	}

	katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;

	return 0;
}
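/* *enqueue_required tells the caller whether this submission made the slot go
 * from empty to non-empty; only in that case does the context need
 * (re)adding to the pullable list. */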
struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
{
	struct kbase_jd_atom *katom;
	struct kbasep_js_device_data *js_devdata;
	int pulled;

	KBASE_DEBUG_ASSERT(kctx);

	js_devdata = &kctx->kbdev->js_data;
	lockdep_assert_held(&js_devdata->runpool_irq.lock);

	if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
		return NULL;
	if (kbase_pm_is_suspending(kctx->kbdev))
		return NULL;

	katom = jsctx_rb_peek(kctx, js);
	if (!katom)
		return NULL;

	if (atomic_read(&katom->blocked))
		return NULL;

	/* Due to ordering restrictions when unpulling atoms on failure, we do
	 * not allow multiple runs of fail-dep atoms from the same context to be
	 * present on the same slot */
	if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) &&
			atomic_read(&kctx->atoms_pulled_slot[js])) {
		struct kbase_jd_atom *prev_atom =
				kbase_backend_inspect_tail(kctx->kbdev, js);

		if (prev_atom && prev_atom->kctx != kctx)
			return NULL;
	}

	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
		if (katom->x_pre_dep->gpu_rb_state ==
				KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)
			return NULL;
		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
			return NULL;
	}

	kctx->pulled = true;
	pulled = atomic_inc_return(&kctx->atoms_pulled);
	if (pulled == 1 && !kctx->slots_pullable) {
		WARN_ON(kctx->ctx_runnable_ref);
		kctx->ctx_runnable_ref = true;
		atomic_inc(&kctx->kbdev->js_data.nr_contexts_runnable);
	}
	atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
	jsctx_rb_pull(kctx, katom);

	kbasep_js_runpool_retain_ctx_nolock(kctx->kbdev, kctx);
	katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;

	katom->sched_info.cfs.ticks = 0;

	return katom;
}
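/* Every successful pull takes a runpool reference on the context and tags the
 * atom with KBASE_KATOM_FLAG_HOLDING_CTX_REF; the matching release happens on
 * the completion and unpull paths (see js_return_worker() below). */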
static void js_return_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
	struct kbasep_js_atom_retained_state retained_state;
	int js = katom->slot_nr;
	bool timer_sync = false;
	bool context_idle = false;
	unsigned long flags;
	base_jd_core_req core_req = katom->core_req;
	u64 affinity = katom->affinity;
	enum kbase_atom_coreref_state coreref_state = katom->coreref_state;

	kbase_backend_complete_wq(kbdev, katom);

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);

	kbasep_js_atom_retained_state_copy(&retained_state, katom);

	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	atomic_dec(&kctx->atoms_pulled);
	atomic_dec(&kctx->atoms_pulled_slot[js]);

	atomic_dec(&katom->blocked);

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
			jsctx_rb_none_to_pull(kctx, js))
		timer_sync |= kbase_js_ctx_list_remove(kbdev, kctx, js);

	if (!atomic_read(&kctx->atoms_pulled)) {
		if (!kctx->slots_pullable) {
			WARN_ON(!kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = false;
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
		}

		if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
				!js_kctx_info->ctx.is_dying) {
			int num_slots = kbdev->gpu_props.num_job_slots;
			int slot;

			if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
				kbasep_js_set_submit_allowed(js_devdata, kctx);

			for (slot = 0; slot < num_slots; slot++) {
				if (kbase_js_ctx_pullable(kctx, slot, true))
					timer_sync |=
						kbase_js_ctx_list_add_pullable(
							kbdev, kctx, slot);
			}
		}

		kbase_jm_idle_ctx(kbdev, kctx);

		context_idle = true;
	}

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	if (context_idle) {
		WARN_ON(!kctx->ctx_active);
		kctx->ctx_active = false;
		kbase_pm_context_idle(kbdev);
	}

	if (timer_sync)
		kbase_js_sync_timers(kbdev);

	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_unlock(&js_devdata->queue_mutex);

	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
			&retained_state);

	kbase_js_sched_all(kbdev);

	kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
			coreref_state);
}
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	jsctx_rb_unpull(kctx, katom);

	WARN_ON(work_pending(&katom->work));

	/* Block re-submission until workqueue has run */
	atomic_inc(&katom->blocked);

	kbase_job_check_leave_disjoint(kctx->kbdev, katom);

	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, js_return_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
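/* Unpulling defers the bookkeeping to js_return_worker(): katom->blocked
 * keeps the atom from being pulled again until the worker has decremented the
 * pull counts and released the context reference taken at pull time. */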
static bool kbase_js_evict_atom(struct kbase_context *kctx,
				struct kbase_jd_atom *katom_evict,
				struct kbase_jd_atom *start_katom,
				struct kbase_jd_atom *head_katom,
				struct list_head *evict_list,
				struct jsctx_rb *rb, int idx)
{
	struct kbase_jd_atom *x_dep = katom_evict->x_post_dep;

	if (!(katom_evict->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) &&
			katom_evict != start_katom)
		return false;

	if (katom_evict->gpu_rb_state != KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
		WARN_ON(katom_evict->event_code != head_katom->event_code);

		return false;
	}

	if (katom_evict->status == KBASE_JD_ATOM_STATE_HW_COMPLETED &&
			katom_evict != head_katom)
		return false;

	/* Evict cross dependency if present */
	if (x_dep && (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)
			&& (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER))
		list_add_tail(&x_dep->dep_item[0], evict_list);

	/* If cross dependency is present and does not have a data dependency
	 * then unblock */
	if (x_dep && (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)
			&& !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER))
		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;

	if (katom_evict != head_katom) {
		rb->entries[idx & JSCTX_RB_MASK].atom_id =
				KBASEP_ATOM_ID_INVALID;

		katom_evict->event_code = head_katom->event_code;
		katom_evict->atom_flags &=
				~KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED;

		if (katom_evict->atom_flags & KBASE_KATOM_FLAG_HOLDING_CTX_REF)
			kbase_jd_done(katom_evict, katom_evict->slot_nr, NULL,
					0);
		else
			kbase_jd_evict(kctx->kbdev, katom_evict);
	}

	return true;
}
/**
 * kbase_js_evict_deps - Evict dependencies
 * @kctx:       Context pointer
 * @head_katom: Pointer to the atom to evict
 *
 * Remove all post dependencies of an atom from the context ringbuffers.
 *
 * The original atom's event_code will be propagated to all dependent atoms.
 *
 * Context: Caller must hold both jctx and HW access locks
 */
static void kbase_js_evict_deps(struct kbase_context *kctx,
				struct kbase_jd_atom *head_katom)
{
	struct list_head evict_list;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	INIT_LIST_HEAD(&evict_list);

	list_add_tail(&head_katom->dep_item[0], &evict_list);

	while (!list_empty(&evict_list)) {
		struct kbase_jd_atom *start_katom;

		start_katom = list_entry(evict_list.prev, struct kbase_jd_atom,
				dep_item[0]);
		list_del(evict_list.prev);

		jsctx_rb_evict(kctx, start_katom, head_katom, &evict_list);
	}
}
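/* evict_list is used as a LIFO work-stack: entries are pushed with
 * list_add_tail() and popped from the tail (evict_list.prev), so newly
 * discovered cross-slot dependents are processed before older entries. */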
/**
 * kbase_js_compact - Compact JSCTX ringbuffers
 * @kctx: Context pointer
 *
 * Compact the JSCTX ringbuffers, removing any NULL entries
 *
 * Context: Caller must hold both jctx and HW access locks
 */
static void kbase_js_compact(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	int js;

	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		jsctx_rb_compact(kctx, js);
}
bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
				struct kbase_jd_atom *katom)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_device *kbdev;
	unsigned long flags;
	bool timer_sync = false;
	int atom_slot;
	bool context_idle = false;

	kbdev = kctx->kbdev;
	atom_slot = katom->slot_nr;

	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);

	mutex_lock(&js_devdata->runpool_mutex);
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) {
		if (katom->event_code != BASE_JD_EVENT_DONE)
			kbase_js_evict_deps(kctx, katom);

		jsctx_rb_remove(kctx, katom);

		context_idle = !atomic_dec_return(&kctx->atoms_pulled);
		atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);

		if (!atomic_read(&kctx->atoms_pulled) &&
				!kctx->slots_pullable) {
			WARN_ON(!kctx->ctx_runnable_ref);
			kctx->ctx_runnable_ref = false;
			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
		}

		if (katom->event_code != BASE_JD_EVENT_DONE)
			kbase_js_compact(kctx);
	}

	if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
			jsctx_rb_none_to_pull(kctx, atom_slot))
		timer_sync |= kbase_js_ctx_list_remove(kctx->kbdev, kctx,
				atom_slot);

	/*
	 * If submission is disabled on this context (most likely due to an
	 * atom failure) and there are now no atoms left in the system then
	 * re-enable submission so that context can be scheduled again.
	 */
	if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
			!atomic_read(&kctx->atoms_pulled) &&
			!js_kctx_info->ctx.is_dying) {
		int js;

		kbasep_js_set_submit_allowed(js_devdata, kctx);

		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);
		}
	} else if (katom->x_post_dep &&
			kbasep_js_is_submit_allowed(js_devdata, kctx)) {
		int js;

		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kbdev, kctx, js);
		}
	}

	/* Mark context as inactive. The pm reference will be dropped later in
	 * jd_done_worker().
	 */
	if (context_idle)
		kctx->ctx_active = false;

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
	if (timer_sync)
		kbase_backend_ctx_count_changed(kbdev);
	mutex_unlock(&js_devdata->runpool_mutex);

	return context_idle;
}
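/* The return value mirrors context_idle: a true return tells the caller that
 * this was the last pulled atom for the context, and that the power
 * management reference taken when the context became active is to be dropped
 * on the later completion path. */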
void kbase_js_complete_atom(struct kbase_jd_atom *katom, ktime_t *end_timestamp)
{
	u64 microseconds_spent = 0;
	struct kbase_device *kbdev;
	struct kbase_context *kctx = katom->kctx;
	union kbasep_js_policy *js_policy;
	struct kbasep_js_device_data *js_devdata;

	kbdev = kctx->kbdev;

	js_policy = &kbdev->js_data.policy;
	js_devdata = &kbdev->js_data;

	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);

	katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;

#if defined(CONFIG_MALI_GATOR_SUPPORT)
	kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP,
				katom->slot_nr), NULL, 0);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
	kbase_tlstream_tl_nret_atom_lpu(
			katom,
			&kbdev->gpu_props.props.raw_props.js_features[
				katom->slot_nr]);
	kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
	kbase_tlstream_tl_nret_ctx_lpu(
			kctx,
			&kbdev->gpu_props.props.raw_props.js_features[
				katom->slot_nr]);
#endif

	/* Calculate the job's time used */
	if (end_timestamp != NULL) {
		/* Only calculating it for jobs that really run on the HW (e.g.
		 * removed from next jobs never actually ran, so really did take
		 * zero time) */
		ktime_t tick_diff = ktime_sub(*end_timestamp,
			katom->start_timestamp);

		microseconds_spent = ktime_to_ns(tick_diff);

		do_div(microseconds_spent, 1000);

		/* Round up time spent to the minimum timer resolution */
		if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
			microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
	}

	/* Log the result of the job (completion status, and time spent). */
	kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent);

	kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);

	/* Unblock cross dependency if present */
	if (katom->x_post_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
			!(katom->x_post_dep->atom_flags &
			KBASE_KATOM_FLAG_FAIL_BLOCKER)))
		katom->x_post_dep->atom_flags &=
				~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
}
void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
{
	struct kbasep_js_device_data *js_devdata;
	union kbasep_js_policy *js_policy;
	bool timer_sync = false;

	js_devdata = &kbdev->js_data;
	js_policy = &js_devdata->policy;

	down(&js_devdata->schedule_sem);
	mutex_lock(&js_devdata->queue_mutex);

	while (js_mask) {
		int js;

		js = ffs(js_mask) - 1;

		while (1) {
			struct kbase_context *kctx;
			unsigned long flags;
			bool context_idle = false;

			kctx = kbase_js_ctx_list_pop_head(kbdev, js);

			if (!kctx) {
				js_mask &= ~(1 << js);
				break; /* No contexts on pullable list */
			}

			if (!kctx->ctx_active) {
				context_idle = true;

				if (kbase_pm_context_active_handle_suspend(
						kbdev,
						KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
					/* Suspend pending - return context to
					 * queue and stop scheduling */
					mutex_lock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
					if (kbase_js_ctx_list_add_pullable_head(
						kctx->kbdev, kctx, js))
						kbase_js_sync_timers(kbdev);
					mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
					mutex_unlock(&js_devdata->queue_mutex);
					up(&js_devdata->schedule_sem);
					return;
				}
				kctx->ctx_active = true;
			}

			if (!kbase_js_use_ctx(kbdev, kctx)) {
				mutex_lock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
				/* Context cannot be used at this time */
				spin_lock_irqsave(&js_devdata->runpool_irq.lock,
						flags);
				if (kctx->as_pending ||
					kbase_js_ctx_pullable(kctx, js, false)
					|| (kctx->jctx.sched_info.ctx.flags &
						KBASE_CTX_FLAG_PRIVILEGED))
					timer_sync |=
					kbase_js_ctx_list_add_pullable_head(
							kctx->kbdev, kctx, js);
				else
					timer_sync |=
					kbase_js_ctx_list_add_unpullable(
							kctx->kbdev, kctx, js);
				spin_unlock_irqrestore(
					&js_devdata->runpool_irq.lock, flags);
				mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);
				if (context_idle) {
					WARN_ON(!kctx->ctx_active);
					kctx->ctx_active = false;
					kbase_pm_context_idle(kbdev);
				}

				/* No more jobs can be submitted on this slot */
				js_mask &= ~(1 << js);
				break;
			}
			mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
			spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

			kctx->pulled = false;

			if (!kbase_jm_kick(kbdev, 1 << js))
				/* No more jobs can be submitted on this slot */
				js_mask &= ~(1 << js);

			if (!kctx->pulled) {
				/* Failed to pull jobs - push to head of list */
				if (kbase_js_ctx_pullable(kctx, js, true))
					timer_sync |=
					kbase_js_ctx_list_add_pullable_head(
							kctx->kbdev,
							kctx, js);
				else
					timer_sync |=
					kbase_js_ctx_list_add_unpullable(
							kctx->kbdev,
							kctx, js);

				if (context_idle) {
					kbase_jm_idle_ctx(kbdev, kctx);
					spin_unlock_irqrestore(
						&js_devdata->runpool_irq.lock,
						flags);
					WARN_ON(!kctx->ctx_active);
					kctx->ctx_active = false;
					kbase_pm_context_idle(kbdev);
				} else {
					spin_unlock_irqrestore(
						&js_devdata->runpool_irq.lock,
						flags);
				}
				mutex_unlock(
					&kctx->jctx.sched_info.ctx.jsctx_mutex);

				js_mask &= ~(1 << js);
				break; /* Could not run atoms on this slot */
			}

			/* Push to back of list */
			if (kbase_js_ctx_pullable(kctx, js, true))
				timer_sync |= kbase_js_ctx_list_add_pullable(
						kctx->kbdev, kctx, js);
			else
				timer_sync |= kbase_js_ctx_list_add_unpullable(
						kctx->kbdev, kctx, js);
			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);
			mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
		}
	}

	if (timer_sync)
		kbase_js_sync_timers(kbdev);

	mutex_unlock(&js_devdata->queue_mutex);
	up(&js_devdata->schedule_sem);
}
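/* js_mask is consumed one slot at a time via ffs(): a bit is cleared once no
 * more work can be submitted on that slot, so the outer loop terminates when
 * every requested slot has either been filled or found empty. */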
void kbase_js_zap_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
	int js;

	/*
	 * Critical assumption: No more submission is possible outside of the
	 * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
	 * whilst the struct kbase_context is terminating.
	 */

	/* First, atomically do the following:
	 * - mark the context as dying
	 * - try to evict it from the policy queue */
	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	js_kctx_info->ctx.is_dying = true;

	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);

	/*
	 * At this point we know:
	 * - If eviction succeeded, it was in the policy queue, but now no
	 *   longer is
	 *  - We must cancel the jobs here. No Power Manager active reference to
	 *    release.
	 *  - This happens asynchronously - kbase_jd_zap_context() will wait for
	 *    those jobs to be killed.
	 * - If eviction failed, then it wasn't in the policy queue. It is one
	 *   of the following:
	 *  - a. it didn't have any jobs, and so is not in the Policy Queue or
	 *       the Run Pool (not scheduled)
	 *   - Hence, no more work required to cancel jobs. No Power Manager
	 *     active reference to release.
	 *  - b. it was in the middle of a scheduling transaction (and thus must
	 *       have at least 1 job). This can happen from a syscall or a
	 *       kernel thread. We still hold the jsctx_mutex, and so the thread
	 *       must be waiting inside kbasep_js_try_schedule_head_ctx(),
	 *       before checking whether the runpool is full. That thread will
	 *       continue after we drop the mutex, and will notice the context
	 *       is dying. It will rollback the transaction, killing all jobs at
	 *       the same time. kbase_jd_zap_context() will wait for those jobs
	 *       to be killed.
	 *   - Hence, no more work required to cancel jobs, or to release the
	 *     Power Manager active reference.
	 *  - c. it is scheduled, and may or may not be running jobs
	 *   - We must cause it to leave the runpool by stopping it from
	 *     submitting any more jobs. When it finally does leave,
	 *     kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining
	 *     jobs (because it is dying), release the Power Manager active
	 *     reference, and will not requeue the context in the policy queue.
	 *     kbase_jd_zap_context() will wait for those jobs to be killed.
	 *   - Hence, work required just to make it leave the runpool.
	 *     Cancelling jobs and releasing the Power Manager active reference
	 *     will be handled when it leaves the runpool.
	 */
	if (!js_kctx_info->ctx.is_scheduled) {
		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
			if (!list_empty(
				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
				list_del_init(
				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
		}

		/* The following events require us to kill off remaining jobs
		 * and update PM book-keeping:
		 * - we evicted it correctly (it must have jobs to be in the
		 *   Queue)
		 *
		 * These events need no action, but take this path anyway:
		 * - Case a: it didn't have any jobs, and was never in the Queue
		 * - Case b: scheduling transaction will be partially rolled-
		 *           back (this already cancels the jobs)
		 */

		KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
				js_kctx_info->ctx.is_scheduled);

		dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);

		/* Only cancel jobs when we evicted from the policy
		 * queue. No Power Manager active reference was held.
		 *
		 * Having is_dying set ensures that this kills, and
		 * doesn't requeue */
		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);

		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);
	} else {
		unsigned long flags;
		bool was_retained;

		/* Case c: didn't evict, but it is scheduled - it's in the Run
		 * Pool */
		KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
				js_kctx_info->ctx.is_scheduled);
		dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);

		/* Disable the ctx from submitting any more jobs */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

		kbasep_js_clear_submit_allowed(js_devdata, kctx);

		/* Retain and (later) release the context whilst it is now
		 * disallowed from submitting jobs - ensures that someone
		 * somewhere will be removing the context later on */
		was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);

		/* Since it's scheduled and we have the jsctx_mutex, it must be
		 * retained successfully */
		KBASE_DEBUG_ASSERT(was_retained);

		dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);

		/* Cancel any remaining running jobs for this kctx - if any.
		 * Submit is disallowed which takes effect immediately, so no
		 * more new jobs will appear after we do this. */
		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
			kbase_job_slot_hardstop(kctx, js, NULL);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);

		dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
				kctx);

		kbasep_js_runpool_release_ctx(kbdev, kctx);
	}

	KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);

	/* After this, you must wait on both the
	 * kbase_jd_context::zero_jobs_wait and the
	 * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
	 * to be destroyed, and the context to be de-scheduled (if it was on the
	 * Run Pool).
	 *
	 * kbase_jd_zap_context() will do this. */
}
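/* Zapping therefore never frees anything itself: it only guarantees that jobs
 * are being cancelled and that the context is on its way out of the runpool;
 * kbase_jd_zap_context() performs the actual waiting described above. */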
static inline int trace_get_refcnt(struct kbase_device *kbdev,
					struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	int as_nr;
	int refcnt = 0;

	js_devdata = &kbdev->js_data;

	as_nr = kctx->as_nr;
	if (as_nr != KBASEP_AS_NR_INVALID) {
		struct kbasep_js_per_as_data *js_per_as_data;

		js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

		refcnt = js_per_as_data->as_busy_refcount;
	}

	return refcnt;
}
/**
 * kbase_js_foreach_ctx_job - Call a function on all jobs in context
 * @kctx:     Pointer to context.
 * @callback: Pointer to function to call for each job.
 *
 * Call a function on all jobs belonging to a non-queued, non-running
 * context, and detach the jobs from the context as it goes.
 *
 * Due to the locks that might be held at the time of the call, the callback
 * may need to defer work on a workqueue to complete its actions (e.g. when
 * cancelling jobs)
 *
 * Atoms will be removed from the queue, so this must only be called when
 * cancelling jobs (which occurs as part of context destruction).
 *
 * The locking conditions on the caller are as follows:
 * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
		kbasep_js_policy_ctx_job_cb callback)
{
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	unsigned long flags;
	u32 js;

	kbdev = kctx->kbdev;

	js_devdata = &kbdev->js_data;

	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
			0u, trace_get_refcnt(kbdev, kctx));

	/* Invoke callback on jobs on each slot in turn */
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
		jsctx_rb_foreach(kctx, js, callback);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
}