3 * (C) COPYRIGHT ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * Job Scheduler Implementation
23 #include <mali_kbase.h>
24 #include <mali_kbase_js.h>
25 #include <mali_kbase_js_affinity.h>
26 #include <mali_kbase_gator.h>
27 #include <mali_kbase_hw.h>
29 #include "mali_kbase_jm.h"
30 #include <mali_kbase_defs.h>
36 /** Bitpattern indicating the result of releasing a context */
38 /** The context was descheduled - caller should try scheduling in a new one
39 * to keep the runpool full */
40 KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
43 typedef u32 kbasep_js_release_result;
46 * Private function prototypes
48 STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
50 STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
52 STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
54 /** Helper for trace subcodes */
55 #if KBASE_TRACE_ENABLE != 0
56 STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
59 kbasep_js_device_data *js_devdata;
63 js_devdata = &kbdev->js_data;
65 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
67 if (as_nr != KBASEP_AS_NR_INVALID) {
68 kbasep_js_per_as_data *js_per_as_data;
69 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
71 refcnt = js_per_as_data->as_busy_refcount;
73 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
77 #else /* KBASE_TRACE_ENABLE != 0 */
78 STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
84 #endif /* KBASE_TRACE_ENABLE != 0 */
90 JS_DEVDATA_INIT_NONE = 0,
91 JS_DEVDATA_INIT_CONSTANTS = (1 << 0),
92 JS_DEVDATA_INIT_POLICY = (1 << 1),
93 JS_DEVDATA_INIT_ALL = ((1 << 2) - 1)
97 JS_KCTX_INIT_NONE = 0,
98 JS_KCTX_INIT_CONSTANTS = (1 << 0),
99 JS_KCTX_INIT_POLICY = (1 << 1),
100 JS_KCTX_INIT_ALL = ((1 << 2) - 1)
108 * Check if the job had performance monitoring enabled and decrement the count. If no jobs require
109 * performance monitoring, then the cycle counters will be disabled in the GPU.
111 * No locks need to be held - locking is handled further down
113 * This function does not sleep.
116 STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
118 KBASE_DEBUG_ASSERT(kbdev != NULL);
119 KBASE_DEBUG_ASSERT(katom != NULL);
121 if (katom->core_req & BASE_JD_REQ_PERMON)
122 kbase_pm_release_gpu_cycle_counter(kbdev);
126 * Check if the job has performance monitoring enabled and keep a count of it. If at least one
127 * job requires performance monitoring, then the cycle counters will be enabled in the GPU.
129 * No locks need to be held - locking is handled further down
131 * This function does not sleep.
134 STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
136 KBASE_DEBUG_ASSERT(kbdev != NULL);
137 KBASE_DEBUG_ASSERT(katom != NULL);
139 if (katom->core_req & BASE_JD_REQ_PERMON)
140 kbase_pm_request_gpu_cycle_counter(kbdev);
144 * The following locking conditions are made on the caller:
145 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
146 * - The caller must hold the kbasep_js_device_data::runpool_mutex
148 STATIC INLINE void runpool_inc_context_count(kbase_device *kbdev, kbase_context *kctx)
150 kbasep_js_device_data *js_devdata;
151 kbasep_js_kctx_info *js_kctx_info;
152 KBASE_DEBUG_ASSERT(kbdev != NULL);
153 KBASE_DEBUG_ASSERT(kctx != NULL);
155 js_devdata = &kbdev->js_data;
156 js_kctx_info = &kctx->jctx.sched_info;
158 BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
159 BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
161 /* Track total contexts */
162 KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
163 ++(js_devdata->nr_all_contexts_running);
165 if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
166 /* Track contexts that can submit jobs */
167 KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running < S8_MAX);
168 ++(js_devdata->nr_user_contexts_running);
173 * The following locking conditions are made on the caller:
174 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
175 * - The caller must hold the kbasep_js_device_data::runpool_mutex
177 STATIC INLINE void runpool_dec_context_count(kbase_device *kbdev, kbase_context *kctx)
179 kbasep_js_device_data *js_devdata;
180 kbasep_js_kctx_info *js_kctx_info;
181 KBASE_DEBUG_ASSERT(kbdev != NULL);
182 KBASE_DEBUG_ASSERT(kctx != NULL);
184 js_devdata = &kbdev->js_data;
185 js_kctx_info = &kctx->jctx.sched_info;
187 BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
188 BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
190 /* Track total contexts */
191 --(js_devdata->nr_all_contexts_running);
192 KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
194 if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
195 /* Track contexts that can submit jobs */
196 --(js_devdata->nr_user_contexts_running);
197 KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
202 * @brief check whether the runpool is full for a specified context
204 * If kctx == NULL, then this makes the least restrictive check on the
205 * runpool. A specific context that is supplied immediately after could fail
206 * the check, even under the same conditions.
208 * Therefore, once a context is obtained you \b must re-check it with this
209 * function, since the return value could change to MALI_FALSE.
211 * The following locking conditions are made on the caller:
212 * - In all cases, the caller must hold kbasep_js_device_data::runpool_mutex
213 * - When kctx != NULL the caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
214 * - When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but it doesn't do any harm to do so).
216 STATIC mali_bool check_is_runpool_full(kbase_device *kbdev, kbase_context *kctx)
218 kbasep_js_device_data *js_devdata;
219 mali_bool is_runpool_full;
220 KBASE_DEBUG_ASSERT(kbdev != NULL);
222 js_devdata = &kbdev->js_data;
223 BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
225 /* Regardless of whether a context is submitting or not, can't have more than there
226 * are HW address spaces */
227 is_runpool_full = (mali_bool) (js_devdata->nr_all_contexts_running >= kbdev->nr_hw_address_spaces);
229 if (kctx != NULL && (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
230 BUG_ON(!mutex_is_locked(&kctx->jctx.sched_info.ctx.jsctx_mutex));
231 /* Contexts that submit might use less of the address spaces available, due to HW
232 * workarounds. In which case, the runpool is also full when the number of
233 * submitting contexts exceeds the number of submittable address spaces.
235 * Both checks must be made: can have nr_user_address_spaces == nr_hw_address spaces,
236 * and at the same time can have nr_user_contexts_running < nr_all_contexts_running. */
237 is_runpool_full |= (mali_bool) (js_devdata->nr_user_contexts_running >= kbdev->nr_user_address_spaces);
240 return is_runpool_full;
243 STATIC base_jd_core_req core_reqs_from_jsn_features(u16 features) /* JS<n>_FEATURE register value */
245 base_jd_core_req core_req = 0u;
247 if ((features & JSn_FEATURE_SET_VALUE_JOB) != 0)
248 core_req |= BASE_JD_REQ_V;
250 if ((features & JSn_FEATURE_CACHE_FLUSH_JOB) != 0)
251 core_req |= BASE_JD_REQ_CF;
253 if ((features & JSn_FEATURE_COMPUTE_JOB) != 0)
254 core_req |= BASE_JD_REQ_CS;
256 if ((features & JSn_FEATURE_TILER_JOB) != 0)
257 core_req |= BASE_JD_REQ_T;
259 if ((features & JSn_FEATURE_FRAGMENT_JOB) != 0)
260 core_req |= BASE_JD_REQ_FS;
266 * Picks and reserves an address space.
268 * When this function returns, the address space returned is reserved and
269 * cannot be picked for another context until it is released.
271 * The caller must ensure there \b is a free address space before calling this.
273 * The following locking conditions are made on the caller:
274 * - it must hold kbasep_js_device_data::runpool_mutex
276 * @return a non-NULL pointer to a kbase_as that is not in use by any other context
278 STATIC kbase_as *pick_free_addr_space(kbase_device *kbdev)
280 kbasep_js_device_data *js_devdata;
281 kbase_as *current_as;
283 js_devdata = &kbdev->js_data;
285 lockdep_assert_held(&js_devdata->runpool_mutex);
287 /* Find the free address space */
288 ffs_result = ffs(js_devdata->as_free) - 1;
290 /* ASSERT that we should've found a free one */
291 KBASE_DEBUG_ASSERT(0 <= ffs_result && ffs_result < kbdev->nr_hw_address_spaces);
292 /* Ensure no-one else picks this one */
293 js_devdata->as_free &= ~((u16) (1u << ffs_result));
295 current_as = &kbdev->as[ffs_result];
301 * Release an address space, making it available for being picked again.
303 * The following locking conditions are made on the caller:
304 * - it must hold kbasep_js_device_data::runpool_mutex
306 STATIC INLINE void release_addr_space(kbase_device *kbdev, int kctx_as_nr)
308 kbasep_js_device_data *js_devdata;
309 u16 as_bit = (1u << kctx_as_nr);
311 js_devdata = &kbdev->js_data;
312 lockdep_assert_held(&js_devdata->runpool_mutex);
314 /* The address space must not already be free */
315 KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit));
317 js_devdata->as_free |= as_bit;
321 * Assign an Address Space (AS) to a context, and add the context to the Policy.
324 * - setting up the global runpool_irq structure and the context on the AS
325 * - Activating the MMU on the AS
326 * - Allowing jobs to be submitted on the AS
328 * Locking conditions:
329 * - Caller must hold the kbasep_js_kctx_info::jsctx_mutex
330 * - Caller must hold the kbasep_js_device_data::runpool_mutex
331 * - Caller must hold AS transaction mutex
332 * - Caller must hold Runpool IRQ lock
334 STATIC void assign_and_activate_kctx_addr_space(kbase_device *kbdev, kbase_context *kctx, kbase_as *current_as)
336 kbasep_js_device_data *js_devdata;
337 kbasep_js_per_as_data *js_per_as_data;
340 KBASE_DEBUG_ASSERT(kbdev != NULL);
341 KBASE_DEBUG_ASSERT(kctx != NULL);
342 KBASE_DEBUG_ASSERT(current_as != NULL);
344 js_devdata = &kbdev->js_data;
345 as_nr = current_as->number;
347 lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
348 lockdep_assert_held(&js_devdata->runpool_mutex);
349 lockdep_assert_held(¤t_as->transaction_mutex);
350 lockdep_assert_held(&js_devdata->runpool_irq.lock);
352 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
354 /* Attribute handling */
355 kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
357 /* Assign addr space */
359 #ifdef CONFIG_MALI_GATOR_SUPPORT
360 kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
361 #endif /* CONFIG_MALI_GATOR_SUPPORT */
362 /* Activate this address space on the MMU */
363 kbase_mmu_update(kctx);
365 /* Allow it to run jobs */
366 kbasep_js_set_submit_allowed(js_devdata, kctx);
369 js_per_as_data->kctx = kctx;
370 js_per_as_data->as_busy_refcount = 0;
372 /* Lastly, add the context to the policy's runpool - this really allows it to run jobs */
373 kbasep_js_policy_runpool_add_ctx(&js_devdata->policy, kctx);
377 void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev)
379 kbasep_js_device_data *js_devdata;
382 KBASE_DEBUG_ASSERT(kbdev != NULL);
383 js_devdata = &kbdev->js_data;
384 lockdep_assert_held(&js_devdata->runpool_mutex);
385 lockdep_assert_held(&js_devdata->runpool_irq.lock);
387 /* It's cheap and simple to retest this here - otherwise we burden the
388 * caller with it. In some cases, we do this higher up to optimize out the
390 if (js_devdata->nr_user_contexts_running == 0)
391 return; /* No contexts present - the GPU might be powered off, so just return */
393 for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js)
394 kbasep_js_try_run_next_job_on_slot_nolock(kbdev, js);
397 /** Hold the kbasep_js_device_data::runpool_irq::lock for this */
398 mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context *kctx)
400 kbasep_js_device_data *js_devdata;
401 kbasep_js_per_as_data *js_per_as_data;
402 mali_bool result = MALI_FALSE;
404 KBASE_DEBUG_ASSERT(kbdev != NULL);
405 KBASE_DEBUG_ASSERT(kctx != NULL);
406 js_devdata = &kbdev->js_data;
409 if (as_nr != KBASEP_AS_NR_INVALID) {
412 KBASE_DEBUG_ASSERT(as_nr >= 0);
413 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
415 KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL);
417 new_refcnt = ++(js_per_as_data->as_busy_refcount);
418 KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx, NULL, 0u, new_refcnt);
426 * Functions private to KBase ('Protected' functions)
428 void kbase_js_try_run_jobs(kbase_device *kbdev)
430 kbasep_js_device_data *js_devdata;
433 KBASE_DEBUG_ASSERT(kbdev != NULL);
434 js_devdata = &kbdev->js_data;
436 mutex_lock(&js_devdata->runpool_mutex);
437 if (js_devdata->nr_user_contexts_running != 0) {
438 /* Only try running jobs when we have contexts present, otherwise the GPU might be powered off. */
439 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
441 kbasep_js_try_run_next_job_nolock(kbdev);
443 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
445 mutex_unlock(&js_devdata->runpool_mutex);
448 void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js)
451 kbasep_js_device_data *js_devdata;
453 KBASE_DEBUG_ASSERT(kbdev != NULL);
454 js_devdata = &kbdev->js_data;
456 mutex_lock(&js_devdata->runpool_mutex);
457 if (js_devdata->nr_user_contexts_running != 0) {
458 /* Only try running jobs when we have contexts present, otherwise the GPU might be powered off. */
459 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
461 kbasep_js_try_run_next_job_on_slot_nolock(kbdev, js);
463 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
465 mutex_unlock(&js_devdata->runpool_mutex);
468 mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
470 kbasep_js_device_data *js_devdata;
475 KBASE_DEBUG_ASSERT(kbdev != NULL);
477 js_devdata = &kbdev->js_data;
479 KBASE_DEBUG_ASSERT(js_devdata->init_status == JS_DEVDATA_INIT_NONE);
481 /* These two must be recalculated if nr_hw_address_spaces changes (e.g. for HW workarounds) */
482 as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
483 kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
484 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
485 mali_bool use_workaround_for_security;
486 use_workaround_for_security = (mali_bool) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE);
487 if (use_workaround_for_security != MALI_FALSE) {
488 dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
489 kbdev->nr_user_address_spaces = 1;
492 #ifdef CONFIG_MALI_DEBUG
493 /* Soft-stop will be disabled on a single context by default unless softstop_always is set */
494 js_devdata->softstop_always = MALI_FALSE;
495 #endif /* CONFIG_MALI_DEBUG */
496 js_devdata->nr_all_contexts_running = 0;
497 js_devdata->nr_user_contexts_running = 0;
498 js_devdata->as_free = as_present; /* All ASs initially free */
499 js_devdata->runpool_irq.submit_allowed = 0u; /* No ctx allowed to submit */
500 memset(js_devdata->runpool_irq.ctx_attr_ref_count, 0, sizeof(js_devdata->runpool_irq.ctx_attr_ref_count));
501 memset(js_devdata->runpool_irq.slot_affinities, 0, sizeof(js_devdata->runpool_irq.slot_affinities));
502 js_devdata->runpool_irq.slots_blocked_on_affinity = 0u;
503 memset(js_devdata->runpool_irq.slot_affinity_refcount, 0, sizeof(js_devdata->runpool_irq.slot_affinity_refcount));
504 INIT_LIST_HEAD(&js_devdata->suspended_soft_jobs_list);
506 /* Config attributes */
507 js_devdata->scheduling_tick_ns = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS);
508 js_devdata->soft_stop_ticks = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS);
509 js_devdata->soft_stop_ticks_cl = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL);
510 js_devdata->hard_stop_ticks_ss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS);
511 js_devdata->hard_stop_ticks_cl = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL);
512 js_devdata->hard_stop_ticks_nss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS);
513 js_devdata->gpu_reset_ticks_ss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS);
514 js_devdata->gpu_reset_ticks_cl = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL);
515 js_devdata->gpu_reset_ticks_nss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS);
516 js_devdata->ctx_timeslice_ns = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS);
517 js_devdata->cfs_ctx_runtime_init_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES);
518 js_devdata->cfs_ctx_runtime_min_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES);
520 dev_dbg(kbdev->dev, "JS Config Attribs: ");
521 dev_dbg(kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
522 dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u", js_devdata->soft_stop_ticks);
523 dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u", js_devdata->soft_stop_ticks_cl);
524 dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
525 dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u", js_devdata->hard_stop_ticks_cl);
526 dev_dbg(kbdev->dev, "\thard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
527 dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
528 dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u", js_devdata->gpu_reset_ticks_cl);
529 dev_dbg(kbdev->dev, "\tgpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
530 dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
531 dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
532 dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
534 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
535 dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
537 #if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
538 dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
540 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
541 dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
544 /* setup the number of irq throttle cycles base on given time */
546 int irq_throttle_time_us = kbdev->gpu_props.irq_throttle_time_us;
547 int irq_throttle_cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev, irq_throttle_time_us);
548 atomic_set(&kbdev->irq_throttle_cycles, irq_throttle_cycles);
551 /* Clear the AS data, including setting NULL pointers */
552 memset(&js_devdata->runpool_irq.per_as_data[0], 0, sizeof(js_devdata->runpool_irq.per_as_data));
554 for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
555 js_devdata->js_reqs[i] = core_reqs_from_jsn_features(kbdev->gpu_props.props.raw_props.js_features[i]);
557 js_devdata->init_status |= JS_DEVDATA_INIT_CONSTANTS;
559 /* On error, we could continue on: providing none of the below resources
560 * rely on the ones above */
562 mutex_init(&js_devdata->runpool_mutex);
563 mutex_init(&js_devdata->queue_mutex);
564 spin_lock_init(&js_devdata->runpool_irq.lock);
566 err = kbasep_js_policy_init(kbdev);
567 if (err == MALI_ERROR_NONE)
568 js_devdata->init_status |= JS_DEVDATA_INIT_POLICY;
570 /* On error, do no cleanup; this will be handled by the caller(s), since
571 * we've designed this resource to be safe to terminate on init-fail */
572 if (js_devdata->init_status != JS_DEVDATA_INIT_ALL)
573 return MALI_ERROR_FUNCTION_FAILED;
575 return MALI_ERROR_NONE;
578 void kbasep_js_devdata_halt(kbase_device *kbdev)
583 void kbasep_js_devdata_term(kbase_device *kbdev)
585 kbasep_js_device_data *js_devdata;
587 KBASE_DEBUG_ASSERT(kbdev != NULL);
589 js_devdata = &kbdev->js_data;
591 if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) {
592 s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
593 /* The caller must de-register all contexts before calling this */
594 KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
595 KBASE_DEBUG_ASSERT(memcmp(js_devdata->runpool_irq.ctx_attr_ref_count, zero_ctx_attr_ref_count, sizeof(js_devdata->runpool_irq.ctx_attr_ref_count)) == 0);
596 CSTD_UNUSED(zero_ctx_attr_ref_count);
598 if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY))
599 kbasep_js_policy_term(&js_devdata->policy);
601 js_devdata->init_status = JS_DEVDATA_INIT_NONE;
604 mali_error kbasep_js_kctx_init(kbase_context * const kctx)
607 kbasep_js_kctx_info *js_kctx_info;
610 KBASE_DEBUG_ASSERT(kctx != NULL);
613 KBASE_DEBUG_ASSERT(kbdev != NULL);
615 js_kctx_info = &kctx->jctx.sched_info;
616 KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE);
618 js_kctx_info->ctx.nr_jobs = 0;
619 js_kctx_info->ctx.is_scheduled = MALI_FALSE;
620 js_kctx_info->ctx.is_dying = MALI_FALSE;
621 memset(js_kctx_info->ctx.ctx_attr_ref_count, 0, sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
623 /* Initially, the context is disabled from submission until the create flags are set */
624 js_kctx_info->ctx.flags = KBASE_CTX_FLAG_SUBMIT_DISABLED;
626 js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;
628 /* On error, we could continue on: providing none of the below resources
629 * rely on the ones above */
630 mutex_init(&js_kctx_info->ctx.jsctx_mutex);
632 init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
634 err = kbasep_js_policy_init_ctx(kbdev, kctx);
635 if (err == MALI_ERROR_NONE)
636 js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;
638 /* On error, do no cleanup; this will be handled by the caller(s), since
639 * we've designed this resource to be safe to terminate on init-fail */
640 if (js_kctx_info->init_status != JS_KCTX_INIT_ALL)
641 return MALI_ERROR_FUNCTION_FAILED;
643 return MALI_ERROR_NONE;
646 void kbasep_js_kctx_term(kbase_context *kctx)
649 kbasep_js_kctx_info *js_kctx_info;
650 kbasep_js_policy *js_policy;
652 KBASE_DEBUG_ASSERT(kctx != NULL);
655 KBASE_DEBUG_ASSERT(kbdev != NULL);
657 js_policy = &kbdev->js_data.policy;
658 js_kctx_info = &kctx->jctx.sched_info;
660 if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) {
661 /* The caller must de-register all jobs before calling this */
662 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
663 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
666 if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY))
667 kbasep_js_policy_term_ctx(js_policy, kctx);
669 js_kctx_info->init_status = JS_KCTX_INIT_NONE;
672 /* Evict jobs from the NEXT registers
674 * The caller must hold:
675 * - kbasep_js_kctx_info::ctx::jsctx_mutex
676 * - kbasep_js_device_data::runpool_mutex
/* NOTE(review): this snippet is syntactically incomplete — local
 * declarations (flags, js, slot, tail), the "continue" for the empty-NEXT
 * case and several block braces appear to have been lost; the statements
 * below are kept byte-for-byte pending reconstruction against the full
 * source. */
STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context *kctx)
	kbasep_js_device_data *js_devdata;

	js_devdata = &kbdev->js_data;

	/* Both mutexes protect the per-slot submit bookkeeping walked below */
	BUG_ON(!mutex_is_locked(&kctx->jctx.sched_info.ctx.jsctx_mutex));
	BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));

	/* Prevent contexts in the runpool from submitting jobs */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

	/* There's no need to prevent contexts in the runpool from submitting jobs,
	 * because we complete this operation by the time we release the
	 * runpool_irq.lock */

	/* Evict jobs from the NEXT registers */
	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
		if (!kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL)) {
			/* No job in the NEXT register — nothing to evict on this slot */
		slot = &kbdev->jm_slots[js];
		tail = kbasep_jm_peek_idx_submit_slot(slot, slot->submitted_nr - 1);

		KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 1);
		/* Clearing job from next registers */
		kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);

		/* Check to see if we did remove a job from the next registers */
		if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
			/* The job was successfully cleared from the next registers, requeue it */
			kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
			KBASE_DEBUG_ASSERT(dequeued_katom == tail);

			/* Set the next registers to NULL */
			kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
			kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);

			KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);

			/* Complete the job, indicate that it took no time, and don't start
			 * anything new in its place */
			kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
		KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 0);
	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
735 * Fast start a higher priority job
736 * If the runpool is full, the lower priority contexts with no running jobs
737 * will be evicted from the runpool
739 * If \a kctx_new is NULL, the first context with no running jobs will be evicted
741 * The following locking conditions are made on the caller:
742 * - The caller must \b not hold \a kctx_new's
743 * kbasep_js_kctx_info::ctx::jsctx_mutex, or that mutex of any ctx in the
744 * runpool. This is because \a kctx_new's jsctx_mutex and one of the other
745 * scheduled ctx's jsctx_mutex will be obtained internally.
746 * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this will be
747 * obtained internally)
748 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
749 * obtained internally)
750 * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used
/* NOTE(review): this snippet is syntactically incomplete — local
 * declarations (flags, evict_as_nr), an else-branch, a return after the
 * successful eviction, and several closing braces appear to have been
 * lost; statements below are kept byte-for-byte pending reconstruction
 * against the full source. */
STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_context *kctx_new)
	kbasep_js_device_data *js_devdata;
	kbasep_js_kctx_info *js_kctx_new;
	kbasep_js_policy *js_policy;
	kbasep_js_per_as_data *js_per_as_data;
	kbasep_js_atom_retained_state katom_retained_state;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	if (kctx_new != NULL) {
		js_kctx_new = &kctx_new->jctx.sched_info;
		mutex_lock(&js_kctx_new->ctx.jsctx_mutex);
		CSTD_UNUSED(js_kctx_new);

	/* Setup a dummy katom_retained_state */
	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);

	mutex_lock(&js_devdata->runpool_mutex);

	/* If the runpool is full and either there is no specified context or the specified context is not dying, then
	   attempt to fast start the specified context or evict the first context with no running jobs. */
	if (check_is_runpool_full(kbdev, kctx_new) &&
	    (!js_kctx_new || (js_kctx_new && !js_kctx_new->ctx.is_dying))) {
		/* No free address spaces - attempt to evict non-running lower priority context */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
		for (evict_as_nr = 0; evict_as_nr < kbdev->nr_hw_address_spaces; evict_as_nr++) {
			kbase_context *kctx_evict;
			js_per_as_data = &js_devdata->runpool_irq.per_as_data[evict_as_nr];
			kctx_evict = js_per_as_data->kctx;

			/* Look for the AS which is not currently running */
			if (0 == js_per_as_data->as_busy_refcount && kctx_evict != NULL) {
				/* Now compare the scheduled priority we are considering evicting with the new ctx priority
				 * and take into consideration if the scheduled priority is a realtime policy or not.
				 * Note that the lower the number, the higher the priority
				 */
				if ((kctx_new == NULL) || kbasep_js_policy_ctx_has_priority(js_policy, kctx_evict, kctx_new)) {
					mali_bool retain_result;
					kbasep_js_release_result release_result;
					KBASE_TRACE_ADD(kbdev, JS_FAST_START_EVICTS_CTX, kctx_evict, NULL, 0u, (uintptr_t)kctx_new);

					/* Retain the ctx to work on it - this shouldn't be able to fail */
					retain_result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx_evict);
					KBASE_DEBUG_ASSERT(retain_result != MALI_FALSE);
					CSTD_UNUSED(retain_result);

					/* This will cause the context to be scheduled out on the next runpool_release_ctx(),
					 * and also stop its refcount increasing */
					kbasep_js_clear_submit_allowed(js_devdata, kctx_evict);

					spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
					mutex_unlock(&js_devdata->runpool_mutex);
					if (kctx_new != NULL)
						mutex_unlock(&js_kctx_new->ctx.jsctx_mutex);

					/* Stop working on the target context, start working on the kctx_evict context */

					mutex_lock(&kctx_evict->jctx.sched_info.ctx.jsctx_mutex);
					mutex_lock(&js_devdata->runpool_mutex);
					release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx_evict, &katom_retained_state);
					mutex_unlock(&js_devdata->runpool_mutex);
					/* Only requeue if actually descheduled, which is more robust in case
					 * something else retains it (e.g. two high priority contexts racing
					 * to evict the same lower priority context) */
					if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
						kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx_evict, MALI_TRUE);

					mutex_unlock(&kctx_evict->jctx.sched_info.ctx.jsctx_mutex);

					/* release_result isn't propagated further:
					 * - the caller will be scheduling in a context anyway
					 * - which will also cause new jobs to run */

					/* ctx fast start has taken place */
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	/* ctx fast start has not taken place */
	mutex_unlock(&js_devdata->runpool_mutex);
	if (kctx_new != NULL)
		mutex_unlock(&js_kctx_new->ctx.jsctx_mutex);
849 mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
852 kbasep_js_kctx_info *js_kctx_info;
854 kbasep_js_device_data *js_devdata;
855 kbasep_js_policy *js_policy;
857 mali_bool policy_queue_updated = MALI_FALSE;
859 KBASE_DEBUG_ASSERT(kctx != NULL);
860 KBASE_DEBUG_ASSERT(atom != NULL);
861 lockdep_assert_held(&kctx->jctx.lock);
864 js_devdata = &kbdev->js_data;
865 js_policy = &kbdev->js_data.policy;
866 js_kctx_info = &kctx->jctx.sched_info;
868 KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));
870 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
871 /* Policy-specific initialization of atoms (which cannot fail). Anything that
872 * could've failed must've been done at kbasep_jd_policy_init_job() time. */
873 kbasep_js_policy_register_job(js_policy, kctx, atom);
876 * Begin Runpool transaction
878 mutex_lock(&js_devdata->runpool_mutex);
879 KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc, kbasep_js_trace_get_refcnt(kbdev, kctx));
881 /* Refcount ctx.nr_jobs */
882 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
883 ++(js_kctx_info->ctx.nr_jobs);
885 /* Setup any scheduling information */
886 kbasep_js_clear_job_retry_submit(atom);
888 /* Lock for state available during IRQ */
889 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
891 /* Context Attribute Refcounting */
892 kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
894 /* Enqueue the job in the policy, causing it to be scheduled if the
895 * parent context gets scheduled */
896 kbasep_js_policy_enqueue_job(js_policy, atom);
898 if (js_kctx_info->ctx.is_scheduled != MALI_FALSE) {
899 /* Handle an already running context - try to run the new job, in case it
900 * matches requirements that aren't matched by any other job in the Run
902 kbasep_js_try_run_next_job_nolock(kbdev);
904 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
905 mutex_unlock(&js_devdata->runpool_mutex);
906 /* End runpool transaction */
908 if (js_kctx_info->ctx.is_scheduled == MALI_FALSE) {
909 if (js_kctx_info->ctx.is_dying) {
910 /* A job got added while/after kbase_job_zap_context() was called
911 * on a non-scheduled context (e.g. KDS dependency resolved). Kill
912 * that job by killing the context. */
913 kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_FALSE);
914 } else if (js_kctx_info->ctx.nr_jobs == 1) {
915 /* Handle Refcount going from 0 to 1: schedule the context on the Policy Queue */
916 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
917 dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
919 mutex_lock(&js_devdata->queue_mutex);
920 kbasep_js_policy_enqueue_ctx(js_policy, kctx);
921 mutex_unlock(&js_devdata->queue_mutex);
923 /* Policy Queue was updated - caller must try to schedule the head context
924 * We also try to encourage a fast-start from here. */
925 policy_queue_updated = MALI_TRUE;
928 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
930 /* If the runpool is full and this job has a higher priority than the
931 * non-running job in the runpool - evict it so this higher priority job
932 * starts faster. Fast-starting requires the jsctx_mutex to be dropped,
933 * because it works on multiple ctxs
935 * Note: If the context is being killed with kbase_job_zap_context(), then
936 * kctx can't disappear after the jsctx_mutex was dropped. This is because
937 * the caller holds kctx->jctx.lock */
938 if (policy_queue_updated)
939 kbasep_js_runpool_attempt_fast_start_ctx(kbdev, kctx);
941 return policy_queue_updated;
944 void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *atom)
946 kbasep_js_kctx_info *js_kctx_info;
947 kbasep_js_device_data *js_devdata;
948 kbasep_js_policy *js_policy;
950 KBASE_DEBUG_ASSERT(kbdev != NULL);
951 KBASE_DEBUG_ASSERT(kctx != NULL);
952 KBASE_DEBUG_ASSERT(atom != NULL);
954 js_devdata = &kbdev->js_data;
955 js_policy = &kbdev->js_data.policy;
956 js_kctx_info = &kctx->jctx.sched_info;
958 KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc, kbasep_js_trace_get_refcnt(kbdev, kctx));
960 /* De-refcount ctx.nr_jobs */
961 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
962 --(js_kctx_info->ctx.nr_jobs);
964 /* De-register the job from the system */
965 kbasep_js_policy_deregister_job(js_policy, kctx, atom);
968 void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
971 kbasep_js_atom_retained_state katom_retained_state;
972 kbasep_js_device_data *js_devdata;
973 mali_bool attr_state_changed;
975 KBASE_DEBUG_ASSERT(kbdev != NULL);
976 KBASE_DEBUG_ASSERT(kctx != NULL);
977 KBASE_DEBUG_ASSERT(katom != NULL);
979 js_devdata = &kbdev->js_data;
981 kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
982 kbasep_js_remove_job(kbdev, kctx, katom);
984 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
986 /* The atom has 'finished' (will not be re-run), so no need to call
987 * kbasep_js_has_atom_finished().
989 * This is because it returns MALI_FALSE for soft-stopped atoms, but we
990 * want to override that, because we're cancelling an atom regardless of
991 * whether it was soft-stopped or not */
992 attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, &katom_retained_state);
994 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
996 if (attr_state_changed != MALI_FALSE) {
997 /* A change in runpool ctx attributes might mean we can run more jobs
999 kbase_js_try_run_jobs(kbdev);
1003 mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
1005 unsigned long flags;
1006 kbasep_js_device_data *js_devdata;
1008 KBASE_DEBUG_ASSERT(kbdev != NULL);
1009 js_devdata = &kbdev->js_data;
1011 /* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0,
1012 kbasep_js_trace_get_refcnt(kbdev, kctx)); */
1013 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1014 result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
1015 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1020 kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr)
1022 unsigned long flags;
1023 kbasep_js_device_data *js_devdata;
1024 kbase_context *found_kctx = NULL;
1025 kbasep_js_per_as_data *js_per_as_data;
1027 KBASE_DEBUG_ASSERT(kbdev != NULL);
1028 KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
1029 js_devdata = &kbdev->js_data;
1030 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
1032 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1034 found_kctx = js_per_as_data->kctx;
1036 if (found_kctx != NULL)
1037 ++(js_per_as_data->as_busy_refcount);
1039 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1045 * @brief Try running more jobs after releasing a context and/or atom
1047 * This collates a set of actions that must happen whilst
1048 * kbasep_js_device_data::runpool_irq::lock is held.
1050 * This includes running more jobs when:
1051 * - The previously released kctx caused a ctx attribute change
1052 * - The released atom caused a ctx attribute change
1053 * - Slots were previously blocked due to affinity restrictions
1054 * - Submission during IRQ handling failed
1056 STATIC void kbasep_js_run_jobs_after_ctx_and_atom_release(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state, mali_bool runpool_ctx_attr_change)
1058 kbasep_js_device_data *js_devdata;
1060 KBASE_DEBUG_ASSERT(kbdev != NULL);
1061 KBASE_DEBUG_ASSERT(kctx != NULL);
1062 KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
1063 js_devdata = &kbdev->js_data;
1065 lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
1066 lockdep_assert_held(&js_devdata->runpool_mutex);
1067 lockdep_assert_held(&js_devdata->runpool_irq.lock);
1069 if (js_devdata->nr_user_contexts_running != 0) {
1070 mali_bool retry_submit;
1073 retry_submit = kbasep_js_get_atom_retry_submit_slot(katom_retained_state, &retry_jobslot);
1075 if (runpool_ctx_attr_change != MALI_FALSE) {
1076 /* A change in runpool ctx attributes might mean we can run more jobs
1078 kbasep_js_try_run_next_job_nolock(kbdev);
1080 /* A retry submit on all slots has now happened, so don't need to do it again */
1081 retry_submit = MALI_FALSE;
1084 /* Submit on any slots that might've had atoms blocked by the affinity of
1087 * If no atom has recently completed, then this is harmelss */
1088 kbase_js_affinity_submit_to_blocked_slots(kbdev);
1090 /* If the IRQ handler failed to get a job from the policy, try again from
1091 * outside the IRQ handler
1092 * NOTE: We may've already cleared retry_submit from submitting above */
1093 if (retry_submit != MALI_FALSE) {
1094 KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB, kctx, NULL, 0u, retry_jobslot);
1095 kbasep_js_try_run_next_job_on_slot_nolock(kbdev, retry_jobslot);
1101 * Internal function to release the reference on a ctx and an atom's "retained
1102 * state", only taking the runpool and as transaction mutexes
1104 * This also starts more jobs running in the case of an ctx-attribute state change
1106 * This does none of the followup actions for scheduling:
1107 * - It does not schedule in a new context
1108 * - It does not requeue or handle dying contexts
1110 * For those tasks, just call kbasep_js_runpool_release_ctx() instead
1113 * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
1114 * - Context has a non-zero refcount
1115 * - Caller holds js_kctx_info->ctx.jsctx_mutex
1116 * - Caller holds js_devdata->runpool_mutex
1118 STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
1120 unsigned long flags;
1121 kbasep_js_device_data *js_devdata;
1122 kbasep_js_kctx_info *js_kctx_info;
1123 kbasep_js_policy *js_policy;
1124 kbasep_js_per_as_data *js_per_as_data;
1126 kbasep_js_release_result release_result = 0u;
1127 mali_bool runpool_ctx_attr_change = MALI_FALSE;
1129 kbase_as *current_as;
1132 KBASE_DEBUG_ASSERT(kbdev != NULL);
1133 KBASE_DEBUG_ASSERT(kctx != NULL);
1134 js_kctx_info = &kctx->jctx.sched_info;
1135 js_devdata = &kbdev->js_data;
1136 js_policy = &kbdev->js_data.policy;
1138 /* Ensure context really is scheduled in */
1139 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
1141 /* kctx->as_nr and js_per_as_data are only read from here. The caller's
1142 * js_ctx_mutex provides a barrier that ensures they are up-to-date.
1144 * They will not change whilst we're reading them, because the refcount
1145 * is non-zero (and we ASSERT on that last fact).
1147 kctx_as_nr = kctx->as_nr;
1148 KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
1149 js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr];
1150 KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
1153 * Transaction begins on AS and runpool_irq
1155 * Assert about out calling contract
1157 current_as = &kbdev->as[kctx_as_nr];
1158 mutex_lock(¤t_as->transaction_mutex);
1159 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1160 KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
1161 KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
1163 /* Update refcount */
1164 new_ref_count = --(js_per_as_data->as_busy_refcount);
1166 /* Release the atom if it finished (i.e. wasn't soft-stopped) */
1167 if (kbasep_js_has_atom_finished(katom_retained_state) != MALI_FALSE)
1168 runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, katom_retained_state);
1170 KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u, new_ref_count);
1172 if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED
1173 && !kbase_pm_is_suspending(kbdev) ) {
1174 /* Context is kept scheduled into an address space even when there are no jobs, in this case we have
1175 * to handle the situation where all jobs have been evicted from the GPU and submission is disabled.
1177 * At this point we re-enable submission to allow further jobs to be executed
1179 kbasep_js_set_submit_allowed(js_devdata, kctx);
1182 /* Make a set of checks to see if the context should be scheduled out */
1183 if (new_ref_count == 0 && (kctx->jctx.sched_info.ctx.nr_jobs == 0 || kbasep_js_is_submit_allowed(js_devdata, kctx) == MALI_FALSE)) {
1184 /* Last reference, and we've been told to remove this context from the Run Pool */
1185 dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
1187 kbasep_js_policy_runpool_remove_ctx(js_policy, kctx);
1189 /* Stop any more refcounts occuring on the context */
1190 js_per_as_data->kctx = NULL;
1192 /* Ensure we prevent the context from submitting any new jobs
1193 * e.g. from kbasep_js_try_run_next_job_on_slot_irq_nolock() */
1194 kbasep_js_clear_submit_allowed(js_devdata, kctx);
1196 /* Disable the MMU on the affected address space, and indicate it's invalid */
1197 kbase_mmu_disable(kctx);
1199 #ifdef CONFIG_MALI_GATOR_SUPPORT
1200 kbase_trace_mali_mmu_as_released(kctx->as_nr);
1201 #endif /* CONFIG_MALI_GATOR_SUPPORT */
1203 kctx->as_nr = KBASEP_AS_NR_INVALID;
1205 /* Ctx Attribute handling
1207 * Releasing atoms attributes must either happen before this, or after
1208 * 'is_scheduled' is changed, otherwise we double-decount the attributes*/
1209 runpool_ctx_attr_change |= kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
1211 /* Early update of context count, to optimize the
1212 * kbasep_js_run_jobs_after_ctx_and_atom_release() call */
1213 runpool_dec_context_count(kbdev, kctx);
1215 /* Releasing the context and katom retained state can allow more jobs to run */
1216 kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, katom_retained_state, runpool_ctx_attr_change);
1219 * Transaction ends on AS and runpool_irq:
1221 * By this point, the AS-related data is now clear and ready for re-use.
1223 * Since releases only occur once for each previous successful retain, and no more
1224 * retains are allowed on this context, no other thread will be operating in this
1225 * code whilst we are
1227 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1228 mutex_unlock(¤t_as->transaction_mutex);
1230 /* Free up the address space */
1231 release_addr_space(kbdev, kctx_as_nr);
1232 /* Note: Don't reuse kctx_as_nr now */
1234 /* Synchronize with any policy timers */
1235 kbasep_js_policy_runpool_timers_sync(js_policy);
1237 /* update book-keeping info */
1238 js_kctx_info->ctx.is_scheduled = MALI_FALSE;
1239 /* Signal any waiter that the context is not scheduled, so is safe for
1240 * termination - once the jsctx_mutex is also dropped, and jobs have
1242 wake_up(&js_kctx_info->ctx.is_scheduled_wait);
1244 /* Queue an action to occur after we've dropped the lock */
1245 release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED;
1248 kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, katom_retained_state, runpool_ctx_attr_change);
1250 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1251 mutex_unlock(¤t_as->transaction_mutex);
1254 return release_result;
1257 void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *kctx, mali_bool has_pm_ref)
1259 kbasep_js_device_data *js_devdata;
1260 kbasep_js_policy *js_policy;
1261 kbasep_js_kctx_info *js_kctx_info;
1263 KBASE_DEBUG_ASSERT(kbdev != NULL);
1264 KBASE_DEBUG_ASSERT(kctx != NULL);
1265 js_kctx_info = &kctx->jctx.sched_info;
1266 js_policy = &kbdev->js_data.policy;
1267 js_devdata = &kbdev->js_data;
1269 /* This is called if and only if you've you've detached the context from
1270 * the Runpool or the Policy Queue, and not added it back to the Runpool */
1271 KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
1273 if (js_kctx_info->ctx.is_dying != MALI_FALSE) {
1274 /* Dying: don't requeue, but kill all jobs on the context. This happens
1276 dev_dbg(kbdev->dev, "JS: ** Killing Context %p on RunPool Remove **", kctx);
1277 kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbase_jd_cancel, MALI_TRUE);
1278 } else if (js_kctx_info->ctx.nr_jobs > 0) {
1279 /* Not dying, has jobs: de-ref core counts from each job before addding
1280 * back to the queue */
1281 kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbasep_js_job_check_deref_cores, MALI_FALSE);
1283 dev_dbg(kbdev->dev, "JS: Requeue Context %p", kctx);
1284 mutex_lock(&js_devdata->queue_mutex);
1285 kbasep_js_policy_enqueue_ctx(js_policy, kctx);
1286 mutex_unlock(&js_devdata->queue_mutex);
1288 /* Not dying, no jobs: don't add back to the queue */
1289 dev_dbg(kbdev->dev, "JS: Idling Context %p (not requeued)", kctx);
1293 /* In all cases where we had a pm active refcount, release it */
1294 kbase_pm_context_idle(kbdev);
1298 void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
1300 kbasep_js_device_data *js_devdata;
1301 kbasep_js_kctx_info *js_kctx_info;
1302 kbasep_js_release_result release_result;
1304 KBASE_DEBUG_ASSERT(kbdev != NULL);
1305 KBASE_DEBUG_ASSERT(kctx != NULL);
1306 js_kctx_info = &kctx->jctx.sched_info;
1307 js_devdata = &kbdev->js_data;
1309 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1310 mutex_lock(&js_devdata->runpool_mutex);
1311 release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, katom_retained_state);
1313 /* Drop the runpool mutex to allow requeing kctx */
1314 mutex_unlock(&js_devdata->runpool_mutex);
1315 if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
1316 kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_TRUE);
1318 /* Drop the jsctx_mutex to allow scheduling in a new context */
1319 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1320 if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) {
1321 /* We've freed up an address space, so let's try to schedule in another
1324 * Note: if there's a context to schedule in, then it also tries to run
1325 * another job, in case the new context has jobs satisfying requirements
1326 * that no other context/job in the runpool does */
1327 kbasep_js_try_schedule_head_ctx(kbdev);
1331 void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
1333 kbasep_js_atom_retained_state katom_retained_state;
1335 kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
1337 kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
1340 /** Variant of kbasep_js_runpool_release_ctx() that doesn't call into
1341 * kbasep_js_try_schedule_head_ctx() */
1342 STATIC void kbasep_js_runpool_release_ctx_no_schedule(kbase_device *kbdev, kbase_context *kctx)
1344 kbasep_js_device_data *js_devdata;
1345 kbasep_js_kctx_info *js_kctx_info;
1346 kbasep_js_release_result release_result;
1347 kbasep_js_atom_retained_state katom_retained_state_struct;
1348 kbasep_js_atom_retained_state *katom_retained_state = &katom_retained_state_struct;
1350 KBASE_DEBUG_ASSERT(kbdev != NULL);
1351 KBASE_DEBUG_ASSERT(kctx != NULL);
1352 js_kctx_info = &kctx->jctx.sched_info;
1353 js_devdata = &kbdev->js_data;
1354 kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
1356 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1357 mutex_lock(&js_devdata->runpool_mutex);
1358 release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, katom_retained_state);
1360 /* Drop the runpool mutex to allow requeing kctx */
1361 mutex_unlock(&js_devdata->runpool_mutex);
1362 if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
1363 kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_TRUE);
1365 /* Drop the jsctx_mutex to allow scheduling in a new context */
1366 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1368 /* NOTE: could return release_result if the caller would like to know
1369 * whether it should schedule a new context, but currently no callers do */
1374 * @brief Handle retaining cores for power management and affinity management,
1375 * ensuring that cores are powered up and won't violate affinity restrictions.
1377 * This function enters at the following @ref kbase_atom_coreref_state states:
1379 * - NO_CORES_REQUESTED,
1380 * - WAITING_FOR_REQUESTED_CORES,
1381 * - RECHECK_AFFINITY,
1383  * The transitions are as follows:
1384 * - NO_CORES_REQUESTED -> WAITING_FOR_REQUESTED_CORES
1385 * - WAITING_FOR_REQUESTED_CORES -> ( WAITING_FOR_REQUESTED_CORES or RECHECK_AFFINITY )
1386 * - RECHECK_AFFINITY -> ( WAITING_FOR_REQUESTED_CORES or CHECK_AFFINITY_VIOLATIONS )
1387 * - CHECK_AFFINITY_VIOLATIONS -> ( RECHECK_AFFINITY or READY )
1389 * The caller must hold:
1390 * - kbasep_js_device_data::runpool_irq::lock
1392 * @return MALI_FALSE when the function makes a transition to the same or lower state, indicating
1393 * that the cores are not ready.
1394 * @return MALI_TRUE once READY state is reached, indicating that the cores are 'ready' and won't
1395 * violate affinity restrictions.
1398 STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbase_jd_atom *katom)
1400 /* The most recently checked affinity. Having this at this scope allows us
1401 * to guarantee that we've checked the affinity in this function call. */
1402 u64 recently_chosen_affinity = 0;
1403 mali_bool chosen_affinity = MALI_FALSE;
1409 /* NOTE: The following uses a number of FALLTHROUGHs to optimize the
1410 * calls to this function. Ending of the function is indicated by BREAK OUT */
1411 switch (katom->coreref_state) {
1412 /* State when job is first attempted to be run */
1413 case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
1414 KBASE_DEBUG_ASSERT(katom->affinity == 0);
1416 /* Compute affinity */
1417 if (MALI_FALSE == kbase_js_choose_affinity(&recently_chosen_affinity, kbdev, katom, js)) {
1418 /* No cores are currently available */
1419 /* *** BREAK OUT: No state transition *** */
1423 chosen_affinity = MALI_TRUE;
1425 /* Request the cores */
1426 kbase_pm_request_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
1428 katom->affinity = recently_chosen_affinity;
1430 /* Proceed to next state */
1431 katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
1433 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1435 case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
1437 kbase_pm_cores_ready cores_ready;
1438 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1440 cores_ready = kbase_pm_register_inuse_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
1441 if (cores_ready == KBASE_NEW_AFFINITY) {
1442 /* Affinity no longer valid - return to previous state */
1443 kbasep_js_job_check_deref_cores(kbdev, katom);
1444 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
1445 /* *** BREAK OUT: Return to previous state, retry *** */
1449 if (cores_ready == KBASE_CORES_NOT_READY) {
1450 /* Stay in this state and return, to retry at this state later */
1451 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
1452 /* *** BREAK OUT: No state transition *** */
1455 /* Proceed to next state */
1456 katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
1459 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1461 case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
1462 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1464 /* Optimize out choosing the affinity twice in the same function call */
1465 if (chosen_affinity == MALI_FALSE) {
1466 /* See if the affinity changed since a previous call. */
1467 if (MALI_FALSE == kbase_js_choose_affinity(&recently_chosen_affinity, kbdev, katom, js)) {
1468 /* No cores are currently available */
1469 kbasep_js_job_check_deref_cores(kbdev, katom);
1470 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REQUEST_ON_RECHECK_FAILED, katom->kctx, katom, katom->jc, js, (u32) recently_chosen_affinity);
1471 /* *** BREAK OUT: Transition to lower state *** */
1474 chosen_affinity = MALI_TRUE;
1477 /* Now see if this requires a different set of cores */
1478 if (recently_chosen_affinity != katom->affinity) {
1479 kbase_pm_cores_ready cores_ready;
1481 kbase_pm_request_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
1483 /* Register new cores whilst we still hold the old ones, to minimize power transitions */
1484 cores_ready = kbase_pm_register_inuse_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
1485 kbasep_js_job_check_deref_cores(kbdev, katom);
1487 /* Fixup the state that was reduced by deref_cores: */
1488 katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
1489 katom->affinity = recently_chosen_affinity;
1490 if (cores_ready == KBASE_NEW_AFFINITY) {
1491 /* Affinity no longer valid - return to previous state */
1492 katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
1493 kbasep_js_job_check_deref_cores(kbdev, katom);
1494 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
1495 /* *** BREAK OUT: Return to previous state, retry *** */
1499 /* Now might be waiting for powerup again, with a new affinity */
1500 if (cores_ready == KBASE_CORES_NOT_READY) {
1501 /* Return to previous state */
1502 katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
1503 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_ON_RECHECK_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
1504 /* *** BREAK OUT: Transition to lower state *** */
1508 /* Proceed to next state */
1509 katom->coreref_state = KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
1511 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1512 case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
1513 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1514 KBASE_DEBUG_ASSERT(katom->affinity == recently_chosen_affinity);
1516 /* Note: this is where the caller must've taken the runpool_irq.lock */
1518 /* Check for affinity violations - if there are any, then we just ask
1519 * the caller to requeue and try again later */
1520 if (kbase_js_affinity_would_violate(kbdev, js, katom->affinity) != MALI_FALSE) {
1521 /* Cause a re-attempt to submit from this slot on the next job complete */
1522 kbase_js_affinity_slot_blocked_an_atom(kbdev, js);
1523 /* Return to previous state */
1524 katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
1525 /* *** BREAK OUT: Transition to lower state *** */
1526 KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_AFFINITY_WOULD_VIOLATE, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
1530 /* No affinity violations would result, so the cores are ready */
1531 katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
1532 /* *** BREAK OUT: Cores Ready *** */
1536 KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "Unhandled kbase_atom_coreref_state %d", katom->coreref_state);
1539 } while (retry != MALI_FALSE);
1541 return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
1544 void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *katom)
1546 KBASE_DEBUG_ASSERT(kbdev != NULL);
1547 KBASE_DEBUG_ASSERT(katom != NULL);
1549 switch (katom->coreref_state) {
1550 case KBASE_ATOM_COREREF_STATE_READY:
1551 /* State where atom was submitted to the HW - just proceed to power-down */
1552 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1554 /* *** FALLTHROUGH *** */
1556 case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
1557 /* State where cores were registered */
1558 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1559 kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
1561 /* Note: We do not clear the state for kbase_js_affinity_slot_blocked_an_atom().
1562 * That is handled after finishing the job. This might be slightly
1563 * suboptimal for some corner cases, but is otherwise not a problem
1564 * (and resolves itself after the next job completes). */
1568 case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
1569 /* State where cores were requested, but not registered */
1570 KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
1571 kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
1574 case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
1575 /* Initial state - nothing required */
1576 KBASE_DEBUG_ASSERT(katom->affinity == 0);
1580 KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "Unhandled coreref_state: %d", katom->coreref_state);
1584 katom->affinity = 0;
1585 katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
1589 * Note: this function is quite similar to kbasep_js_try_run_next_job_on_slot()
1591 mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count)
1593 kbasep_js_device_data *js_devdata;
1594 mali_bool cores_ready;
1596 KBASE_DEBUG_ASSERT(kbdev != NULL);
1598 js_devdata = &kbdev->js_data;
1600 /* The caller of this function may not be aware of Ctx Attribute state changes so we
1601 * must recheck if the given slot is still valid. Otherwise do not try to run.
1603 if (kbase_js_can_run_job_on_slot_no_lock(kbdev, js)) {
1604 /* Keep submitting while there's space to run a job on this job-slot,
1605 * and there are jobs to get that match its requirements (see 'break'
1606 * statement below) */
1607 while (*submit_count < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ && kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
1608 kbase_jd_atom *dequeued_atom;
1609 mali_bool has_job = MALI_FALSE;
1611 /* Dequeue a job that matches the requirements */
1612 has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);
1614 if (has_job != MALI_FALSE) {
1615 /* NOTE: since the runpool_irq lock is currently held and acts across
1616 * all address spaces, any context whose busy refcount has reached
1617 * zero won't yet be scheduled out whilst we're trying to run jobs
1619 kbase_context *parent_ctx = dequeued_atom->kctx;
1620 mali_bool retain_success;
1622 /* Retain/power up the cores it needs, check if cores are ready */
1623 cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);
1625 if (cores_ready != MALI_TRUE && dequeued_atom->event_code != BASE_JD_EVENT_PM_EVENT) {
1626 /* The job can't be submitted until the cores are ready, requeue the job */
1627 kbasep_js_policy_enqueue_job(&kbdev->js_data.policy, dequeued_atom);
1631 /* ASSERT that the Policy picked a job from an allowed context */
1632 KBASE_DEBUG_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));
1634 /* Retain the context to stop it from being scheduled out
1635 * This is released when the job finishes */
1636 retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
1637 KBASE_DEBUG_ASSERT(retain_success != MALI_FALSE);
1638 CSTD_UNUSED(retain_success);
1640 /* Retain the affinity on the slot */
1641 kbase_js_affinity_retain_slot_cores(kbdev, js, dequeued_atom->affinity);
1643 /* Check if this job needs the cycle counter enabled before submission */
1644 kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);
1646 if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT) {
1647 dev_warn(kbdev->dev, "Rejecting atom due to BASE_JD_EVENT_PM_EVENT\n");
1648 /* The job has failed due to the specified core group being unavailable */
1649 kbase_jd_done(dequeued_atom, js, NULL, 0);
1651 /* Submit the job */
1652 kbase_job_submit_nolock(kbdev, dequeued_atom, js);
1657 /* No more jobs - stop submitting for this slot */
1663 /* Indicate whether a retry in submission should be tried on a different
1664 * dequeue function. These are the reasons why it *must* happen:
1665 * - the KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ threshold was reached
1666 * and new scheduling must be performed outside of IRQ mode.
1668 * Failure to indicate this correctly could stop further jobs being processed.
1670 * However, we do not _need_ to indicate a retry for the following:
1671 * - kbasep_js_policy_dequeue_job() couldn't get a job. In which case,
1672 * there's no point re-trying outside of IRQ, because the result will be
1673 * the same until job dependencies are resolved, or user-space provides
1674 * more jobs. In both those cases, we try to run jobs anyway, so
1675 * processing does not stop.
1676 * - kbasep_jm_is_submit_slots_free() was MALI_FALSE, indicating jobs were
1677 * already running. When those jobs complete, that will still cause events
1678 * that cause us to resume job submission.
1679 * - kbase_js_can_run_job_on_slot_no_lock() was MALI_FALSE - this is for
1680 * Ctx Attribute handling. That _can_ change outside of IRQ context, but
1681 * is handled explicitly by kbasep_js_runpool_release_ctx_and_katom_retained_state().
1683 return (mali_bool) (*submit_count >= KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ);
/**
 * Try to keep job slot @js busy by repeatedly dequeuing atoms from the JS
 * policy and submitting them, while HW submission slots remain free.
 *
 * Caller context: holds the runpool locks (the "_nolock" suffix); the
 * retain-ctx call below relies on the runpool_irq lock being held so that
 * zero-refcount contexts cannot be scheduled out underneath us.
 *
 * NOTE(review): this extract elides some lines (e.g. the declaration of
 * 'has_job', the do/while opening and several closing braces) - the
 * annotations below describe only what is visible here.
 */
1686 void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
1688 kbasep_js_device_data *js_devdata;
1690 mali_bool cores_ready;
1692 KBASE_DEBUG_ASSERT(kbdev != NULL);
1694 js_devdata = &kbdev->js_data;
/* Precondition: at least one user context is running, otherwise there is
 * nothing to dequeue and submission should not be attempted */
1696 KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running > 0);
1698 /* Keep submitting while there's space to run a job on this job-slot,
1699 * and there are jobs to get that match its requirements (see 'break'
1700 * statement below) */
1701 if (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
1702 /* The caller of this function may not be aware of Ctx Attribute state changes so we
1703 * must recheck if the given slot is still valid. Otherwise do not try to run.
1705 if (kbase_js_can_run_job_on_slot_no_lock(kbdev, js)) {
1707 kbase_jd_atom *dequeued_atom;
1709 /* Dequeue a job that matches the requirements */
1710 has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);
1712 if (has_job != MALI_FALSE) {
1713 /* NOTE: since the runpool_irq lock is currently held and acts across
1714 * all address spaces, any context whose busy refcount has reached
1715 * zero won't yet be scheduled out whilst we're trying to run jobs
1717 kbase_context *parent_ctx = dequeued_atom->kctx;
1718 mali_bool retain_success;
1720 /* Retain/power up the cores it needs, check if cores are ready */
1721 cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);
/* PM_EVENT atoms bypass the cores-ready requeue: they are rejected
 * further down instead of being resubmitted */
1723 if (cores_ready != MALI_TRUE && dequeued_atom->event_code != BASE_JD_EVENT_PM_EVENT) {
1724 /* The job can't be submitted until the cores are ready, requeue the job */
1725 kbasep_js_policy_enqueue_job(&kbdev->js_data.policy, dequeued_atom);
1728 /* ASSERT that the Policy picked a job from an allowed context */
1729 KBASE_DEBUG_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));
1731 /* Retain the context to stop it from being scheduled out
1732 * This is released when the job finishes */
1733 retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
1734 KBASE_DEBUG_ASSERT(retain_success != MALI_FALSE);
1735 CSTD_UNUSED(retain_success);
1737 /* Retain the affinity on the slot */
1738 kbase_js_affinity_retain_slot_cores(kbdev, js, dequeued_atom->affinity);
1740 /* Check if this job needs the cycle counter enabled before submission */
1741 kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);
1743 if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT) {
1744 dev_warn(kbdev->dev, "Rejecting atom due to BASE_JD_EVENT_PM_EVENT\n");
1745 /* The job has failed due to the specified core group being unavailable */
1746 kbase_jd_done(dequeued_atom, js, NULL, 0);
1748 /* Submit the job */
1749 kbase_job_submit_nolock(kbdev, dequeued_atom, js);
/* Loop again whilst HW slots are free and the policy still yields jobs */
1753 } while (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE && has_job != MALI_FALSE);
/**
 * Dequeue the context at the head of the policy queue (if any) and try to
 * schedule it into the Run Pool: take a PM active reference, assign a free
 * address space, mark the context scheduled, and kick job submission.
 *
 * Rolls the whole transaction back (requeue-or-kill the context) if the
 * runpool is full, a suspend is in progress, or the context is dying.
 *
 * Lock order used here: queue_mutex, then jsctx_mutex -> runpool_mutex ->
 * AS transaction_mutex -> runpool_irq spinlock.
 *
 * NOTE(review): this extract elides some lines (declarations of 'has_kctx'
 * and 'pm_active_err', early 'return' statements, closing braces) - the
 * annotations below only describe visible code.
 */
1758 void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
1760 kbasep_js_device_data *js_devdata;
1762 kbase_context *head_kctx;
1763 kbasep_js_kctx_info *js_kctx_info;
1764 mali_bool is_runpool_full;
1765 kbase_as *new_address_space;
1766 unsigned long flags;
1767 mali_bool head_kctx_suspended = MALI_FALSE;
1770 KBASE_DEBUG_ASSERT(kbdev != NULL);
1772 js_devdata = &kbdev->js_data;
1774 /* We *don't* make a speculative check on whether we can fit a context in the
1775 * runpool, because most of our use-cases assume 2 or fewer contexts, and
1776 * so we will usually have enough address spaces free.
1778 * In any case, the check will be done later on once we have a context */
1780 /* Grab the context off head of queue - if there is one */
1781 mutex_lock(&js_devdata->queue_mutex);
1782 has_kctx = kbasep_js_policy_dequeue_head_ctx(&js_devdata->policy, &head_kctx);
1783 mutex_unlock(&js_devdata->queue_mutex);
1785 if (has_kctx == MALI_FALSE) {
1786 /* No ctxs to run - nothing to do */
1789 js_kctx_info = &head_kctx->jctx.sched_info;
1791 dev_dbg(kbdev->dev, "JS: Dequeue Context %p", head_kctx);
/* Take a PM active reference unless a suspend is underway; a non-zero
 * result below means the reference was refused because of suspend */
1793 pm_active_err = kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE);
1796 * Atomic transaction on the Context and Run Pool begins
1798 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1799 mutex_lock(&js_devdata->runpool_mutex);
1801 /* Check to see if we shouldn't add the context to the Run Pool:
1802 * - it can't take the specified context, and so is 'full'. This may be
1803 * 'full' even when there are address spaces available, since some contexts
1804 * are allowed in whereas others may not due to HW workarounds
1805 * - A suspend is taking place
1806 * - The context is dying due to kbase_job_zap_context() */
1807 is_runpool_full = check_is_runpool_full(kbdev, head_kctx);
1808 if (is_runpool_full || pm_active_err || js_kctx_info->ctx.is_dying) {
1809 /* Roll back the transaction so far and return */
1810 mutex_unlock(&js_devdata->runpool_mutex);
1812 /* Note: If a Power Management active reference was taken, it's released by
1814 kbasep_js_runpool_requeue_or_kill_ctx(kbdev, head_kctx, !pm_active_err);
1816 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1820 /* From this point on, the Power Management active reference is released
1821 * only if kbasep_js_runpool_release_ctx() causes the context to be removed
1822 * from the runpool */
1824 KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, head_kctx, NULL, 0u, kbasep_js_trace_get_refcnt(kbdev, head_kctx))
1826 #if MALI_CUSTOMER_RELEASE == 0
1827 if (js_devdata->nr_user_contexts_running == 0) {
1828 /* Only when there are no other contexts submitting jobs:
1829 * Latch in run-time job scheduler timeouts that were set through js_timeouts sysfs file */
1830 if (kbdev->js_soft_stop_ticks != 0)
1831 js_devdata->soft_stop_ticks = kbdev->js_soft_stop_ticks;
1833 if (kbdev->js_soft_stop_ticks_cl != 0)
1834 js_devdata->soft_stop_ticks_cl = kbdev->js_soft_stop_ticks_cl;
1836 if (kbdev->js_hard_stop_ticks_ss != 0)
1837 js_devdata->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss;
1839 if (kbdev->js_hard_stop_ticks_cl != 0)
1840 js_devdata->hard_stop_ticks_cl = kbdev->js_hard_stop_ticks_cl;
1842 if (kbdev->js_hard_stop_ticks_nss != 0)
1843 js_devdata->hard_stop_ticks_nss = kbdev->js_hard_stop_ticks_nss;
1845 if (kbdev->js_reset_ticks_ss != 0)
1846 js_devdata->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss;
1848 if (kbdev->js_reset_ticks_cl != 0)
1849 js_devdata->gpu_reset_ticks_cl = kbdev->js_reset_ticks_cl;
1851 if (kbdev->js_reset_ticks_nss != 0)
1852 js_devdata->gpu_reset_ticks_nss = kbdev->js_reset_ticks_nss;
1856 runpool_inc_context_count(kbdev, head_kctx);
1857 /* Cause any future waiter-on-termination to wait until the context is
1859 js_kctx_info->ctx.is_scheduled = MALI_TRUE;
1860 wake_up(&js_kctx_info->ctx.is_scheduled_wait);
1862 /* Pick the free address space (guaranteed free by check_is_runpool_full() ) */
1863 new_address_space = pick_free_addr_space(kbdev);
1865 /* Lock the address space whilst working on it */
1866 mutex_lock(&new_address_space->transaction_mutex);
1867 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1869 /* Do all the necessaries to assign the address space (inc. update book-keeping info)
1870 * Add the context to the Run Pool, and allow it to run jobs */
1871 assign_and_activate_kctx_addr_space(kbdev, head_kctx, new_address_space);
1873 /* NOTE: If Linux allows, then we can drop the new_address_space->transaction mutex here */
1875 if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0) {
1876 /* We need to retain it to keep the corresponding address space */
1877 kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);
1880 /* Re-check for suspending: a suspend could've occurred after we
1881 * pm_context_active'd, and all the contexts could've been removed from the
1882 * runpool before we took this lock. In this case, we don't want to allow
1883 * this context to run jobs, we just want it out immediately.
1885 * The DMB required to read the suspend flag was issued recently as part of
1886 * the runpool_irq locking. If a suspend occurs *after* that lock was taken
1887 * (i.e. this condition doesn't execute), then the kbasep_js_suspend() code
1888 * will cleanup this context instead (by virtue of it being called strictly
1889 * after the suspend flag is set, and will wait for this lock to drop) */
1890 if (kbase_pm_is_suspending(kbdev)) {
1891 /* Cause it to leave at some later point */
1893 retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);
1894 KBASE_DEBUG_ASSERT(retained);
/* Banning submission ensures the context drains and is removed */
1895 kbasep_js_clear_submit_allowed(js_devdata, head_kctx);
1896 head_kctx_suspended = MALI_TRUE;
1899 /* Try to run the next job, in case this context has jobs that match the
1900 * job slot requirements, but none of the other currently running contexts
1902 kbasep_js_try_run_next_job_nolock(kbdev);
1904 /* Transaction complete */
1905 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1906 mutex_unlock(&new_address_space->transaction_mutex);
1907 mutex_unlock(&js_devdata->runpool_mutex);
1908 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1909 /* Note: after this point, the context could potentially get scheduled out immediately */
1911 if (head_kctx_suspended) {
1912 /* Finishing forcing out the context due to a suspend. Use a variant of
1913 * kbasep_js_runpool_release_ctx() that doesn't schedule a new context,
1914 * to prevent a risk of recursion back into this function */
1915 kbasep_js_runpool_release_ctx_no_schedule(kbdev, head_kctx);
/**
 * Mark @kctx as privileged and guarantee it is scheduled into the Run Pool
 * before returning: either retain it in place (already scheduled) or enqueue
 * it, evict/fast-start as needed, and block until it is scheduled in.
 *
 * Must not be called while a suspend is in progress (BUG_ON below); intended
 * to be called from user-space-thread (syscall) context since it can sleep
 * in wait_event(). Pairs with kbasep_js_release_privileged_ctx().
 *
 * NOTE(review): closing braces / else lines are elided in this extract; the
 * final retain + unlock visible at the bottom is the already-scheduled path.
 */
1920 void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
1922 kbasep_js_kctx_info *js_kctx_info;
1923 kbasep_js_device_data *js_devdata;
1924 mali_bool is_scheduled;
1926 KBASE_DEBUG_ASSERT(kbdev != NULL);
1927 KBASE_DEBUG_ASSERT(kctx != NULL);
1929 js_devdata = &kbdev->js_data;
1930 js_kctx_info = &kctx->jctx.sched_info;
1932 /* This must never be attempted whilst suspending - i.e. it should only
1933 * happen in response to a syscall from a user-space thread */
1934 BUG_ON(kbase_pm_is_suspending(kbdev));
/* L2 reference is held for the lifetime of the privileged state; released
 * in kbasep_js_release_privileged_ctx() */
1936 kbase_pm_request_l2_caches(kbdev);
1938 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1939 /* Mark the context as privileged */
1940 js_kctx_info->ctx.flags |= KBASE_CTX_FLAG_PRIVILEGED;
1942 is_scheduled = js_kctx_info->ctx.is_scheduled;
1943 if (is_scheduled == MALI_FALSE) {
1944 mali_bool is_runpool_full;
1946 /* Add the context to the runpool */
1947 mutex_lock(&js_devdata->queue_mutex);
1948 kbasep_js_policy_enqueue_ctx(&js_devdata->policy, kctx);
1949 mutex_unlock(&js_devdata->queue_mutex);
1951 mutex_lock(&js_devdata->runpool_mutex);
1953 is_runpool_full = check_is_runpool_full(kbdev, kctx);
1954 if (is_runpool_full != MALI_FALSE) {
1955 /* Evict jobs from the NEXT registers to free an AS asap */
1956 kbasep_js_runpool_evict_next_jobs(kbdev, kctx);
1959 mutex_unlock(&js_devdata->runpool_mutex);
1960 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1961 /* Fast-starting requires the jsctx_mutex to be dropped, because it works on multiple ctxs */
1963 if (is_runpool_full != MALI_FALSE) {
1964 /* Evict non-running contexts from the runpool */
1965 kbasep_js_runpool_attempt_fast_start_ctx(kbdev, NULL);
1967 /* Try to schedule the context in */
1968 kbasep_js_try_schedule_head_ctx(kbdev);
1970 /* Wait for the context to be scheduled in */
1971 wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, kctx->jctx.sched_info.ctx.is_scheduled == MALI_TRUE);
1973 /* Already scheduled in - We need to retain it to keep the corresponding address space */
1974 kbasep_js_runpool_retain_ctx(kbdev, kctx);
1975 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1980 void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
1982 kbasep_js_kctx_info *js_kctx_info;
1983 KBASE_DEBUG_ASSERT(kctx != NULL);
1984 js_kctx_info = &kctx->jctx.sched_info;
1986 /* We don't need to use the address space anymore */
1987 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1988 js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED);
1989 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1991 kbase_pm_release_l2_caches(kbdev);
1993 /* Release the context - it will be scheduled out if there is no pending job */
1994 kbasep_js_runpool_release_ctx(kbdev, kctx);
/**
 * IRQ-context completion handling for @katom on job slot @slot_nr: release
 * per-job resources (cycle counter request, slot affinity), account the time
 * spent, inform the policy of the result, and optionally submit replacement
 * atoms from within the IRQ handler.
 *
 * @end_timestamp: completion time, or NULL for atoms that never actually ran
 *                 on HW (treated as zero time spent).
 * @done_code:     KBASE_JS_ATOM_DONE_* flags controlling whether new atoms
 *                 are started from here.
 *
 * Caller must hold the runpool_irq spinlock (asserted below).
 *
 * NOTE(review): the declaration of 'tick_diff' and some closing braces are
 * elided in this extract.
 */
1997 void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
1998 ktime_t *end_timestamp,
1999 kbasep_js_atom_done_code done_code)
2001 kbase_device *kbdev;
2002 kbasep_js_policy *js_policy;
2003 kbasep_js_device_data *js_devdata;
2004 mali_bool submit_retry_needed = MALI_TRUE; /* If we don't start jobs here, start them from the workqueue */
2006 u64 microseconds_spent = 0u;
2007 kbase_context *parent_ctx;
2009 KBASE_DEBUG_ASSERT(katom);
2010 parent_ctx = katom->kctx;
2011 KBASE_DEBUG_ASSERT(parent_ctx);
2012 kbdev = parent_ctx->kbdev;
2013 KBASE_DEBUG_ASSERT(kbdev);
2015 js_devdata = &kbdev->js_data;
2016 js_policy = &kbdev->js_data.policy;
2018 lockdep_assert_held(&js_devdata->runpool_irq.lock);
2021 * Release resources before submitting new jobs (bounds the refcount of
2022 * the resource to BASE_JM_SUBMIT_SLOTS)
2024 #ifdef CONFIG_MALI_GATOR_SUPPORT
2025 kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP, slot_nr), NULL, 0);
2026 #endif				/* CONFIG_MALI_GATOR_SUPPORT */
2028 /* Check if submitted jobs no longer require the cycle counter to be enabled */
2029 kbasep_js_deref_permon_check_and_disable_cycle_counter(kbdev, katom);
2031 /* Release the affinity from the slot - must happen before next submission to this slot */
2032 kbase_js_affinity_release_slot_cores(kbdev, slot_nr, katom->affinity);
2033 kbase_js_debug_log_current_affinities(kbdev);
2034 /* Calculate the job's time used */
2035 if (end_timestamp != NULL) {
2036 /* Only calculating it for jobs that really run on the HW (e.g. removed
2037 * from next jobs never actually ran, so really did take zero time) */
2038 tick_diff = ktime_sub(*end_timestamp, katom->start_timestamp);
/* do_div() modifies its first argument in place: ns -> us */
2040 microseconds_spent = ktime_to_ns(tick_diff);
2041 do_div(microseconds_spent, 1000);
2043 /* Round up time spent to the minimum timer resolution */
2044 if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
2045 microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
2048 /* Log the result of the job (completion status, and time spent). */
2049 kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent);
2050 /* Determine whether the parent context's timeslice is up */
2051 if (kbasep_js_policy_should_remove_ctx(js_policy, parent_ctx) != MALI_FALSE)
2052 kbasep_js_clear_submit_allowed(js_devdata, parent_ctx);
2054 if (done_code & KBASE_JS_ATOM_DONE_START_NEW_ATOMS) {
2055 /* Submit a new job (if there is one) to help keep the GPU's HEAD and NEXT registers full */
2056 KBASE_TRACE_ADD_SLOT(kbdev, JS_JOB_DONE_TRY_RUN_NEXT_JOB, parent_ctx, katom, katom->jc, slot_nr);
/* Returns MALI_TRUE when the per-IRQ submit threshold was hit and
 * further submission must be retried outside IRQ context */
2058 submit_retry_needed = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, slot_nr, &kbdev->slot_submit_count_irq[slot_nr]);
2061 if (submit_retry_needed != MALI_FALSE || katom->event_code == BASE_JD_EVENT_STOPPED) {
2062 /* The extra condition on STOPPED jobs is needed because they may be
2063 * the only job present, but they won't get re-run until the JD work
2064 * queue activates. Crucially, work queues can run items out of order
2065 * e.g. on different CPUs, so being able to submit from the IRQ handler
2066 * is not a good indication that we don't need to run jobs; the
2067 * submitted job could be processed on the work-queue *before* the
2068 * stopped job, even though it was submitted after.
2070 * Therefore, we must try to run it, otherwise it might not get run at
2071 * all after this. */
2073 KBASE_TRACE_ADD_SLOT(kbdev, JS_JOB_DONE_RETRY_NEEDED, parent_ctx, katom, katom->jc, slot_nr);
2074 kbasep_js_set_job_retry_submit_slot(katom, slot_nr);
/**
 * Prepare the Job Scheduler for system suspend: ban all submission, then
 * retain every context currently occupying an address space and immediately
 * release it again, so each context is guaranteed to be pulled out of the
 * runpool even if its busy refcount was already zero.
 *
 * Must only be called once the PM suspending flag is set (asserted below).
 * The caller must subsequently wait for all PM active references to drop.
 *
 * NOTE(review): the declarations of 'retained' (a per-AS bitmask) and 'i',
 * plus the NULL-kctx guards and the second for-loop header, are elided in
 * this extract.
 */
2078 void kbasep_js_suspend(kbase_device *kbdev)
2080 unsigned long flags;
2081 kbasep_js_device_data *js_devdata;
2084 int nr_privileged_ctx = 0;
2085 KBASE_DEBUG_ASSERT(kbdev);
2086 KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
2087 js_devdata = &kbdev->js_data;
2089 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
2091 /* Prevent all contexts from submitting */
2092 js_devdata->runpool_irq.submit_allowed = 0;
2094 /* Retain each of the contexts, so we can cause it to leave even if it had
2095 * no refcount to begin with */
2096 for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
2097 kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
2098 kbase_context *kctx = js_per_as_data->kctx;
/* Record in the bitmask which address spaces were retained, one bit per AS */
2099 retained = retained << 1;
2102 ++(js_per_as_data->as_busy_refcount);
2104 /* We can only cope with up to 1 privileged context - the
2105 * instrumented context. It'll be suspended by disabling
2106 * instrumentation */
2107 if (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED)
2108 KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1);
2111 CSTD_UNUSED(nr_privileged_ctx);
2112 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
2114 /* De-ref the previous retain to ensure each context gets pulled out
2115 * sometime later. */
2118 ++i, retained = retained >> 1) {
2119 kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
2120 kbase_context *kctx = js_per_as_data->kctx;
/* Release triggers the normal schedule-out path now submission is banned */
2123 kbasep_js_runpool_release_ctx(kbdev,kctx);
2126 /* Caller must wait for all Power Manager active references to be dropped */
2129 void kbasep_js_resume(kbase_device *kbdev)
2131 kbasep_js_device_data *js_devdata;
2133 KBASE_DEBUG_ASSERT(kbdev);
2134 js_devdata = &kbdev->js_data;
2136 KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
2138 /* Schedule in as many contexts as address spaces. This also starts atoms. */
2139 for (i = 0 ; i < kbdev->nr_hw_address_spaces; ++i)
2141 kbasep_js_try_schedule_head_ctx(kbdev);
2143 /* JS Resume complete */